os/mm/mmhais/videohai/devvideo/inc/devvideoconstants.h
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//
#ifndef __DEVVIDEOCONSTANTS_H__
#define __DEVVIDEOCONSTANTS_H__

#include <e32base.h>
#include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
#include <mm/conversioncoefficient.h>
sl@0
    22
sl@0
    23
/**
sl@0
    24
DevVideo Panic Category
sl@0
    25
sl@0
    26
@publishedAll
sl@0
    27
@released
sl@0
    28
*/
sl@0
    29
_LIT(KDevVideoPanicCategory, "DevVideo");
sl@0
    30
sl@0
    31
/**
Panic codes raised under the DevVideo panic category.

@publishedAll
@released
*/
enum TDevVideoPanicCodes
	{
	/** A pre-condition on a method has been violated. */
	EDevVideoPanicPreConditionViolation = 1,

	/** A post-condition on a method has been violated. */
	EDevVideoPanicPostConditionViolation = 2,

	/** An invalid hardware device ID has been supplied. */
	EDevVideoPanicInvalidHwDeviceId = 3
	};
sl@0
    52
sl@0
    53
sl@0
    54
// DevVideo Plugin Interface UIDs
sl@0
    55
sl@0
    56
/** Video Decoder HW Device Plugin Interface UID 
sl@0
    57
@publishedAll
sl@0
    58
@released
sl@0
    59
*/
sl@0
    60
const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};
sl@0
    61
sl@0
    62
/** Video Post Processor HW Device Plugin Interface UID
sl@0
    63
@publishedAll
sl@0
    64
@released
sl@0
    65
*/
sl@0
    66
const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};
sl@0
    67
sl@0
    68
/** Video Encoder HW Device Plugin Interface UID 
sl@0
    69
@publishedAll
sl@0
    70
@released
sl@0
    71
*/
sl@0
    72
const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};
sl@0
    73
sl@0
    74
/** Video Pre Processor HW Device Plugin Interface UID
sl@0
    75
@publishedAll
sl@0
    76
@released
sl@0
    77
*/
sl@0
    78
const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};
sl@0
    79
sl@0
    80
// DevVideo Custom Interface Uids
sl@0
    81
sl@0
    82
/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
sl@0
    83
@publishedAll
sl@0
    84
@released
sl@0
    85
*/
sl@0
    86
const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};
sl@0
    87
sl@0
    88
/** 
sl@0
    89
Picture frame rate constants
sl@0
    90
sl@0
    91
Using these constants is recommended when the picture rate is known to match 
sl@0
    92
one of them, to ensure that floating point equality comparisons work as expected.
sl@0
    93
sl@0
    94
Note that the MSL video APIs currently only deal with non-interlaced frames.  For interlaced 
sl@0
    95
video, all references to the term "picture" should be considered to refer to complete frames. 
sl@0
    96
As such, the term "picture rate" here refers to the frame rate for interlaced video.
sl@0
    97
sl@0
    98
@publishedAll
sl@0
    99
@released
sl@0
   100
*/
sl@0
   101
const TReal KPictureRate5 = 5.0;
sl@0
   102
const TReal KPictureRate75 = 7.5;
sl@0
   103
const TReal KPictureRate10 = 10.0;
sl@0
   104
const TReal KPictureRate15 = 15.0;
sl@0
   105
const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
sl@0
   106
const TReal KPictureRate25 = 25.0;
sl@0
   107
const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
sl@0
   108
const TReal KPictureRate30 = 30.0;
sl@0
   109
sl@0
   110
sl@0
   111
/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition
constants (see TRgbFormat and the YUV format enumerations).

@publishedAll
@released
*/
enum TImageDataFormat
	{
	/** Raw RGB picture data in a memory area. */
	ERgbRawData		= 0x01000000,

	/** RGB picture data stored in a Symbian OS CFbsBitmap object. */
	ERgbFbsBitmap	  = 0x02000000,

	/** Raw YUV picture data stored in a memory area. The data storage
	format depends on the YUV sampling pattern and data layout used. */
	EYuvRawData		= 0x04000000,

	/** Picture stored in a surface buffer.
	@see MMmfVideoSurfaceHandleControl::MmvshcSetSurfaceHandle */
	ESurfaceBuffer = 0x08000000
	};
sl@0
   136
sl@0
   137
sl@0
   138
/**
sl@0
   139
RGB uncompressed image format alternatives.
sl@0
   140
@publishedAll
sl@0
   141
@released
sl@0
   142
*/
sl@0
   143
enum TRgbFormat 
sl@0
   144
	{
sl@0
   145
	/**
sl@0
   146
	16-bit RGB data format with four pixels per component. 
sl@0
   147
	The data format is the same as used in Symbian EColor4K bitmaps, 
sl@0
   148
	with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
sl@0
   149
	where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
sl@0
   150
	*/
sl@0
   151
	ERgb16bit444	   = ERgbRawData   | 0x00000001,
sl@0
   152
sl@0
   153
	/**
sl@0
   154
	16-bit RGB data format with five bits per component for red and blue and 
sl@0
   155
	six bits for green. The data format is the same as used in Symbian EColor64K bitmaps, 
sl@0
   156
	with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
sl@0
   157
	(This corresponds to "RGB" 16-bit little-endian halfwords)
sl@0
   158
	*/
sl@0
   159
	ERgb16bit565	   = ERgbRawData   | 0x00000002,
sl@0
   160
sl@0
   161
	/**
sl@0
   162
	32-bit RGB data format with eight bits per component. 
sl@0
   163
	This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
sl@0
   164
	[bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits. 
sl@0
   165
	(This corresponds to "XRGB" 32-bit little-endian words)
sl@0
   166
	*/
sl@0
   167
	ERgb32bit888	   = ERgbRawData   | 0x00000004,
sl@0
   168
sl@0
   169
	/**
sl@0
   170
	CFbsBitmap object with EColor4K data format.
sl@0
   171
	*/
sl@0
   172
	EFbsBitmapColor4K  = ERgbFbsBitmap | 0x00000001,
sl@0
   173
sl@0
   174
 	/**
sl@0
   175
	CFbsBitmap object with EColor64K data format.
sl@0
   176
	*/
sl@0
   177
	EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,
sl@0
   178
sl@0
   179
	/**
sl@0
   180
	CFbsBitmap object with EColor16M data format.
sl@0
   181
	*/
sl@0
   182
	EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,
sl@0
   183
sl@0
   184
	/**
sl@0
   185
	CFbsBitmap object with EColor16MU data format.
sl@0
   186
	*/
sl@0
   187
	EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
sl@0
   188
	};
sl@0
   189
sl@0
   190
sl@0
   191
/**
YUV (YCbCr) uncompressed image data sampling pattern.

@publishedAll
@released
*/
enum TYuvSamplingPattern
	{
	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one
	chrominance sample position. The four luminance sample positions are on
	the corners of a square. The chrominance sample position is vertically
	half-way of the luminance sample positions and horizontally aligned with
	the left side of the square. This is the MPEG-2 and the MPEG-4 Part 2
	sampling pattern.
	*/
	EYuv420Chroma1 = 0x00000001,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one
	chrominance sample position. The four luminance sample positions are on
	the corners of a square. The chrominance sample position is vertically
	and horizontally in the middle of the luminance sample positions. This is
	the MPEG-1 sampling pattern.
	*/
	EYuv420Chroma2 = 0x00000002,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one
	chrominance sample position. The four luminance sample positions are on
	the corners of a square. The chrominance sample position colocates with
	the top-left corner of the square. This sampling format is one of the
	options in Annex E of H.264 | MPEG-4 AVC.
	*/
	EYuv420Chroma3 = 0x00000004,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one
	chrominance sample position. The luminance sample positions reside on the
	same pixel row. The chrominance sample position is co-located with the
	left one of the luminance sample positions. This is the MPEG-2 4:2:2
	sampling pattern.
	*/
	EYuv422Chroma1 = 0x00000008,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one
	chrominance sample position. The luminance sample positions reside on the
	same pixel row. The chrominance sample position is in the middle of the
	luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
	*/
	EYuv422Chroma2 = 0x00000010
	};
sl@0
   234
sl@0
   235
sl@0
   236
/**
Defines the YUV data layout in a decoded picture.

@publishedAll
@released
*/
enum TYuvDataLayout
	{
	/**
	The data is stored in a plane mode. The memory buffer contains first all
	Y component data for the whole picture, followed by U and V, making the
	data format Y00Y01Y02Y03...U0...V0... For YUV 4:2:0 data, this is the
	same data format as EFormatYUV420Planar in the Onboard Camera API.
	*/
	EYuvDataPlanar		= 0x00000001,

	/**
	The data is stored in interleaved mode, all components interleaved in a
	single memory block. Interleaved layout is only supported for YUV 4:2:2
	data. The data byte order is Y1VY0U, corresponding to "UY0VY1"
	little-endian 32-bit words. This is the same data format as
	EFormatYUV422Reversed in the Onboard Camera API.
	*/
	EYuvDataInterleavedLE = 0x00000002,

	/**
	The data is stored in interleaved mode, all components interleaved in a
	single memory block. Interleaved layout is only supported for YUV 4:2:2
	data. The data byte order is UY0VY1, corresponding to "UY0VY1" big-endian
	32-bit words. This is the same data format as EFormatYUV422 in the
	Onboard Camera API.
	*/
	EYuvDataInterleavedBE = 0x00000004,

	/**
	The data is stored in a semi-planar mode. The memory buffer contains
	first all Y component data for the whole picture, followed by U and V
	components, which are interlaced, making the data format
	Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data
	format as FormatYUV420SemiPlanar in the Onboard Camera API.
	*/
	EYuvDataSemiPlanar = 0x00000008
	};
sl@0
   273
sl@0
   274
/**
Defines the picture effect used for an input picture. Please refer to
ITU-T H.264 | ISO/IEC MPEG-4 AVC for the definitions of the transition effects.

@publishedAll
@released
*/
enum TPictureEffect
	{
	/** No effect. */
	EEffectNone          					= 0x00000001,

	/** Fade from black. */
	EEffectFadeFromBlack 					= 0x00000002,

	/** Fade to black. */
	EEffectFadeToBlack   					= 0x00000004,

	/** Unspecified transition from or to constant colour. */
	EEffectUnspecifiedThroughConstantColor 	= 0x00000008,

	/** Dissolve. */
	EEffectDissolve   						= 0x00000010,

	/** Wipe. */
	EEffectWipe   							= 0x00000020,

	/** Unspecified mixture of two scenes. */
	EEffectUnspecifiedMixOfTwoScenes  		= 0x00000040
	};
sl@0
   316
sl@0
   317
/**
Defines the data value range used for RGB data. Used for determining the
correct color space conversion factors.

@publishedAll
@released
*/
enum TRgbRange
	{
	/** The RGB data uses the full 8-bit range of [0..255]. */
	ERgbRangeFull	= 0x00000001,

	/**
	The RGB data uses the nominal range of [16..235]. Individual samples can
	still contain values beyond that range, the rest of the 8-bit range is
	used for headroom and footroom.
	*/
	ERgbRange16to235 = 0x00000002
	};
sl@0
   335
sl@0
   336
sl@0
   337
sl@0
   338
/**
Defines possible data unit types for encoded video data. The data unit types
are used both for encoded video input for playback as well as encoded video
output from recording.

@publishedAll
@released
*/
enum TVideoDataUnitType
	{
	/** Each data unit is a single coded picture. */
	EDuCodedPicture		   = 0x00000001,

	/**
	Each data unit is a coded video segment.
	A coded video segment is a part of the coded video data that forms an
	independently decodable part of a coded video frame. For example, a video
	packet in MPEG-4 Part 2 and slice in H.263 are coded video segments.
	*/
	EDuVideoSegment		   = 0x00000002,

	/**
	Each data unit contains an integer number of video segments consecutive
	in decoding order, possibly more than one. The video segments shall be a
	subset of one coded picture.
	*/
	EDuSeveralSegments		= 0x00000004,

	/**
	Each data unit contains a piece of raw video bitstream, not necessarily
	aligned at any headers. The data must be written in decoding order. This
	data unit type can be used for playback if the client does not have
	information about the bitstream syntax, and just writes data in
	random-sized chunks. For recording this data unit type is useful if the
	client can handle arbitrarily split data units, giving the encoder
	maximum flexibility in buffer allocation. For encoded data output, each
	data unit must still belong to exactly one output picture.
	*/
	EDuArbitraryStreamSection = 0x00000008
	};
sl@0
   375
sl@0
   376
/**
Defines possible encapsulation types for coded video data units. The
encapsulation information is used both for encoded video input for playback as
well as encoded video output from recording.

@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
	{
	/**
	The coded data units can be chained in a bitstream that can be decoded.
	For example, MPEG-4 Part 2 elementary streams, H.263 bitstreams, and
	H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
	*/
	EDuElementaryStream = 0x00010000,

	/**
	The coded data units are encapsulated in a general-purpose packet payload
	format whose coded data units can be decoded independently but cannot be
	generally chained into a bitstream. For example, the Network Abstraction
	Layer Units of H.264 | MPEG-4 AVC fall into this category.
	*/
	EDuGenericPayload   = 0x00020000,

	/**
	The coded data units are encapsulated in RTP packet payload format. The
	RTP payload header may contain codec-specific items, such as a redundant
	copy of a picture header in the H.263 payload specification RFC2429.
	*/
	EDuRtpPayload	   = 0x00040000
	};
sl@0
   404
sl@0
   405
/**
Defines the HRD/VBV specification used in a stream.

@publishedAll
@released
*/
enum THrdVbvSpecification
	{
	/** No HRD/VBV specification. */
	EHrdVbvNone		   = 0x00000001,

	/** The HRD/VBV specification in the corresponding coding standard. */
	EHrdVbvCodingStandard = 0x00000002,

	/** Annex G of 3GPP TS 26.234 Release 5. */
	EHrdVbv3GPP		   = 0x00000004
	};
sl@0
   421
sl@0
   422
/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations
simultaneously, and thus the types are defined as bit values that can be
combined as a bitfield.

@publishedAll
@released
*/
enum TPrePostProcessType
	{
	/**
	Input cropping, used for pan-scan cropping in video playback and digital
	zoom in video recording. Pan-scan cropping is useful, for example, for
	displaying arbitrary-sized pictures with codecs that only support image
	dimensions that are multiples of 16 pixels.
	*/
	EPpInputCrop =		0x00000001,

	/** Horizontal mirroring, flips the image data around a vertical line in
	its center. */
	EPpMirror =		   0x00000002,

	/** Picture rotation, supports rotation by 90 or 180 degrees, clockwise
	and anticlockwise. */
	EPpRotate =		   0x00000004,

	/**
	Picture scaling to a new size, includes both upscaling and downscaling.
	The supported scaling types and scale factors depend on the pixel
	processor.
	*/
	EPpScale =			0x00000008,

	/** Crops the picture to a final output rectangle. */
	EPpOutputCrop =	   0x00000010,

	/** Pads the output picture to a defined size. Used in video recording to
	pad pictures to suit the encoder input requirements. */
	EPpOutputPad =		0x00000020,

	/** YUV to RGB color space conversion. Supported only for video playback. */
	EPpYuvToRgb =		 0x00000040,

	/** RGB to YUV color space conversion. Supported only for video recording. */
	EPpRgbToYuv =		 0x00000080,

	/** YUV to YUV data format conversion. Supported only for video recording. */
	EPpYuvToYuv =		 0x00000100,

	/** Noise filtering. Noise filtering is typically used to enhance the
	input picture from the camera, and is usually only supported for video
	recording. */
	EPpNoiseFilter =	  0x00000200,

	/** Color enhancement. Color enhancement is typically used to enhance the
	input picture from the camera, and is usually only supported for video
	recording. */
	EPpColorEnhancement = 0x00000400,

	/** Frame stabilisation. Supported only for video recording. */
	EPpFrameStabilisation = 0x00000800,

	/** Deblocking is typically used to remove artefacts from the output
	picture that result from high compression or a noisy input signal. Only
	supported for video playback. */
	EPpDeblocking =         0x00001000,

	/** Deringing is typically used to remove artefacts from the output
	picture that result from a noisy input signal corrupting motion
	estimates. Only supported for video playback. */
	EPpDeringing =          0x00002000,

	/** Custom hardware device specific processing. */
	EPpCustom =		   0x10000000
	};
sl@0
   514
sl@0
   515
/**
Dithering types.

@publishedAll
@released
*/
enum TDitherType
	{
	/** No dithering. */
	EDitherNone		   = 0x00000001,

	/** Ordered dither. */
	EDitherOrdered		= 0x00000002,

	/** Error diffusion dither. */
	EDitherErrorDiffusion = 0x00000004,

	/** Other hardware device specific dithering type. */
	EDitherOther		  = 0x00000008
	};
sl@0
   534
sl@0
   535
/**
Rotation types for pre-processors and post-processors.

@publishedAll
@released
*/
enum TRotationType
	{
	/** No rotation. */
	ERotateNone			   = 0x00000001,

	/** Rotate the picture 90 degrees clockwise. */
	ERotate90Clockwise	 = 0x00000002,

	/** Rotate the picture 90 degrees anticlockwise. */
	ERotate90Anticlockwise = 0x00000004,

	/** Rotate the picture 180 degrees. */
	ERotate180			 = 0x00000008
	};
sl@0
   554
sl@0
   555
sl@0
   556
sl@0
   557
/**
Defines possible encoding bit-rate control modes.

@publishedAll
@released
*/
enum TBitrateControlType
	{
	/**
	The encoder does not control the bit-rate, but uses specified target
	picture quality and picture rate as such. The coded data stream must
	still remain compliant with the standard and buffer settings in use, if
	any, and thus HRD/VBV settings can limit the possible bit-rate.
	*/
	EBrControlNone	= 0x00000001,

	/**
	The encoder controls the coded bit-rate of the stream. The caller
	indicates target bit-rate, target picture quality, target frame rate,
	spatial-temporal trade-off, and latency-quality trade-off.
	*/
	EBrControlStream  = 0x00000002,

	/**
	The encoder controls the coded bit-rate of each picture. The caller gives
	the target amount of bits per frame. Each given input frame is coded.
	This type of operation is applicable only in memory-buffer-based input.
	*/
	EBrControlPicture = 0x00000004
	};
sl@0
   584
sl@0
   585
sl@0
   586
/**
Defines the scalability type for a single bit-rate scalability layer.

@publishedAll
@released
*/
enum TScalabilityType
	{
	/** The layer uses temporal scalability. Using the layer increases the
	picture rate. */
	EScalabilityTemporal		= 0x00000001,

	/** The layer uses quality scalability. Using the layer improves picture
	quality. */
	EScalabilityQuality		 = 0x00000002,

	/** The layer uses spatial scalability. Using the layer increases picture
	resolution. */
	EScalabilitySpatial		 = 0x00000004,

	/**
	The layer is a fine-granularity scalability layer. In fine granularity
	scalability, the output quality increases gradually as a function of
	decoded bits from the enhancement layer.
	*/
	EScalabilityFineGranularity = 0x10000000,

	/** The layer is a fine-granularity quality scalability layer. */
	EScalabilityQualityFG	   = EScalabilityFineGranularity | EScalabilityQuality
	};
sl@0
   619
sl@0
   620
/**
Forward error control strength used for an unequal error protection level.
Also other values between EFecStrengthNone and EFecStrengthHigh can be used,
the encoder will round the values to the levels it supports.

@publishedAll
@released
*/
enum TErrorControlStrength
	{
	/** No error control. */
	EFecStrengthNone = 0,

	/** Low error control strength. */
	EFecStrengthLow = 256,

	/** Normal error control strength. */
	EFecStrengthNormal = 512,

	/** High error control strength. */
	EFecStrengthHigh = 768
	};
sl@0
   641
sl@0
   642
/**
Defines the scalability type for in-layer bit-rate scalability.

@publishedAll
@released
*/
enum TInLayerScalabilityType
	{
	/** Temporal scalability, such as B-pictures. */
	EInLScalabilityTemporal = 1,

	/** Other scalability type. */
	EInLScalabilityOther
	};
sl@0
   655
sl@0
   656
/**
Defines what part of a frame is contained within a video buffer.

@publishedAll
@released
*/
enum TFramePortion
	{
	/** The frame portion is unknown. */
	EFramePortionUnknown,

	/** An entire frame. */
	EFramePortionWhole,

	/** A fragment of a frame containing the start but not the end. */
	EFramePortionStartFragment,

	/** A fragment of a frame containing neither the start nor the end. */
	EFramePortionMidFragment,

	/** A fragment of a frame containing the end but not the start. */
	EFramePortionEndFragment
	};
sl@0
   678
sl@0
   679
#endif
sl@0
   680