// Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#ifndef __DEVVIDEOCONSTANTS_H__
#define __DEVVIDEOCONSTANTS_H__

#include <e32base.h>
#include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
#include <mm/conversioncoefficient.h>

/**
DevVideo Panic Category

@publishedAll
@released
*/
_LIT(KDevVideoPanicCategory, "DevVideo");

/**
DevVideo Panic Codes

@publishedAll
@released
*/
enum TDevVideoPanicCodes
	{
	/**
	A pre-condition on a method has been violated.
	*/
	EDevVideoPanicPreConditionViolation = 1,
	/**
	A post-condition on a method has been violated.
	*/
	EDevVideoPanicPostConditionViolation = 2,
	/**
	An invalid hardware device ID has been supplied.
	*/
	EDevVideoPanicInvalidHwDeviceId = 3
	};
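
/*
Illustrative example (not part of the API): a minimal sketch of how an
implementation might raise one of the panics above. The DevVideoPanic()
helper is hypothetical.

@code
// Hypothetical helper: panics the current thread in the DevVideo category.
static void DevVideoPanic(TDevVideoPanicCodes aCode)
	{
	User::Panic(KDevVideoPanicCategory, aCode);
	}

// e.g. DevVideoPanic(EDevVideoPanicInvalidHwDeviceId);
@endcode
*/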

// DevVideo Plugin Interface UIDs

/** Video Decoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};

/** Video Post Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};

/** Video Encoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};

/** Video Pre Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};

// DevVideo Custom Interface Uids

/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};

/**
Picture frame rate constants

Using these constants is recommended when the picture rate is known to match
one of them, to ensure that floating point equality comparisons work as expected.

Note that the MSL video APIs currently only deal with non-interlaced frames. For interlaced
video, all references to the term "picture" should be considered to refer to complete frames.
As such, the term "picture rate" here refers to the frame rate for interlaced video.

@publishedAll
@released
*/
const TReal KPictureRate5 = 5.0;
const TReal KPictureRate75 = 7.5;
const TReal KPictureRate10 = 10.0;
const TReal KPictureRate15 = 15.0;
const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
const TReal KPictureRate25 = 25.0;
const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
const TReal KPictureRate30 = 30.0;
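
/*
Illustrative example (not part of the API): why the shared constants should be
used in equality comparisons. The IsNtsc30() helper is hypothetical.

@code
TBool IsNtsc30(TReal aPictureRate)
	{
	// Both sides hold exactly the same floating point constant, so the
	// comparison is reliable; comparing against a locally computed
	// 30000.0 / 1001.0 expression may fail because of rounding differences.
	return aPictureRate == KPictureRateNTSC30;
	}
@endcode
*/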

/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition constants.

@publishedAll
@released
*/
enum TImageDataFormat
	{
	/** Raw RGB picture data in a memory area.
	*/
	ERgbRawData = 0x01000000,
	/** RGB picture data stored in a Symbian OS CFbsBitmap object.
	*/
	ERgbFbsBitmap = 0x02000000,
	/** Raw YUV picture data stored in a memory area. The data storage
	format depends on the YUV sampling pattern and data layout used.
	*/
	EYuvRawData = 0x04000000,

	/** Picture stored in a surface buffer.
	@see MMmfVideoSurfaceHandleControl::MmvshcSetSurfaceHandle
	*/
	ESurfaceBuffer = 0x08000000
	};
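
/*
Illustrative example (not part of the API): because TImageDataFormat values are
bit patterns, a combined format value can be classified by masking. The
IsRawRgbFormat() helper is hypothetical.

@code
TBool IsRawRgbFormat(TUint aFormat)
	{
	// True for formats built on ERgbRawData (e.g. ERgb16bit565 below),
	// false for CFbsBitmap-based or YUV formats.
	return (aFormat & ERgbRawData) != 0;
	}
@endcode
*/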

/**
RGB uncompressed image format alternatives.
@publishedAll
@released
*/
enum TRgbFormat
	{
	/**
	16-bit RGB data format with four bits per component.
	The data format is the same as used in Symbian EColor4K bitmaps,
	with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
	where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
	*/
	ERgb16bit444 = ERgbRawData | 0x00000001,

	/**
	16-bit RGB data format with five bits per component for red and blue and
	six bits for green. The data format is the same as used in Symbian EColor64K bitmaps,
	with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
	(This corresponds to "RGB" 16-bit little-endian halfwords)
	*/
	ERgb16bit565 = ERgbRawData | 0x00000002,

	/**
	32-bit RGB data format with eight bits per component.
	The data format is the same as used in Symbian EColor16MU bitmaps. The bit layout is
	[bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits.
	(This corresponds to "XRGB" 32-bit little-endian words)
	*/
	ERgb32bit888 = ERgbRawData | 0x00000004,

	/**
	CFbsBitmap object with EColor4K data format.
	*/
	EFbsBitmapColor4K = ERgbFbsBitmap | 0x00000001,

	/**
	CFbsBitmap object with EColor64K data format.
	*/
	EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,

	/**
	CFbsBitmap object with EColor16M data format.
	*/
	EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,

	/**
	CFbsBitmap object with EColor16MU data format.
	*/
	EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
	};
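
/*
Illustrative example (not part of the API): the ERgb16bit565 layout described
above, shown by packing 8-bit components into one 16-bit value. The Pack565()
helper is hypothetical.

@code
TUint16 Pack565(TUint aRed, TUint aGreen, TUint aBlue)
	{
	// rrrrrggggggbbbbb as a 16-bit value; stored little-endian this gives
	// the byte layout [gggbbbbb rrrrrggg] quoted for ERgb16bit565.
	return static_cast<TUint16>(((aRed >> 3) << 11) | ((aGreen >> 2) << 5) | (aBlue >> 3));
	}
@endcode
*/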

/**
YUV (YCbCr) uncompressed image data sampling pattern.
@publishedAll
@released
*/
enum TYuvSamplingPattern
	{
	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically half-way between the luminance sample positions and horizontally aligned with the left
	side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
	*/
	EYuv420Chroma1 = 0x00000001,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
	*/
	EYuv420Chroma2 = 0x00000002,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is co-located with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
	*/
	EYuv420Chroma3 = 0x00000004,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located
	with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
	*/
	EYuv422Chroma1 = 0x00000008,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is in the
	middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
	*/
	EYuv422Chroma2 = 0x00000010
	};
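
/*
Illustrative example (not part of the API): how the sampling pattern affects the
amount of chrominance data. For the 4:2:0 patterns each chrominance component has
half the luminance resolution both horizontally and vertically; for the 4:2:2
patterns only the horizontal resolution is halved. The helper below is a
hypothetical sketch assuming 8 bits per sample and no padding.

@code
TUint YuvFrameSizeInBytes(TUint aWidth, TUint aHeight, TBool aIs420)
	{
	const TUint luma = aWidth * aHeight;
	// Each of the two chrominance components is subsampled according to the pattern.
	const TUint chroma = aIs420 ? (luma / 4) : (luma / 2);
	return luma + 2 * chroma;
	}
@endcode
*/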

/**
Defines the YUV data layout in a decoded picture.
@publishedAll
@released
*/
enum TYuvDataLayout
	{
	/**
	The data is stored in planar mode. The memory buffer contains first all Y component
	data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0...
	For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API.
	*/
	EYuvDataPlanar = 0x00000001,

	/**
	The data is stored in interleaved mode, all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U,
	corresponding to "UY0VY1" little-endian 32-bit words.
	This is the same data format as EFormatYUV422Reversed in the Onboard Camera API.
	*/
	EYuvDataInterleavedLE = 0x00000002,

	/**
	The data is stored in interleaved mode, all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1,
	corresponding to "UY0VY1" big-endian 32-bit words.
	This is the same data format as EFormatYUV422 in the Onboard Camera API.
	*/
	EYuvDataInterleavedBE = 0x00000004,

	/**
	The data is stored in semi-planar mode. The memory buffer contains first all Y component
	data for the whole picture, followed by the U and V components, which are interleaved, making the data
	format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as
	EFormatYUV420SemiPlanar in the Onboard Camera API.
	*/
	EYuvDataSemiPlanar = 0x00000008
	};
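
/*
Illustrative example (not part of the API): locating the planes in an
EYuvDataPlanar 4:2:0 buffer. This is a hypothetical sketch assuming 8-bit samples
and no padding between planes; real devices may use padded strides.

@code
void GetPlanarYuv420Planes(const TUint8* aBuffer, TUint aWidth, TUint aHeight,
						   const TUint8*& aY, const TUint8*& aU, const TUint8*& aV)
	{
	const TUint lumaSize   = aWidth * aHeight;
	const TUint chromaSize = lumaSize / 4; // one 4:2:0 chrominance plane
	aY = aBuffer;
	aU = aBuffer + lumaSize;
	aV = aBuffer + lumaSize + chromaSize;
	}
@endcode
*/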

/**
Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC MPEG-4 AVC for the definitions of the transition effects.
@publishedAll
@released
*/
enum TPictureEffect
	{
	/**
	No effect.
	*/
	EEffectNone = 0x00000001,

	/**
	Fade from black.
	*/
	EEffectFadeFromBlack = 0x00000002,

	/**
	Fade to black.
	*/
	EEffectFadeToBlack = 0x00000004,

	/**
	Unspecified transition from or to constant colour.
	*/
	EEffectUnspecifiedThroughConstantColor = 0x00000008,

	/**
	Dissolve.
	*/
	EEffectDissolve = 0x00000010,

	/**
	Wipe.
	*/
	EEffectWipe = 0x00000020,

	/**
	Unspecified mixture of two scenes.
	*/
	EEffectUnspecifiedMixOfTwoScenes = 0x00000040
	};

/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors.
@publishedAll
@released
*/
enum TRgbRange
	{
	/**
	The RGB data uses the full 8-bit range of [0…255].
	*/
	ERgbRangeFull = 0x00000001,

	/**
	The RGB data uses the nominal range of [16…235]. Individual samples can still contain
	values beyond that range; the rest of the 8-bit range is used for headroom and footroom.
	*/
	ERgbRange16to235 = 0x00000002
	};

/**
Defines possible data unit types for encoded video data. The data unit types are used both
for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitType
	{
	/**
	Each data unit is a single coded picture.
	*/
	EDuCodedPicture = 0x00000001,

	/**
	Each data unit is a coded video segment.
	A coded video segment is a part of the coded video data that forms an independently
	decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2
	and a slice in H.263 are coded video segments.
	*/
	EDuVideoSegment = 0x00000002,

	/**
	Each data unit contains an integer number of video segments consecutive in decoding order,
	possibly more than one. The video segments shall be a subset of one coded picture.
	*/
	EDuSeveralSegments = 0x00000004,

	/**
	Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers.
	The data must be written in decoding order. This data unit type can be used for playback if the client
	does not have information about the bitstream syntax, and just writes data in random-sized chunks. For
	recording, this data unit type is useful if the client can handle arbitrarily split data units, giving the
	encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still
	belong to exactly one output picture.
	*/
	EDuArbitraryStreamSection = 0x00000008
	};

/**
Defines possible encapsulation types for coded video data units. The encapsulation information is
used both for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
	{
	/**
	The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4
	Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
	*/
	EDuElementaryStream = 0x00010000,

	/**
	The coded data units are encapsulated in a general-purpose packet payload format whose coded
	data units can be decoded independently but cannot be generally chained into a bitstream.
	For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category.
	*/
	EDuGenericPayload = 0x00020000,

	/**
	The coded data units are encapsulated in RTP packet payload format. The RTP payload header
	may contain codec-specific items, such as a redundant copy of a picture header in the H.263
	payload specification RFC 2429.
	*/
	EDuRtpPayload = 0x00040000
	};

/**
Defines the HRD/VBV specification used in a stream.
@publishedAll
@released
*/
enum THrdVbvSpecification
	{
	/** No HRD/VBV specification. */
	EHrdVbvNone = 0x00000001,

	/** The HRD/VBV specification in the corresponding coding standard. */
	EHrdVbvCodingStandard = 0x00000002,

	/** Annex G of 3GPP TS 26.234 Release 5. */
	EHrdVbv3GPP = 0x00000004
	};

/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the
types are defined as bit values that can be combined as a bitfield.
@publishedAll
@released
*/
enum TPrePostProcessType
	{
	/**
	Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording.
	Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that
	only support image dimensions that are multiples of 16 pixels.
	*/
	EPpInputCrop = 0x00000001,

	/**
	Horizontal mirroring, which flips the image data around a vertical line in its center.
	*/
	EPpMirror = 0x00000002,

	/**
	Picture rotation; supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
	*/
	EPpRotate = 0x00000004,

	/**
	Picture scaling to a new size, including both upscaling and downscaling.
	The supported scaling types and scale factors depend on the pixel processor.
	*/
	EPpScale = 0x00000008,

	/**
	Crops the picture to a final output rectangle.
	*/
	EPpOutputCrop = 0x00000010,

	/**
	Pads the output picture to a defined size. Used in video recording to pad pictures to
	suit the encoder input requirements.
	*/
	EPpOutputPad = 0x00000020,

	/**
	YUV to RGB color space conversion. Supported only for video playback.
	*/
	EPpYuvToRgb = 0x00000040,

	/**
	RGB to YUV color space conversion. Supported only for video recording.
	*/
	EPpRgbToYuv = 0x00000080,

	/**
	YUV to YUV data format conversion. Supported only for video recording.
	*/
	EPpYuvToYuv = 0x00000100,

	/**
	Noise filtering. Noise filtering is typically used to enhance the input
	picture from the camera, and is usually only supported for video recording.
	*/
	EPpNoiseFilter = 0x00000200,

	/**
	Color enhancement. Color enhancement is typically used to enhance the input picture
	from the camera, and is usually only supported for video recording.
	*/
	EPpColorEnhancement = 0x00000400,

	/**
	Frame stabilisation. Supported only for video recording.
	*/
	EPpFrameStabilisation = 0x00000800,

	/**
	Deblocking is typically used to remove artefacts from the output picture that result from
	high compression or a noisy input signal. Only supported for video playback.
	*/
	EPpDeblocking = 0x00001000,

	/**
	Deringing is typically used to remove artefacts from the output picture that result from
	a noisy input signal corrupting motion estimates. Only supported for video playback.
	*/
	EPpDeringing = 0x00002000,

	/**
	Custom hardware device specific processing.
	*/
	EPpCustom = 0x10000000
	};
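
/*
Illustrative example (not part of the API): since the types are bit values, a
processing chain can be described as a combined bitfield and tested with masks.
The names below are hypothetical.

@code
// A post-processor combination that scales the decoded picture and converts it to RGB.
const TUint KExamplePostProcCombination = EPpScale | EPpYuvToRgb;

TBool SupportsYuvToRgb(TUint aSupportedTypes)
	{
	return (aSupportedTypes & EPpYuvToRgb) != 0;
	}
@endcode
*/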

/**
Dithering types.
@publishedAll
@released
*/
enum TDitherType
	{
	/** No dithering. */
	EDitherNone = 0x00000001,

	/** Ordered dither. */
	EDitherOrdered = 0x00000002,

	/** Error diffusion dither. */
	EDitherErrorDiffusion = 0x00000004,

	/** Other hardware device specific dithering type. */
	EDitherOther = 0x00000008
	};

/**
Rotation types for pre-processors and post-processors.
@publishedAll
@released
*/
enum TRotationType
	{
	/** No rotation. */
	ERotateNone = 0x00000001,

	/** Rotate the picture 90 degrees clockwise. */
	ERotate90Clockwise = 0x00000002,

	/** Rotate the picture 90 degrees anticlockwise. */
	ERotate90Anticlockwise = 0x00000004,

	/** Rotate the picture 180 degrees. */
	ERotate180 = 0x00000008
	};

/**
Defines possible encoding bit-rate control modes.
@publishedAll
@released
*/
enum TBitrateControlType
	{
	/**
	The encoder does not control the bit-rate, but uses the specified target picture quality and picture
	rate as such. The coded data stream must still remain compliant with the standard and buffer settings
	in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
	*/
	EBrControlNone = 0x00000001,

	/**
	The encoder controls the coded bit-rate of the stream. The caller indicates the target bit-rate, target
	picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
	*/
	EBrControlStream = 0x00000002,

	/**
	The encoder controls the coded bit-rate of each picture. The caller gives the target number of bits per
	frame. Each given input frame is coded. This type of operation is applicable only with memory-buffer-based
	input.
	*/
	EBrControlPicture = 0x00000004
	};

/**
Defines the scalability type for a single bit-rate scalability layer.
@publishedAll
@released
*/
enum TScalabilityType
	{
	/**
	The layer uses temporal scalability. Using the layer increases the picture rate.
	*/
	EScalabilityTemporal = 0x00000001,

	/**
	The layer uses quality scalability. Using the layer improves picture quality.
	*/
	EScalabilityQuality = 0x00000002,

	/**
	The layer uses spatial scalability. Using the layer increases picture resolution.
	*/
	EScalabilitySpatial = 0x00000004,

	/**
	The layer is a fine-granularity scalability layer. In fine granularity scalability, the output
	quality increases gradually as a function of decoded bits from the enhancement layer.
	*/
	EScalabilityFineGranularity = 0x10000000,

	/**
	The layer is a fine-granularity quality scalability layer.
	*/
	EScalabilityQualityFG = EScalabilityFineGranularity | EScalabilityQuality
	};
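
/*
Illustrative example (not part of the API): EScalabilityQualityFG combines the
quality and fine-granularity bits, so either aspect can be tested independently.
The helper below is hypothetical.

@code
TBool IsFineGranularity(TUint aScalabilityType)
	{
	return (aScalabilityType & EScalabilityFineGranularity) != 0;
	}
@endcode
*/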

/**
Forward error control strength used for an unequal error protection level. Other values between
EFecStrengthNone and EFecStrengthHigh can also be used; the encoder will round the values to the levels
it supports.
@publishedAll
@released
*/
enum TErrorControlStrength
	{
	/** No error control. */
	EFecStrengthNone = 0,

	/** Low error control strength. */
	EFecStrengthLow = 256,

	/** Normal error control strength. */
	EFecStrengthNormal = 512,

	/** High error control strength. */
	EFecStrengthHigh = 768
	};
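
/*
Illustrative example (not part of the API): values between the named levels are
also valid; an encoder rounds them to the nearest strength it supports. The
constant below is hypothetical.

@code
// A strength half-way between low and normal, i.e. 384.
const TUint KExampleFecStrength = (EFecStrengthLow + EFecStrengthNormal) / 2;
@endcode
*/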

/**
Defines the scalability type for in-layer bit-rate scalability.
@publishedAll
@released
*/
enum TInLayerScalabilityType
	{
	/** Temporal scalability, such as B-pictures. */
	EInLScalabilityTemporal = 1,

	/** Other scalability type. */
	EInLScalabilityOther
	};

/**
Defines what part of a frame is contained within a video buffer.
@publishedAll
@released
*/
enum TFramePortion
	{
	/** The frame portion is unknown. */
	EFramePortionUnknown,

	/** An entire frame. */
	EFramePortionWhole,

	/** A fragment of a frame containing the start but not the end. */
	EFramePortionStartFragment,

	/** A fragment of a frame containing neither the start nor the end. */
	EFramePortionMidFragment,

	/** A fragment of a frame containing the end but not the start. */
	EFramePortionEndFragment
	};

#endif