os/mm/mmhais/videohai/devvideo/inc/devvideoconstants.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/mm/mmhais/videohai/devvideo/inc/devvideoconstants.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,680 @@
     1.4 +// Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +//
    1.18 +
    1.19 +#ifndef __DEVVIDEOCONSTANTS_H__
    1.20 +#define __DEVVIDEOCONSTANTS_H__
    1.21 +
    1.22 +#include <e32base.h>
    1.23 +#include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
    1.24 +#include <mm/conversioncoefficient.h>
    1.25 +
    1.26 +/**
    1.27 +DevVideo Panic Category
    1.28 +
    1.29 +@publishedAll
    1.30 +@released
    1.31 +*/
    1.32 +_LIT(KDevVideoPanicCategory, "DevVideo");
    1.33 +
    1.34 +/**
    1.35 +DevVideo Panic Codes
    1.36 +
    1.37 +@publishedAll
    1.38 +@released
    1.39 +*/
    1.40 +enum TDevVideoPanicCodes
    1.41 +	{
    1.42 +	/**
    1.43 +	A pre-condition on a method has been violated.
    1.44 +	*/
    1.45 +	EDevVideoPanicPreConditionViolation = 1,
    1.46 +	/**
    1.47 +	A post-condition on a method has been violated.
    1.48 +	*/
    1.49 +	EDevVideoPanicPostConditionViolation = 2,
    1.50 +	/**
    1.51 +	An invalid hardware device ID has been supplied.
    1.52 +	*/
    1.53 +	EDevVideoPanicInvalidHwDeviceId = 3
    1.54 +	};
    1.55 +
    1.56 +
    1.57 +// DevVideo Plugin Interface UIDs
    1.58 +
    1.59 +/** Video Decoder HW Device Plugin Interface UID 
    1.60 +@publishedAll
    1.61 +@released
    1.62 +*/
    1.63 +const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};
    1.64 +
    1.65 +/** Video Post Processor HW Device Plugin Interface UID
    1.66 +@publishedAll
    1.67 +@released
    1.68 +*/
    1.69 +const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};
    1.70 +
    1.71 +/** Video Encoder HW Device Plugin Interface UID 
    1.72 +@publishedAll
    1.73 +@released
    1.74 +*/
    1.75 +const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};
    1.76 +
    1.77 +/** Video Pre Processor HW Device Plugin Interface UID
    1.78 +@publishedAll
    1.79 +@released
    1.80 +*/
    1.81 +const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};
    1.82 +
    1.83 +// DevVideo Custom Interface Uids
    1.84 +
    1.85 +/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
    1.86 +@publishedAll
    1.87 +@released
    1.88 +*/
    1.89 +const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};
    1.90 +
    1.91 +/** 
    1.92 +Picture frame rate constants
    1.93 +
    1.94 +Using these constants is recommended when the picture rate is known to match 
    1.95 +one of them, to ensure that floating point equality comparisons work as expected.
    1.96 +
    1.97 +Note that the MSL video APIs currently only deal with non-interlaced frames.  For interlaced 
    1.98 +video, all references to the term "picture" should be considered to refer to complete frames. 
    1.99 +As such, the term "picture rate" here refers to the frame rate for interlaced video.
   1.100 +
   1.101 +@publishedAll
   1.102 +@released
   1.103 +*/
   1.104 +const TReal KPictureRate5 = 5.0;
   1.105 +const TReal KPictureRate75 = 7.5;
   1.106 +const TReal KPictureRate10 = 10.0;
   1.107 +const TReal KPictureRate15 = 15.0;
   1.108 +const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
   1.109 +const TReal KPictureRate25 = 25.0;
   1.110 +const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
   1.111 +const TReal KPictureRate30 = 30.0;
   1.112 +
   1.113 +
   1.114 +/**
   1.115 +Specifies the data format used for an uncompressed picture. 
   1.116 +The values are bit patterns that can be combined with other format definition constants.
   1.117 +
   1.118 +@publishedAll
   1.119 +@released
   1.120 +*/
   1.121 +enum TImageDataFormat
   1.122 +	{
   1.123 +	/** Raw RGB picture data in a memory area.
   1.124 +	*/
   1.125 +	ERgbRawData		= 0x01000000,
   1.126 +	/** RGB picture data stored in a Symbian OS CFbsBitmap object.
   1.127 +	*/
   1.128 +	ERgbFbsBitmap	  = 0x02000000,
   1.129 +	/** Raw YUV picture data stored in a memory area. The data storage 
   1.130 +	format depends on the YUV sampling pattern and data layout used.
   1.131 +	*/
   1.132 +	EYuvRawData		= 0x04000000,
   1.133 +	
   1.134 +	/** Picture stored in a surface buffer.
     1.135 +	 @see MMmfVideoSurfaceHandleControl::MmvshcSetSurfaceHandle
   1.136 +	*/
   1.137 +	ESurfaceBuffer = 0x08000000
   1.138 +	};
   1.139 +
   1.140 +
   1.141 +/**
   1.142 +RGB uncompressed image format alternatives.
   1.143 +@publishedAll
   1.144 +@released
   1.145 +*/
   1.146 +enum TRgbFormat 
   1.147 +	{
   1.148 +	/**
     1.149 +	16-bit RGB data format with four bits per component. 
   1.150 +	The data format is the same as used in Symbian EColor4K bitmaps, 
   1.151 +	with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
   1.152 +	where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
   1.153 +	*/
   1.154 +	ERgb16bit444	   = ERgbRawData   | 0x00000001,
   1.155 +
   1.156 +	/**
   1.157 +	16-bit RGB data format with five bits per component for red and blue and 
   1.158 +	six bits for green. The data format is the same as used in Symbian EColor64K bitmaps, 
   1.159 +	with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
   1.160 +	(This corresponds to "RGB" 16-bit little-endian halfwords)
   1.161 +	*/
   1.162 +	ERgb16bit565	   = ERgbRawData   | 0x00000002,
   1.163 +
   1.164 +	/**
   1.165 +	32-bit RGB data format with eight bits per component. 
   1.166 +	This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
   1.167 +	[bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits. 
   1.168 +	(This corresponds to "XRGB" 32-bit little-endian words)
   1.169 +	*/
   1.170 +	ERgb32bit888	   = ERgbRawData   | 0x00000004,
   1.171 +
   1.172 +	/**
   1.173 +	CFbsBitmap object with EColor4K data format.
   1.174 +	*/
   1.175 +	EFbsBitmapColor4K  = ERgbFbsBitmap | 0x00000001,
   1.176 +
   1.177 + 	/**
   1.178 +	CFbsBitmap object with EColor64K data format.
   1.179 +	*/
   1.180 +	EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,
   1.181 +
   1.182 +	/**
   1.183 +	CFbsBitmap object with EColor16M data format.
   1.184 +	*/
   1.185 +	EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,
   1.186 +
   1.187 +	/**
   1.188 +	CFbsBitmap object with EColor16MU data format.
   1.189 +	*/
   1.190 +	EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
   1.191 +	};
   1.192 +
   1.193 +
   1.194 +/**
   1.195 +YUV (YCbCr) uncompressed image data sampling pattern.
   1.196 +@publishedAll
   1.197 +@released
   1.198 +*/
   1.199 +enum TYuvSamplingPattern 
   1.200 +	{
   1.201 +	/**
   1.202 +	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
   1.203 +	The four luminance sample positions are on the corners of a square. The chrominance sample position 
   1.204 +	is vertically half-way of the luminance sample positions and horizontally aligned with the left 
   1.205 +	side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
   1.206 +	*/
   1.207 +	EYuv420Chroma1 = 0x00000001,
   1.208 +
   1.209 +	/**
   1.210 +	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
   1.211 +	The four luminance sample positions are on the corners of a square. The chrominance sample position 
   1.212 +	is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
   1.213 +	*/
   1.214 +	EYuv420Chroma2 = 0x00000002,
   1.215 +
   1.216 +	/**
   1.217 +	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
   1.218 +	The four luminance sample positions are on the corners of a square. The chrominance sample position 
    1.219 +	is co-located with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
   1.220 +	*/
   1.221 +	EYuv420Chroma3 = 0x00000004,
   1.222 +
   1.223 +	/**
   1.224 +	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position. 
   1.225 +	The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located 
   1.226 +	with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
   1.227 +	*/
   1.228 +	EYuv422Chroma1 = 0x00000008,
   1.229 +
   1.230 +	/**
   1.231 +	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position. 
   1.232 +	The luminance sample positions reside on the same pixel row. The chrominance sample position is in the 
   1.233 +	middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
   1.234 +	*/
   1.235 +	EYuv422Chroma2 = 0x00000010
   1.236 +	};
   1.237 +
   1.238 +
   1.239 +/**
   1.240 +Defines the YUV data layout in a decoded picture.
   1.241 +@publishedAll
   1.242 +@released
   1.243 +*/
   1.244 +enum TYuvDataLayout
   1.245 +	{
   1.246 +	/**
   1.247 +	The data is stored in a plane mode. The memory buffer contains first all Y component 
   1.248 +	data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0... 
   1.249 +	For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API
   1.250 +	*/
   1.251 +	EYuvDataPlanar		= 0x00000001,
   1.252 +
   1.253 +	/**
   1.254 +	The data is stored interleaved mode, all components interleaved in a single memory block. 
   1.255 +	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U, 
   1.256 +	corresponding to "UY0VY1" little-endian 32-bit words. 
   1.257 +	This is the same data format as EFormatYUV422Reversed in the Onboard Camera API
   1.258 +	*/
   1.259 +	EYuvDataInterleavedLE = 0x00000002,
   1.260 +
   1.261 +	/**
   1.262 +	The data is stored interleaved mode, all components interleaved in a single memory block. 
   1.263 +	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1, 
   1.264 +	corresponding to "UY0VY1" big-endian 32-bit words. 
   1.265 +	This is the same data format as EFormatYUV422 in the Onboard Camera API
   1.266 +	*/
   1.267 +	EYuvDataInterleavedBE = 0x00000004,
   1.268 +	/**
   1.269 +	The data is stored in a semi-planar mode. The memory buffer contains first all Y component 
   1.270 +	data for the whole picture, followed by U and V components, which are interlaced, making the data 
   1.271 +	format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as 
    1.272 +	EFormatYUV420SemiPlanar in the Onboard Camera API
   1.273 +	*/
   1.274 +	EYuvDataSemiPlanar = 0x00000008
   1.275 +	};
   1.276 +
   1.277 +/**
    1.278 +Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC 14496-10 (MPEG-4 AVC) for the definitions of the transition effects.
   1.279 +@publishedAll
   1.280 +@released
   1.281 +*/
   1.282 +enum TPictureEffect
   1.283 +	{
   1.284 +	/**
   1.285 +	No effect.
   1.286 +	*/
   1.287 +	EEffectNone          					= 0x00000001,
   1.288 +
   1.289 +	/**
   1.290 +	Fade from black.
   1.291 +	*/
   1.292 +	EEffectFadeFromBlack 					= 0x00000002,
   1.293 +
   1.294 +	/**
   1.295 +	Fade to black.
   1.296 +	*/
   1.297 +	EEffectFadeToBlack   					= 0x00000004,
   1.298 +
   1.299 +	/**
   1.300 +	Unspecified transition from or to constant colour.
   1.301 +	*/
   1.302 +	EEffectUnspecifiedThroughConstantColor 	= 0x00000008,
   1.303 +
   1.304 +	/**
   1.305 +	Dissolve.
   1.306 +	*/
   1.307 +	EEffectDissolve   						= 0x00000010,
   1.308 +
   1.309 +	/**
   1.310 +	Wipe.
   1.311 +	*/
   1.312 +	EEffectWipe   							= 0x00000020,
   1.313 +
   1.314 +	/**
   1.315 +	Unspecified mixture of two scenes.
   1.316 +	*/
   1.317 +	EEffectUnspecifiedMixOfTwoScenes  		= 0x00000040
   1.318 +	};
   1.319 +
   1.320 +/**
   1.321 +Defines the data value range used for RGB data. Used for determining the correct color space conversion factors. 
   1.322 +@publishedAll
   1.323 +@released
   1.324 +*/
   1.325 +enum TRgbRange
   1.326 +	{
   1.327 +	/**
   1.328 +	The RGB data uses the full 8-bit range of [0…255].
   1.329 +	*/
   1.330 +	ERgbRangeFull	= 0x00000001,
   1.331 +
   1.332 +	/**
   1.333 +	The RGB data uses the nominal range of [16…235]. Individual samples can still contain 
   1.334 +	values beyond that range, the rest of the 8-bit range is used for headroom and footroom.
   1.335 +	*/
   1.336 +	ERgbRange16to235 = 0x00000002
   1.337 +	};
   1.338 +
   1.339 +
   1.340 +
   1.341 +/**
   1.342 +Defines possible data unit types for encoded video data. The data unit types are used both 
   1.343 +for encoded video input for playback as well as encoded video output from recording.
   1.344 +@publishedAll
   1.345 +@released
   1.346 +*/
   1.347 +enum TVideoDataUnitType
   1.348 +	{
   1.349 +	/**
   1.350 +	Each data unit is a single coded picture.
   1.351 +	*/
   1.352 +	EDuCodedPicture		   = 0x00000001,
   1.353 +
   1.354 +	/**
   1.355 +	Each data unit is a coded video segment.  
   1.356 +	A coded video segment is a part of the coded video data that forms an independently 
   1.357 +	decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2 
   1.358 +	and slice in H.263 are coded video segments.
   1.359 +	*/
   1.360 +	EDuVideoSegment		   = 0x00000002,
   1.361 +
   1.362 +	/**
   1.363 +	Each data unit contains an integer number of video segments consecutive in decoding order, 
   1.364 +	possibly more than one. The video segments shall be a subset of one coded picture.
   1.365 +	*/
   1.366 +	EDuSeveralSegments		= 0x00000004,
   1.367 +
   1.368 +	/**
   1.369 +	Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers. 
   1.370 +	The data must be written in decoding order. This data unit type can be used for playback if the client 
   1.371 +	does not have information about the bitstream syntax, and just writes data in random-sized chunks. For 
   1.372 +	recording this data unit type is useful if the client can handle arbitrarily split data units, giving the
   1.373 +	encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still 
   1.374 +	belong to exactly one output picture.
   1.375 +	*/
   1.376 +	EDuArbitraryStreamSection = 0x00000008
   1.377 +	};
   1.378 +
   1.379 +/**
   1.380 +Defines possible encapsulation types for coded video data units. The encapsulation information is 
   1.381 +used both for encoded video input for playback as well as encoded video output from recording.
   1.382 +@publishedAll
   1.383 +@released
   1.384 +*/
   1.385 +enum TVideoDataUnitEncapsulation
   1.386 +	{
   1.387 +	/**
   1.388 +	The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4 
   1.389 +	Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
   1.390 +	*/
   1.391 +	EDuElementaryStream = 0x00010000,
   1.392 +
   1.393 +	/**
   1.394 +	The coded data units are encapsulated in a general-purpose packet payload format whose coded 
   1.395 +	data units can be decoded independently but cannot be generally chained into a bitstream. 
   1.396 +	For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category. 
   1.397 +	*/
   1.398 +	EDuGenericPayload   = 0x00020000,
   1.399 +
   1.400 +	/**
   1.401 +	The coded data units are encapsulated in RTP packet payload format. The RTP payload header 
   1.402 +	may contain codec-specific items, such as a redundant copy of a picture header in the H.263 
   1.403 +	payload specification RFC2429.
   1.404 +	*/
   1.405 +	EDuRtpPayload	   = 0x00040000
   1.406 +	};
   1.407 +
   1.408 +/**
   1.409 +Defines the HRD/VBV specification used in a stream.
   1.410 +@publishedAll
   1.411 +@released
   1.412 +*/
   1.413 +enum THrdVbvSpecification
   1.414 +	{
   1.415 +	/** No HRD/VBV specification. */
   1.416 +	EHrdVbvNone		   = 0x00000001,
   1.417 +
   1.418 +	/** The HRD/VBV specification in the corresponding coding standard. */
   1.419 +	EHrdVbvCodingStandard = 0x00000002,
   1.420 +
   1.421 +	/** Annex G of 3GPP TS 26.234 Release 5. */
   1.422 +	EHrdVbv3GPP		   = 0x00000004
   1.423 +	};
   1.424 +
   1.425 +/**
   1.426 +Defines the pre-processor and post-processor types available in the system. 
   1.427 +One pre-processor or post-processor can implement multiple operations simultaneously, and thus the 
   1.428 +types are defined as bit values that can be combined as a bitfield.
   1.429 +@publishedAll
   1.430 +@released
   1.431 +*/
   1.432 +enum TPrePostProcessType
   1.433 +	{
   1.434 +	/**
   1.435 +	Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording. 
   1.436 +	Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that 
   1.437 +	only support image dimensions that are multiples of 16 pixels.
   1.438 +	*/
   1.439 +	EPpInputCrop =		0x00000001,
   1.440 +
   1.441 +	/**
   1.442 +	Horizontal mirroring, flips the image data around a vertical line in its center.
   1.443 +	*/
   1.444 +	EPpMirror =		   0x00000002,
   1.445 +
   1.446 +	/**
   1.447 +	Picture rotation, supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
   1.448 +	*/
   1.449 +	EPpRotate =		   0x00000004,
   1.450 +
   1.451 +	/**
   1.452 +	Picture scaling to a new size, includes both upscaling and downscaling. 
   1.453 +	The supported scaling types and scale factors depend on the pixel processor.
   1.454 +	*/
   1.455 +	EPpScale =			0x00000008,
   1.456 +
   1.457 +	/**
   1.458 +	Crops the picture to a final output rectangle.
   1.459 +	*/
   1.460 +	EPpOutputCrop =	   0x00000010,
   1.461 +
   1.462 +	/**
   1.463 +	Pads the output picture to a defined size. Used in video recording to pad pictures to 
   1.464 +	suit the encoder input requirements.
   1.465 +	*/
   1.466 +	EPpOutputPad =		0x00000020,
   1.467 +
   1.468 +	/**
   1.469 +	YUV to RGB color space conversion. Supported only for video playback.
   1.470 +	*/
   1.471 +	EPpYuvToRgb =		 0x00000040,
   1.472 +
   1.473 +	/**
   1.474 +	RGB to YUV color space conversion. Supported only for video recording.
   1.475 +	*/
   1.476 +	EPpRgbToYuv =		 0x00000080,
   1.477 +
   1.478 +	/**
   1.479 +	YUV to YUV data format conversion. Supported only for video recording.
   1.480 +	*/
   1.481 +	EPpYuvToYuv =		 0x00000100,
   1.482 +
   1.483 +	/**
   1.484 +	Noise filtering. Noise filtering is typically used to enhance the input 
   1.485 +	picture from the camera, and is usually only supported for video recording.
   1.486 +	*/
   1.487 +	EPpNoiseFilter =	  0x00000200,
   1.488 +
   1.489 +	/**
   1.490 +	Color enhancement.  Color enhancement is typically used to enhance the input picture 
   1.491 +	from the camera, and is usually only supported for video recording.
   1.492 +	*/
   1.493 +	EPpColorEnhancement = 0x00000400,
   1.494 +
   1.495 +	/**
   1.496 +	Frame stabilisation. Supported only for video recording.
   1.497 +	*/
   1.498 +	EPpFrameStabilisation = 0x00000800,
   1.499 +	
   1.500 +	/**
   1.501 +	Deblocking is typically used to remove artefacts from the output picture that result from 
   1.502 +	high compression or a noisy input signal. Only supported for video playback.
   1.503 +	*/
   1.504 +    EPpDeblocking =         0x00001000,
   1.505 +
   1.506 +	/**
   1.507 +	Deringing is typically used to remove artefacts from the output picture that result from 
   1.508 +	a noisy input signal corrupting motion estimates. Only supported for video playback.
   1.509 +	*/
   1.510 +    EPpDeringing =          0x00002000,
   1.511 + 
   1.512 +	/**
   1.513 +	Custom hardware device specific processing.
   1.514 +	*/
   1.515 +	EPpCustom =		   0x10000000
   1.516 +	};
   1.517 +
   1.518 +/**
   1.519 +Dithering types.
   1.520 +@publishedAll
   1.521 +@released
   1.522 +*/
   1.523 +enum TDitherType
   1.524 +	{
   1.525 +	/** No dithering. */
   1.526 +	EDitherNone		   = 0x00000001,
   1.527 +
   1.528 +	/** Ordered dither. */
   1.529 +	EDitherOrdered		= 0x00000002,
   1.530 +
   1.531 +	/** Error diffusion dither. */
   1.532 +	EDitherErrorDiffusion = 0x00000004,
   1.533 +
   1.534 +	/** Other hardware device specific dithering type. */
   1.535 +	EDitherOther		  = 0x00000008
   1.536 +	};
   1.537 +
   1.538 +/**
   1.539 +Rotation types for pre-processors and post-processors.
   1.540 +@publishedAll
   1.541 +@released
   1.542 +*/
   1.543 +enum TRotationType
   1.544 +	{
   1.545 +	/**	No rotation. */
   1.546 +	ERotateNone			   = 0x00000001,
   1.547 +
   1.548 +	/** Rotate the picture 90 degrees clockwise. */
   1.549 +	ERotate90Clockwise	 = 0x00000002,
   1.550 +
   1.551 +	/** Rotate the picture 90 degrees anticlockwise. */
   1.552 +	ERotate90Anticlockwise = 0x00000004,
   1.553 +
   1.554 +	/** Rotate the picture 180 degrees. */
   1.555 +	ERotate180			 = 0x00000008
   1.556 +	};
   1.557 +
   1.558 +
   1.559 +
   1.560 +/**
   1.561 +Defines possible encoding bit-rate control modes.
   1.562 +@publishedAll
   1.563 +@released
   1.564 +*/
   1.565 +enum TBitrateControlType
   1.566 +	{
   1.567 +	/**
   1.568 +	The encoder does not control the bit-rate, but uses specified target picture quality and picture 
   1.569 +	rate as such. The coded data stream must still remain compliant with the standard and buffer settings 
   1.570 +	in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
   1.571 +	*/
   1.572 +	EBrControlNone	= 0x00000001,
   1.573 +
   1.574 +	/**
   1.575 +	The encoder controls the coded bit-rate of the stream. The caller indicates target bit-rate, target 
   1.576 +	picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
   1.577 +	*/
   1.578 +	EBrControlStream  = 0x00000002,
   1.579 +
   1.580 +	/**
   1.581 +	The encoder controls the coded bit-rate of each picture. The caller gives the target amount of bits per 
   1.582 +	frame. Each given input frame is coded. This type of operation is applicable only in memory-buffer-based 
   1.583 +	input.
   1.584 +	*/
   1.585 +	EBrControlPicture = 0x00000004
   1.586 +	};
   1.587 +
   1.588 +
   1.589 +/**
   1.590 +Defines the scalability type for a single bit-rate scalability layer.
   1.591 +@publishedAll
   1.592 +@released
   1.593 +*/
   1.594 +enum TScalabilityType
   1.595 +	{
   1.596 +	/**
   1.597 +	The layer uses temporal scalability. Using the layer increases the picture rate.
   1.598 +	*/
   1.599 +	EScalabilityTemporal		= 0x00000001,
   1.600 +
   1.601 +	/**
   1.602 +	The layer uses quality scalability. Using the layer improves picture quality.
   1.603 +	*/
   1.604 +	EScalabilityQuality		 = 0x00000002,
   1.605 +
   1.606 +	/**
   1.607 +	The layer uses spatial scalability. Using the layer increases picture resolution.
   1.608 +	*/
   1.609 +	EScalabilitySpatial		 = 0x00000004,
   1.610 +
   1.611 +	/**
   1.612 +	The layer is a fine-granularity scalability layer. In fine granularity scalability, the output 
   1.613 +	quality increases gradually as a function of decoded bits from the enhancement layer.
   1.614 +	*/
   1.615 +	EScalabilityFineGranularity = 0x10000000,
   1.616 +
   1.617 +	/**
   1.618 +	The layer is a fine-granularity quality scalability layer.
   1.619 +	*/
   1.620 +	EScalabilityQualityFG	   = EScalabilityFineGranularity | EScalabilityQuality
   1.621 +	};
   1.622 +
   1.623 +/**
   1.624 +Forward error control strength used for an unequal error protection level. Also other values between 
   1.625 +EFecStrengthNone and EFecStrengthHigh can be used, the encoder will round the values to the levels 
   1.626 +it supports.
   1.627 +@publishedAll
   1.628 +@released
   1.629 +*/
   1.630 +enum TErrorControlStrength
   1.631 +	{
   1.632 +	/** No error control. */
   1.633 +	EFecStrengthNone = 0,
   1.634 +
   1.635 +	/** Low error control strength. */
   1.636 +	EFecStrengthLow = 256,
   1.637 +
   1.638 +	/** Normal error control strength. */
   1.639 +	EFecStrengthNormal = 512,
   1.640 +
   1.641 +	/** High error control strength. */
   1.642 +	EFecStrengthHigh = 768
   1.643 +	};
   1.644 +
   1.645 +/**
   1.646 +Defines the scalability type for in-layer bit-rate scalability.
   1.647 +@publishedAll
   1.648 +@released
   1.649 +*/
   1.650 +enum TInLayerScalabilityType
   1.651 +	{
   1.652 +	/** Temporal scalability, such as B-pictures. */
   1.653 +	EInLScalabilityTemporal = 1,
   1.654 +
   1.655 +	/** Other scalability type. */
   1.656 +	EInLScalabilityOther
   1.657 +	};
   1.658 +
   1.659 +/**
   1.660 +Defines what part of a frame is contained within a video buffer.
   1.661 +@publishedAll
   1.662 +@released
   1.663 +*/
   1.664 +enum TFramePortion
   1.665 +	{
   1.666 +	/** The frame portion is unknown. */
   1.667 +	EFramePortionUnknown,
   1.668 +
   1.669 +	/** An entire frame. */
   1.670 +	EFramePortionWhole,
   1.671 +
   1.672 +	/** A fragment of a frame containing the start but not the end. */
   1.673 +	EFramePortionStartFragment,
   1.674 +
    1.675 +	/** A fragment of a frame containing neither the start nor the end. */
   1.676 +	EFramePortionMidFragment,
   1.677 +
   1.678 +	/** A fragment of a frame containing the end but not the start. */
   1.679 +	EFramePortionEndFragment
   1.680 +	};
   1.681 +
   1.682 +#endif
   1.683 +