epoc32/include/mmf/devvideo/devvideoconstants.h
author William Roberts <williamr@symbian.org>
Wed, 31 Mar 2010 12:33:34 +0100
branch: Symbian3
changeset 4 837f303aceeb
parent 2 2fe1408b6811
permissions -rw-r--r--
Current Symbian^3 public API header files (from PDK 3.0.h)
This is the epoc32/include tree with the "platform" subtrees removed, and
all but a selected few mbg and rsg files removed.
     1 // Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 #ifndef __DEVVIDEOCONSTANTS_H__
    17 #define __DEVVIDEOCONSTANTS_H__
    18 
    19 #include <e32base.h>
    20 #include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
    21 #include <mm/conversioncoefficient.h>
    22 
/**
DevVideo Panic Category.

The category name used when a DevVideo panic is raised; the reason
codes are listed in TDevVideoPanicCodes.

@publishedAll
@released
*/
_LIT(KDevVideoPanicCategory, "DevVideo");
    30 
/**
DevVideo Panic Codes.

Reason codes raised under the KDevVideoPanicCategory category when a
DevVideo API contract is violated.

@publishedAll
@released
*/
enum TDevVideoPanicCodes
	{
	/**
	A pre-condition on a method has been violated.
	*/
	EDevVideoPanicPreConditionViolation = 1,
	/**
	A post-condition on a method has been violated.
	*/
	EDevVideoPanicPostConditionViolation = 2,
	/**
	An invalid hardware device ID has been supplied.
	*/
	EDevVideoPanicInvalidHwDeviceId = 3
	};
    52 
    53 
// DevVideo Plugin Interface UIDs
// The numeric values are defined in devvideoplugininterfaceuids.hrh.

/** Video Decoder HW Device Plugin Interface UID 
@publishedAll
@released
*/
const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};

/** Video Post Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};

/** Video Encoder HW Device Plugin Interface UID 
@publishedAll
@released
*/
const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};

/** Video Pre Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};

// DevVideo Custom Interface UIDs

/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};
    87 
/** 
Picture frame rate constants

Using these constants is recommended when the picture rate is known to match 
one of them, to ensure that floating point equality comparisons work as expected.

Note that the MSL video APIs currently only deal with non-interlaced frames.  For interlaced 
video, all references to the term "picture" should be considered to refer to complete frames. 
As such, the term "picture rate" here refers to the frame rate for interlaced video.

The NTSC rates are the rational values 24000/1001 and 30000/1001 rounded
to the precision shown; always compare against these constants rather than
recomputing the division, so that equality comparisons are exact.

@publishedAll
@released
*/
const TReal KPictureRate5 = 5.0;
const TReal KPictureRate75 = 7.5;       // 7.5 pictures per second
const TReal KPictureRate10 = 10.0;
const TReal KPictureRate15 = 15.0;
const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
const TReal KPictureRate25 = 25.0;
const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
const TReal KPictureRate30 = 30.0;
   109 
   110 
/**
Specifies the data format used for an uncompressed picture. 
The values are bit patterns that can be combined with other format definition constants,
such as TRgbFormat and TYuvDataLayout values.

@publishedAll
@released
*/
enum TImageDataFormat
	{
	/** Raw RGB picture data in a memory area.
	*/
	ERgbRawData		= 0x01000000,
	/** RGB picture data stored in a Symbian OS CFbsBitmap object.
	*/
	ERgbFbsBitmap	  = 0x02000000,
	/** Raw YUV picture data stored in a memory area. The data storage 
	format depends on the YUV sampling pattern and data layout used.
	*/
	EYuvRawData		= 0x04000000,
	
	/** Picture stored in a surface buffer.
	@see MMmfVideoSurfaceHandleControl::MmvshcSetSurfaceHandle
	*/
	ESurfaceBuffer = 0x08000000
	};
   136 
   137 
/**
RGB uncompressed image format alternatives. The values are combined with the
TImageDataFormat bit patterns ERgbRawData and ERgbFbsBitmap.
@publishedAll
@released
*/
enum TRgbFormat 
	{
	/**
	16-bit RGB data format with four bits per component. 
	The data format is the same as used in Symbian EColor4K bitmaps, 
	with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
	where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
	*/
	ERgb16bit444	   = ERgbRawData   | 0x00000001,

	/**
	16-bit RGB data format with five bits per component for red and blue and 
	six bits for green. The data format is the same as used in Symbian EColor64K bitmaps, 
	with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
	(This corresponds to "RGB" 16-bit little-endian halfwords)
	*/
	ERgb16bit565	   = ERgbRawData   | 0x00000002,

	/**
	32-bit RGB data format with eight bits per component. 
	This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
	[bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits. 
	(This corresponds to "XRGB" 32-bit little-endian words)
	*/
	ERgb32bit888	   = ERgbRawData   | 0x00000004,

	/**
	CFbsBitmap object with EColor4K data format.
	*/
	EFbsBitmapColor4K  = ERgbFbsBitmap | 0x00000001,

	/**
	CFbsBitmap object with EColor64K data format.
	*/
	EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,

	/**
	CFbsBitmap object with EColor16M data format.
	*/
	EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,

	/**
	CFbsBitmap object with EColor16MU data format.
	*/
	EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
	};
   189 
   190 
/**
YUV (YCbCr) uncompressed image data sampling pattern.
@publishedAll
@released
*/
enum TYuvSamplingPattern 
	{
	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
	The four luminance sample positions are on the corners of a square. The chrominance sample position 
	is vertically half-way of the luminance sample positions and horizontally aligned with the left 
	side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
	*/
	EYuv420Chroma1 = 0x00000001,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
	The four luminance sample positions are on the corners of a square. The chrominance sample position 
	is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
	*/
	EYuv420Chroma2 = 0x00000002,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position. 
	The four luminance sample positions are on the corners of a square. The chrominance sample position 
	colocates with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
	*/
	EYuv420Chroma3 = 0x00000004,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position. 
	The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located 
	with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
	*/
	EYuv422Chroma1 = 0x00000008,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position. 
	The luminance sample positions reside on the same pixel row. The chrominance sample position is in the 
	middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
	*/
	EYuv422Chroma2 = 0x00000010
	};
   234 
   235 
/**
Defines the YUV data layout in a decoded picture.
@publishedAll
@released
*/
enum TYuvDataLayout
	{
	/**
	The data is stored in a plane mode. The memory buffer contains first all Y component 
	data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0... 
	For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API
	*/
	EYuvDataPlanar		= 0x00000001,

	/**
	The data is stored interleaved mode, all components interleaved in a single memory block. 
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U, 
	corresponding to "UY0VY1" little-endian 32-bit words. 
	This is the same data format as EFormatYUV422Reversed in the Onboard Camera API
	*/
	EYuvDataInterleavedLE = 0x00000002,

	/**
	The data is stored interleaved mode, all components interleaved in a single memory block. 
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1, 
	corresponding to "UY0VY1" big-endian 32-bit words. 
	This is the same data format as EFormatYUV422 in the Onboard Camera API
	*/
	EYuvDataInterleavedBE = 0x00000004,
	/**
	The data is stored in a semi-planar mode. The memory buffer contains first all Y component 
	data for the whole picture, followed by U and V components, which are interlaced, making the data 
	format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as 
	EFormatYUV420SemiPlanar in the Onboard Camera API (the original text omitted the
	"E" prefix used by the sibling entries above; presumably the same enumeration is
	meant — verify against the Onboard Camera API headers)
	*/
	EYuvDataSemiPlanar = 0x00000008
	};
   273 
/**
Defines the picture effect used for an input picture. Please refer to
ITU-T H.264 | ISO/IEC 14496-10 (MPEG-4 AVC) for the definitions of the transition effects.
@publishedAll
@released
*/
enum TPictureEffect
	{
	/**
	No effect.
	*/
	EEffectNone          					= 0x00000001,

	/**
	Fade from black.
	*/
	EEffectFadeFromBlack 					= 0x00000002,

	/**
	Fade to black.
	*/
	EEffectFadeToBlack   					= 0x00000004,

	/**
	Unspecified transition from or to constant colour.
	*/
	EEffectUnspecifiedThroughConstantColor 	= 0x00000008,

	/**
	Dissolve.
	*/
	EEffectDissolve   						= 0x00000010,

	/**
	Wipe.
	*/
	EEffectWipe   							= 0x00000020,

	/**
	Unspecified mixture of two scenes.
	*/
	EEffectUnspecifiedMixOfTwoScenes  		= 0x00000040
	};
   316 
/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors. 
@publishedAll
@released
*/
enum TRgbRange
	{
	/**
	The RGB data uses the full 8-bit range of [0..255].
	*/
	ERgbRangeFull	= 0x00000001,

	/**
	The RGB data uses the nominal range of [16..235]. Individual samples can still contain 
	values beyond that range, the rest of the 8-bit range is used for headroom and footroom.
	*/
	ERgbRange16to235 = 0x00000002
	};
   335 
   336 
   337 
/**
Defines possible data unit types for encoded video data. The data unit types are used both 
for encoded video input for playback as well as encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitType
	{
	/**
	Each data unit is a single coded picture.
	*/
	EDuCodedPicture		   = 0x00000001,

	/**
	Each data unit is a coded video segment.  
	A coded video segment is a part of the coded video data that forms an independently 
	decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2 
	and slice in H.263 are coded video segments.
	*/
	EDuVideoSegment		   = 0x00000002,

	/**
	Each data unit contains an integer number of video segments consecutive in decoding order, 
	possibly more than one. The video segments shall be a subset of one coded picture.
	*/
	EDuSeveralSegments		= 0x00000004,

	/**
	Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers. 
	The data must be written in decoding order. This data unit type can be used for playback if the client 
	does not have information about the bitstream syntax, and just writes data in random-sized chunks. For 
	recording this data unit type is useful if the client can handle arbitrarily split data units, giving the
	encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still 
	belong to exactly one output picture.
	*/
	EDuArbitraryStreamSection = 0x00000008
	};
   375 
/**
Defines possible encapsulation types for coded video data units. The encapsulation information is 
used both for encoded video input for playback as well as encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
	{
	/**
	The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4 
	Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
	*/
	EDuElementaryStream = 0x00010000,

	/**
	The coded data units are encapsulated in a general-purpose packet payload format whose coded 
	data units can be decoded independently but cannot be generally chained into a bitstream. 
	For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category. 
	*/
	EDuGenericPayload   = 0x00020000,

	/**
	The coded data units are encapsulated in RTP packet payload format. The RTP payload header 
	may contain codec-specific items, such as a redundant copy of a picture header in the H.263 
	payload specification RFC2429.
	*/
	EDuRtpPayload	   = 0x00040000
	};
   404 
/**
Defines the HRD/VBV (Hypothetical Reference Decoder / Video Buffering Verifier)
specification used in a stream.
@publishedAll
@released
*/
enum THrdVbvSpecification
	{
	/** No HRD/VBV specification. */
	EHrdVbvNone		   = 0x00000001,

	/** The HRD/VBV specification in the corresponding coding standard. */
	EHrdVbvCodingStandard = 0x00000002,

	/** Annex G of 3GPP TS 26.234 Release 5. */
	EHrdVbv3GPP		   = 0x00000004
	};
   421 
/**
Defines the pre-processor and post-processor types available in the system. 
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the 
types are defined as bit values that can be combined as a bitfield.
@publishedAll
@released
*/
enum TPrePostProcessType
	{
	/**
	Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording. 
	Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that 
	only support image dimensions that are multiples of 16 pixels.
	*/
	EPpInputCrop =		0x00000001,

	/**
	Horizontal mirroring, flips the image data around a vertical line in its center.
	*/
	EPpMirror =		   0x00000002,

	/**
	Picture rotation, supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
	*/
	EPpRotate =		   0x00000004,

	/**
	Picture scaling to a new size, includes both upscaling and downscaling. 
	The supported scaling types and scale factors depend on the pixel processor.
	*/
	EPpScale =			0x00000008,

	/**
	Crops the picture to a final output rectangle.
	*/
	EPpOutputCrop =	   0x00000010,

	/**
	Pads the output picture to a defined size. Used in video recording to pad pictures to 
	suit the encoder input requirements.
	*/
	EPpOutputPad =		0x00000020,

	/**
	YUV to RGB color space conversion. Supported only for video playback.
	*/
	EPpYuvToRgb =		 0x00000040,

	/**
	RGB to YUV color space conversion. Supported only for video recording.
	*/
	EPpRgbToYuv =		 0x00000080,

	/**
	YUV to YUV data format conversion. Supported only for video recording.
	*/
	EPpYuvToYuv =		 0x00000100,

	/**
	Noise filtering. Noise filtering is typically used to enhance the input 
	picture from the camera, and is usually only supported for video recording.
	*/
	EPpNoiseFilter =	  0x00000200,

	/**
	Color enhancement.  Color enhancement is typically used to enhance the input picture 
	from the camera, and is usually only supported for video recording.
	*/
	EPpColorEnhancement = 0x00000400,

	/**
	Frame stabilisation. Supported only for video recording.
	*/
	EPpFrameStabilisation = 0x00000800,

	/**
	Deblocking is typically used to remove artefacts from the output picture that result from 
	high compression or a noisy input signal. Only supported for video playback.
	*/
	EPpDeblocking =         0x00001000,

	/**
	Deringing is typically used to remove artefacts from the output picture that result from 
	a noisy input signal corrupting motion estimates. Only supported for video playback.
	*/
	EPpDeringing =          0x00002000,

	/**
	Custom hardware device specific processing.
	*/
	EPpCustom =		   0x10000000
	};
   514 
/**
Dithering types, used when converting to a lower colour depth.
@publishedAll
@released
*/
enum TDitherType
	{
	/** No dithering. */
	EDitherNone		   = 0x00000001,

	/** Ordered dither. */
	EDitherOrdered		= 0x00000002,

	/** Error diffusion dither. */
	EDitherErrorDiffusion = 0x00000004,

	/** Other hardware device specific dithering type. */
	EDitherOther		  = 0x00000008
	};
   534 
/**
Rotation types for pre-processors and post-processors.
@publishedAll
@released
*/
enum TRotationType
	{
	/**	No rotation. */
	ERotateNone			   = 0x00000001,

	/** Rotate the picture 90 degrees clockwise. */
	ERotate90Clockwise	 = 0x00000002,

	/** Rotate the picture 90 degrees anticlockwise. */
	ERotate90Anticlockwise = 0x00000004,

	/** Rotate the picture 180 degrees. */
	ERotate180			 = 0x00000008
	};
   554 
   555 
   556 
/**
Defines possible encoding bit-rate control modes.
@publishedAll
@released
*/
enum TBitrateControlType
	{
	/**
	The encoder does not control the bit-rate, but uses specified target picture quality and picture 
	rate as such. The coded data stream must still remain compliant with the standard and buffer settings 
	in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
	*/
	EBrControlNone	= 0x00000001,

	/**
	The encoder controls the coded bit-rate of the stream. The caller indicates target bit-rate, target 
	picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
	*/
	EBrControlStream  = 0x00000002,

	/**
	The encoder controls the coded bit-rate of each picture. The caller gives the target amount of bits per 
	frame. Each given input frame is coded. This type of operation is applicable only in memory-buffer-based 
	input.
	*/
	EBrControlPicture = 0x00000004
	};
   584 
   585 
/**
Defines the scalability type for a single bit-rate scalability layer.
@publishedAll
@released
*/
enum TScalabilityType
	{
	/**
	The layer uses temporal scalability. Using the layer increases the picture rate.
	*/
	EScalabilityTemporal		= 0x00000001,

	/**
	The layer uses quality scalability. Using the layer improves picture quality.
	*/
	EScalabilityQuality		 = 0x00000002,

	/**
	The layer uses spatial scalability. Using the layer increases picture resolution.
	*/
	EScalabilitySpatial		 = 0x00000004,

	/**
	The layer is a fine-granularity scalability layer. In fine granularity scalability, the output 
	quality increases gradually as a function of decoded bits from the enhancement layer.
	*/
	EScalabilityFineGranularity = 0x10000000,

	/**
	The layer is a fine-granularity quality scalability layer.
	(EScalabilityFineGranularity combined with EScalabilityQuality.)
	*/
	EScalabilityQualityFG	   = EScalabilityFineGranularity | EScalabilityQuality
	};
   619 
/**
Forward error control strength used for an unequal error protection level. Also other values between 
EFecStrengthNone and EFecStrengthHigh can be used, the encoder will round the values to the levels 
it supports.
@publishedAll
@released
*/
enum TErrorControlStrength
	{
	/** No error control. */
	EFecStrengthNone = 0,

	/** Low error control strength. */
	EFecStrengthLow = 256,

	/** Normal error control strength. */
	EFecStrengthNormal = 512,

	/** High error control strength. */
	EFecStrengthHigh = 768
	};
   641 
/**
Defines the scalability type for in-layer bit-rate scalability.
@publishedAll
@released
*/
enum TInLayerScalabilityType
	{
	/** Temporal scalability, such as B-pictures. */
	EInLScalabilityTemporal = 1,

	/** Other scalability type. */
	EInLScalabilityOther
	};
   655 
/**
Defines what part of a frame is contained within a video buffer.
@publishedAll
@released
*/
enum TFramePortion
	{
	/** The frame portion is unknown. */
	EFramePortionUnknown,

	/** An entire frame. */
	EFramePortionWhole,

	/** A fragment of a frame containing the start but not the end. */
	EFramePortionStartFragment,

	/** A fragment of a frame containing neither the start nor the end. */
	EFramePortionMidFragment,

	/** A fragment of a frame containing the end but not the start. */
	EFramePortionEndFragment
	};
   678 
   679 #endif
   680