// Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Symbian Foundation License v1.0" to Symbian Foundation members and "Symbian Foundation End User License Agreement v1.0" to non-members
// which accompanies this distribution, and is available
// at the URL "http://www.symbianfoundation.org/legal/licencesv10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#ifndef __DEVVIDEOCONSTANTS_H__
#define __DEVVIDEOCONSTANTS_H__

#include <e32base.h>
#include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
#include <mmf/common/mmfvideo.h>

/**
DevVideo Panic Category

@publishedAll
@released
*/
_LIT(KDevVideoPanicCategory, "DevVideo");

/**
DevVideo Panic Codes

@publishedAll
@released
*/
enum TDevVideoPanicCodes
    {
    /**
    A pre-condition on a method has been violated.
    */
    EDevVideoPanicPreConditionViolation = 1,
    /**
    A post-condition on a method has been violated.
    */
    EDevVideoPanicPostConditionViolation = 2,
    /**
    An invalid hardware device ID has been supplied.
    */
    EDevVideoPanicInvalidHwDeviceId = 3
    };
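
/*
A minimal sketch of how an implementation might raise one of the panics above,
assuming the standard User::Panic() API. The helper and its validity flag are
hypothetical illustrations, not part of this header:

@code
void PanicIfInvalidHwDeviceId(TBool aIdIsValid) // hypothetical helper
    {
    if (!aIdIsValid)
        {
        User::Panic(KDevVideoPanicCategory, EDevVideoPanicInvalidHwDeviceId);
        }
    }
@endcode
*/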

// DevVideo Plugin Interface UIDs

/** Video Decoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};

/** Video Post Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};

/** Video Encoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};

/** Video Pre Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};

// DevVideo Custom Interface UIDs

/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};


/**
Picture frame rate constants

Using these constants is recommended when the picture rate is known to match
one of them, to ensure that floating point equality comparisons work as expected.

Note that the MSL video APIs currently only deal with non-interlaced frames. For interlaced
video, all references to the term "picture" should be considered to refer to complete frames.
As such, the term "picture rate" here refers to the frame rate for interlaced video.

@publishedAll
@released
*/
const TReal KPictureRate5 = 5.0;
const TReal KPictureRate75 = 7.5;
const TReal KPictureRate10 = 10.0;
const TReal KPictureRate15 = 15.0;
const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
const TReal KPictureRate25 = 25.0;
const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
const TReal KPictureRate30 = 30.0;


/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition constants.

@publishedAll
@released
*/
enum TImageDataFormat
    {
    /** Raw RGB picture data in a memory area.
    */
    ERgbRawData = 0x01000000,
    /** RGB picture data stored in a Symbian OS CFbsBitmap object.
    */
    ERgbFbsBitmap = 0x02000000,
    /** Raw YUV picture data stored in a memory area. The data storage
    format depends on the YUV sampling pattern and data layout used.
    */
    EYuvRawData = 0x04000000
    };
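
/*
Since the TImageDataFormat values occupy disjoint bit patterns, a format word
can be classified with simple mask tests. A minimal sketch (the variable and
its source are illustrative, not part of this header):

@code
TUint32 format = EYuvRawData; // e.g. obtained from a capability query
if (format & EYuvRawData)
    {
    // Raw YUV data in memory; the exact layout is given by the
    // TYuvSamplingPattern and TYuvDataLayout values below.
    }
else if (format & (ERgbRawData | ERgbFbsBitmap))
    {
    // RGB data; the exact variant is one of the TRgbFormat values below.
    }
@endcode
*/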

/**
RGB uncompressed image format alternatives.
@publishedAll
@released
*/
enum TRgbFormat
    {
    /**
    16-bit RGB data format with four bits per component.
    The data format is the same as used in Symbian EColor4K bitmaps,
    with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
    where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords.)
    */
    ERgb16bit444 = ERgbRawData | 0x00000001,

    /**
    16-bit RGB data format with five bits per component for red and blue and
    six bits for green. The data format is the same as used in Symbian EColor64K bitmaps,
    with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg].
    (This corresponds to "RGB" 16-bit little-endian halfwords.)
    */
    ERgb16bit565 = ERgbRawData | 0x00000002,

    /**
    32-bit RGB data format with eight bits per component.
    This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
    [bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits.
    (This corresponds to "XRGB" 32-bit little-endian words.)
    */
    ERgb32bit888 = ERgbRawData | 0x00000004,

    /**
    CFbsBitmap object with EColor4K data format.
    */
    EFbsBitmapColor4K = ERgbFbsBitmap | 0x00000001,

    /**
    CFbsBitmap object with EColor64K data format.
    */
    EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,

    /**
    CFbsBitmap object with EColor16M data format.
    */
    EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,

    /**
    CFbsBitmap object with EColor16MU data format.
    */
    EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
    };


/**
YUV (YCbCr) uncompressed image data sampling pattern.
@publishedAll
@released
*/
enum TYuvSamplingPattern
    {
    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    is vertically half-way between the luminance sample positions and horizontally aligned with the left
    side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
    */
    EYuv420Chroma1 = 0x00000001,

    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
    */
    EYuv420Chroma2 = 0x00000002,

    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    is co-located with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
    */
    EYuv420Chroma3 = 0x00000004,

    /**
    4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
    The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located
    with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
    */
    EYuv422Chroma1 = 0x00000008,

    /**
    4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
    The luminance sample positions reside on the same pixel row. The chrominance sample position is in the
    middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
    */
    EYuv422Chroma2 = 0x00000010
    };
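
/*
The TRgbFormat bit layouts above map directly to shift-and-mask operations.
A minimal sketch for ERgb16bit565, reading the 16-bit little-endian halfword
as a single value (the pixel value is illustrative):

@code
TUint16 pixel = 0xF800;              // pure red in ERgb16bit565
TUint8 red   = (pixel >> 11) & 0x1F; // 5 bits
TUint8 green = (pixel >> 5)  & 0x3F; // 6 bits
TUint8 blue  = pixel         & 0x1F; // 5 bits
@endcode
*/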

/**
Defines the YUV data layout in a decoded picture.
@publishedAll
@released
*/
enum TYuvDataLayout
    {
    /**
    The data is stored in planar mode. The memory buffer contains first all Y component
    data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0...
    For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API.
    */
    EYuvDataPlanar = 0x00000001,

    /**
    The data is stored in interleaved mode, with all components interleaved in a single memory block.
    Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U,
    corresponding to "UY0VY1" little-endian 32-bit words.
    This is the same data format as EFormatYUV422Reversed in the Onboard Camera API.
    */
    EYuvDataInterleavedLE = 0x00000002,

    /**
    The data is stored in interleaved mode, with all components interleaved in a single memory block.
    Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1,
    corresponding to "UY0VY1" big-endian 32-bit words.
    This is the same data format as EFormatYUV422 in the Onboard Camera API.
    */
    EYuvDataInterleavedBE = 0x00000004,

    /**
    The data is stored in semi-planar mode. The memory buffer contains first all Y component
    data for the whole picture, followed by the U and V components, which are interleaved, making the data
    format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as
    EFormatYUV420SemiPlanar in the Onboard Camera API.
    */
    EYuvDataSemiPlanar = 0x00000008
    };

/**
Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC MPEG-4 AVC for the definitions of the transition effects.
@publishedAll
@released
*/
enum TPictureEffect
    {
    /**
    No effect.
    */
    EEffectNone = 0x00000001,

    /**
    Fade from black.
    */
    EEffectFadeFromBlack = 0x00000002,

    /**
    Fade to black.
    */
    EEffectFadeToBlack = 0x00000004,

    /**
    Unspecified transition from or to constant colour.
    */
    EEffectUnspecifiedThroughConstantColor = 0x00000008,

    /**
    Dissolve.
    */
    EEffectDissolve = 0x00000010,

    /**
    Wipe.
    */
    EEffectWipe = 0x00000020,

    /**
    Unspecified mixture of two scenes.
    */
    EEffectUnspecifiedMixOfTwoScenes = 0x00000040
    };

/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors.
@publishedAll
@released
*/
enum TRgbRange
    {
    /**
    The RGB data uses the full 8-bit range of [0…255].
    */
    ERgbRangeFull = 0x00000001,

    /**
    The RGB data uses the nominal range of [16…235]. Individual samples can still contain
    values beyond that range; the rest of the 8-bit range is used for headroom and footroom.
    */
    ERgbRange16to235 = 0x00000002
    };
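
/*
For EYuvDataPlanar 4:2:0 data above, the plane offsets follow directly from the
picture dimensions: a full luminance plane, then the U plane, then the V plane,
with each chrominance plane holding a quarter as many samples as the luminance
plane. A minimal sketch (aPicture is an illustrative pointer to the start of
the picture buffer):

@code
TSize size(176, 144);                        // QCIF
TUint yLength = size.iWidth * size.iHeight;  // Y plane size in bytes
TUint cLength = yLength / 4;                 // each chrominance plane (4:2:0)
const TUint8* yPlane = aPicture;
const TUint8* uPlane = aPicture + yLength;
const TUint8* vPlane = aPicture + yLength + cLength;
// total buffer size: yLength + 2 * cLength == yLength * 3 / 2
@endcode
*/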

/**
Defines possible data unit types for encoded video data. The data unit types are used both
for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitType
    {
    /**
    Each data unit is a single coded picture.
    */
    EDuCodedPicture = 0x00000001,

    /**
    Each data unit is a coded video segment.
    A coded video segment is a part of the coded video data that forms an independently
    decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2
    and a slice in H.263 are coded video segments.
    */
    EDuVideoSegment = 0x00000002,

    /**
    Each data unit contains an integer number of video segments consecutive in decoding order,
    possibly more than one. The video segments shall be a subset of one coded picture.
    */
    EDuSeveralSegments = 0x00000004,

    /**
    Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers.
    The data must be written in decoding order. This data unit type can be used for playback if the client
    does not have information about the bitstream syntax, and just writes data in random-sized chunks. For
    recording this data unit type is useful if the client can handle arbitrarily split data units, giving the
    encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still
    belong to exactly one output picture.
    */
    EDuArbitraryStreamSection = 0x00000008
    };

/**
Defines possible encapsulation types for coded video data units. The encapsulation information is
used both for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
    {
    /**
    The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4
    Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
    */
    EDuElementaryStream = 0x00010000,

    /**
    The coded data units are encapsulated in a general-purpose packet payload format whose coded
    data units can be decoded independently but cannot be generally chained into a bitstream.
    For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category.
    */
    EDuGenericPayload = 0x00020000,

    /**
    The coded data units are encapsulated in RTP packet payload format. The RTP payload header
    may contain codec-specific items, such as a redundant copy of a picture header in the H.263
    payload specification RFC 2429.
    */
    EDuRtpPayload = 0x00040000
    };

/**
Defines the HRD/VBV specification used in a stream.
@publishedAll
@released
*/
enum THrdVbvSpecification
    {
    /** No HRD/VBV specification. */
    EHrdVbvNone = 0x00000001,

    /** The HRD/VBV specification in the corresponding coding standard. */
    EHrdVbvCodingStandard = 0x00000002,

    /** Annex G of 3GPP TS 26.234 Release 5. */
    EHrdVbv3GPP = 0x00000004
    };
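
/*
TVideoDataUnitType uses the low half-word and TVideoDataUnitEncapsulation the
high half-word, so one value of each can be combined into a single bitfield
without clashing, for example when describing a supported input format. A
minimal sketch (the combination shown is illustrative):

@code
TUint32 combination = EDuCodedPicture | EDuGenericPayload;
// e.g. H.264 | MPEG-4 AVC NAL units, each carrying one coded picture
if (combination & EDuGenericPayload)
    {
    // data units decode independently but cannot be chained into a bitstream
    }
@endcode
*/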

/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the
types are defined as bit values that can be combined as a bitfield.
@publishedAll
@released
*/
enum TPrePostProcessType
    {
    /**
    Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording.
    Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that
    only support image dimensions that are multiples of 16 pixels.
    */
    EPpInputCrop = 0x00000001,

    /**
    Horizontal mirroring, flips the image data around a vertical line in its center.
    */
    EPpMirror = 0x00000002,

    /**
    Picture rotation, supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
    */
    EPpRotate = 0x00000004,

    /**
    Picture scaling to a new size, includes both upscaling and downscaling.
    The supported scaling types and scale factors depend on the pixel processor.
    */
    EPpScale = 0x00000008,

    /**
    Crops the picture to a final output rectangle.
    */
    EPpOutputCrop = 0x00000010,

    /**
    Pads the output picture to a defined size. Used in video recording to pad pictures to
    suit the encoder input requirements.
    */
    EPpOutputPad = 0x00000020,

    /**
    YUV to RGB color space conversion. Supported only for video playback.
    */
    EPpYuvToRgb = 0x00000040,

    /**
    RGB to YUV color space conversion. Supported only for video recording.
    */
    EPpRgbToYuv = 0x00000080,

    /**
    YUV to YUV data format conversion. Supported only for video recording.
    */
    EPpYuvToYuv = 0x00000100,

    /**
    Noise filtering. Noise filtering is typically used to enhance the input
    picture from the camera, and is usually only supported for video recording.
    */
    EPpNoiseFilter = 0x00000200,

    /**
    Color enhancement. Color enhancement is typically used to enhance the input picture
    from the camera, and is usually only supported for video recording.
    */
    EPpColorEnhancement = 0x00000400,

    /**
    Frame stabilisation. Supported only for video recording.
    */
    EPpFrameStabilisation = 0x00000800,

    /**
    Deblocking is typically used to remove artefacts from the output picture that result from
    high compression or a noisy input signal. Only supported for video playback.
    */
    EPpDeblocking = 0x00001000,

    /**
    Deringing is typically used to remove artefacts from the output picture that result from
    a noisy input signal corrupting motion estimates. Only supported for video playback.
    */
    EPpDeringing = 0x00002000,

    /**
    Custom hardware device specific processing.
    */
    EPpCustom = 0x10000000
    };
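
/*
Because the TPrePostProcessType values are single bits, a complete processing
chain is described by OR-ing them together. A minimal sketch of a plausible
playback post-processing combination (illustrative only):

@code
TUint32 processing = EPpYuvToRgb | EPpScale | EPpOutputCrop;
if (processing & EPpScale)
    {
    // scaling is part of the chain; the scale factor is configured elsewhere
    }
@endcode
*/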

/**
Dithering types.
@publishedAll
@released
*/
enum TDitherType
    {
    /** No dithering. */
    EDitherNone = 0x00000001,

    /** Ordered dither. */
    EDitherOrdered = 0x00000002,

    /** Error diffusion dither. */
    EDitherErrorDiffusion = 0x00000004,

    /** Other hardware device specific dithering type. */
    EDitherOther = 0x00000008
    };

/**
Rotation types for pre-processors and post-processors.
@publishedAll
@released
*/
enum TRotationType
    {
    /** No rotation. */
    ERotateNone = 0x00000001,

    /** Rotate the picture 90 degrees clockwise. */
    ERotate90Clockwise = 0x00000002,

    /** Rotate the picture 90 degrees anticlockwise. */
    ERotate90Anticlockwise = 0x00000004,

    /** Rotate the picture 180 degrees. */
    ERotate180 = 0x00000008
    };


/**
Defines possible encoding bit-rate control modes.
@publishedAll
@released
*/
enum TBitrateControlType
    {
    /**
    The encoder does not control the bit-rate, but uses the specified target picture quality and picture
    rate as such. The coded data stream must still remain compliant with the standard and buffer settings
    in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
    */
    EBrControlNone = 0x00000001,

    /**
    The encoder controls the coded bit-rate of the stream. The caller indicates the target bit-rate, target
    picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
    */
    EBrControlStream = 0x00000002,

    /**
    The encoder controls the coded bit-rate of each picture. The caller gives the target number of bits per
    frame. Each given input frame is coded. This type of operation is applicable only with memory-buffer-based
    input.
    */
    EBrControlPicture = 0x00000004
    };


/**
Defines the scalability type for a single bit-rate scalability layer.
@publishedAll
@released
*/
enum TScalabilityType
    {
    /**
    The layer uses temporal scalability. Using the layer increases the picture rate.
    */
    EScalabilityTemporal = 0x00000001,

    /**
    The layer uses quality scalability. Using the layer improves picture quality.
    */
    EScalabilityQuality = 0x00000002,

    /**
    The layer uses spatial scalability. Using the layer increases picture resolution.
    */
    EScalabilitySpatial = 0x00000004,

    /**
    The layer is a fine-granularity scalability layer. In fine granularity scalability, the output
    quality increases gradually as a function of decoded bits from the enhancement layer.
    */
    EScalabilityFineGranularity = 0x10000000,

    /**
    The layer is a fine-granularity quality scalability layer.
    */
    EScalabilityQualityFG = EScalabilityFineGranularity | EScalabilityQuality
    };
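
/*
EScalabilityQualityFG above shows the intended composition: the
fine-granularity bit qualifies one of the basic scalability types, so the two
aspects can be tested separately. A minimal sketch (the layer value is
illustrative):

@code
TUint32 layerType = EScalabilityQualityFG;
if (layerType & EScalabilityFineGranularity)
    {
    // output quality improves gradually with each decoded enhancement-layer bit
    }
@endcode
*/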

/**
Forward error control strength used for an unequal error protection level. Values between
EFecStrengthNone and EFecStrengthHigh can also be used; the encoder will round them to the levels
it supports.
@publishedAll
@released
*/
enum TErrorControlStrength
    {
    /** No error control. */
    EFecStrengthNone = 0,

    /** Low error control strength. */
    EFecStrengthLow = 256,

    /** Normal error control strength. */
    EFecStrengthNormal = 512,

    /** High error control strength. */
    EFecStrengthHigh = 768
    };

/**
Defines the scalability type for in-layer bit-rate scalability.
@publishedAll
@released
*/
enum TInLayerScalabilityType
    {
    /** Temporal scalability, such as B-pictures. */
    EInLScalabilityTemporal = 1,

    /** Other scalability type. */
    EInLScalabilityOther
    };

/**
Defines what part of a frame is contained within a video buffer.
@publishedAll
@released
*/
enum TFramePortion
    {
    /** The frame portion is unknown. */
    EFramePortionUnknown,

    /** An entire frame. */
    EFramePortionWhole,

    /** A fragment of a frame containing the start but not the end. */
    EFramePortionStartFragment,

    /** A fragment of a frame containing neither the start nor the end. */
    EFramePortionMidFragment,

    /** A fragment of a frame containing the end but not the start. */
    EFramePortionEndFragment
    };

#endif