Attempt to represent the S^2->S^3 header reorganisation as a series of "hg rename" operations
1 // Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Symbian Foundation License v1.0" to Symbian Foundation members and "Symbian Foundation End User License Agreement v1.0" to non-members
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.symbianfoundation.org/legal/licencesv10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
16 #ifndef __DEVVIDEOCONSTANTS_H__
17 #define __DEVVIDEOCONSTANTS_H__
20 #include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
21 #include <mm/conversioncoefficient.h>
24 DevVideo Panic Category
29 _LIT(KDevVideoPanicCategory, "DevVideo");
/**
DevVideo Panic Codes

@publishedAll
@released
*/
enum TDevVideoPanicCodes
	{
	/**
	A pre-condition on a method has been violated.
	*/
	EDevVideoPanicPreConditionViolation = 1,
	/**
	A post-condition on a method has been violated.
	*/
	EDevVideoPanicPostConditionViolation = 2,
	/**
	An invalid hardware device ID has been supplied.
	*/
	EDevVideoPanicInvalidHwDeviceId = 3
	};
54 // DevVideo Plugin Interface UIDs
56 /** Video Decoder HW Device Plugin Interface UID
60 const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};
62 /** Video Post Processor HW Device Plugin Interface UID
66 const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};
68 /** Video Encoder HW Device Plugin Interface UID
72 const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};
74 /** Video Pre Processor HW Device Plugin Interface UID
78 const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};
80 // DevVideo Custom Interface Uids
82 /** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
86 const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};
90 Picture frame rate constants
92 Using these constants is recommended when the picture rate is known to match
93 one of them, to ensure that floating point equality comparisons work as expected.
95 Note that the MSL video APIs currently only deal with non-interlaced frames. For interlaced
96 video, all references to the term "picture" should be considered to refer to complete frames.
97 As such, the term "picture rate" here refers to the frame rate for interlaced video.
102 const TReal KPictureRate5 = 5.0;
103 const TReal KPictureRate75 = 7.5;
104 const TReal KPictureRate10 = 10.0;
105 const TReal KPictureRate15 = 15.0;
106 const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
107 const TReal KPictureRate25 = 25.0;
108 const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
109 const TReal KPictureRate30 = 30.0;
/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition constants.

@publishedAll
@released
*/
enum TImageDataFormat
	{
	/** Raw RGB picture data in a memory area. */
	ERgbRawData   = 0x01000000,
	/** RGB picture data stored in a Symbian OS CFbsBitmap object. */
	ERgbFbsBitmap = 0x02000000,
	/**
	Raw YUV picture data stored in a memory area. The data storage
	format depends on the YUV sampling pattern and data layout used.
	*/
	EYuvRawData   = 0x04000000
	};
135 RGB uncompressed image format alternatives.
142 16-bit RGB data format with four pixels per component.
143 The data format is the same as used in Symbian EColor4K bitmaps,
144 with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
145 where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
147 ERgb16bit444 = ERgbRawData | 0x00000001,
150 16-bit RGB data format with five bits per component for red and blue and
151 six bits for green. The data format is the same as used in Symbian EColor64K bitmaps,
152 with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
153 (This corresponds to "RGB" 16-bit little-endian halfwords)
155 ERgb16bit565 = ERgbRawData | 0x00000002,
158 32-bit RGB data format with eight bits per component.
159 This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
160 [bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits.
161 (This corresponds to "XRGB" 32-bit little-endian words)
163 ERgb32bit888 = ERgbRawData | 0x00000004,
166 CFbsBitmap object with EColor4K data format.
168 EFbsBitmapColor4K = ERgbFbsBitmap | 0x00000001,
171 CFbsBitmap object with EColor64K data format.
173 EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,
176 CFbsBitmap object with EColor16M data format.
178 EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,
181 CFbsBitmap object with EColor16MU data format.
183 EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
/**
YUV (YCbCr) uncompressed image data sampling pattern.

@publishedAll
@released
*/
enum TYuvSamplingPattern
	{
	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically half-way of the luminance sample positions and horizontally aligned with the left
	side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
	*/
	EYuv420Chroma1 = 0x00000001,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
	*/
	EYuv420Chroma2 = 0x00000002,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	colocates with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
	*/
	EYuv420Chroma3 = 0x00000004,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located
	with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
	*/
	EYuv422Chroma1 = 0x00000008,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is in the
	middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
	*/
	EYuv422Chroma2 = 0x00000010
	};
/**
Defines the YUV data layout in a decoded picture.

@publishedAll
@released
*/
enum TYuvDataLayout
	{
	/**
	The data is stored in a plane mode. The memory buffer contains first all Y component
	data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0...
	For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API
	*/
	EYuvDataPlanar = 0x00000001,

	/**
	The data is stored interleaved mode, all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U,
	corresponding to "UY0VY1" little-endian 32-bit words.
	This is the same data format as EFormatYUV422Reversed in the Onboard Camera API
	*/
	EYuvDataInterleavedLE = 0x00000002,

	/**
	The data is stored interleaved mode, all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1,
	corresponding to "UY0VY1" big-endian 32-bit words.
	This is the same data format as EFormatYUV422 in the Onboard Camera API
	*/
	EYuvDataInterleavedBE = 0x00000004,

	/**
	The data is stored in a semi-planar mode. The memory buffer contains first all Y component
	data for the whole picture, followed by U and V components, which are interlaced, making the data
	format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as
	FormatYUV420SemiPlanar in the Onboard Camera API
	*/
	EYuvDataSemiPlanar = 0x00000008
	};
/**
Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC MPEG-4 AVC [] for the definitions of the transition effects.

@publishedAll
@released
*/
enum TPictureEffect
	{
	/** No effect. */
	EEffectNone = 0x00000001,

	/** Fade from black. */
	EEffectFadeFromBlack = 0x00000002,

	/** Fade to black. */
	EEffectFadeToBlack = 0x00000004,

	/** Unspecified transition from or to constant colour. */
	EEffectUnspecifiedThroughConstantColor = 0x00000008,

	/** Dissolve. */
	EEffectDissolve = 0x00000010,

	/** Wipe. */
	EEffectWipe = 0x00000020,

	/** Unspecified mixture of two scenes. */
	EEffectUnspecifiedMixOfTwoScenes = 0x00000040
	};
/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors.

@publishedAll
@released
*/
enum TRgbRange
	{
	/**
	The RGB data uses the full 8-bit range of [0...255].
	*/
	ERgbRangeFull = 0x00000001,

	/**
	The RGB data uses the nominal range of [16...235]. Individual samples can still contain
	values beyond that range, the rest of the 8-bit range is used for headroom and footroom.
	*/
	ERgbRange16to235 = 0x00000002
	};
/**
Defines possible data unit types for encoded video data. The data unit types are used both
for encoded video input for playback as well as encoded video output from recording.

@publishedAll
@released
*/
enum TVideoDataUnitType
	{
	/**
	Each data unit is a single coded picture.
	*/
	EDuCodedPicture = 0x00000001,

	/**
	Each data unit is a coded video segment.
	A coded video segment is a part of the coded video data that forms an independently
	decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2
	and slice in H.263 are coded video segments.
	*/
	EDuVideoSegment = 0x00000002,

	/**
	Each data unit contains an integer number of video segments consecutive in decoding order,
	possibly more than one. The video segments shall be a subset of one coded picture.
	*/
	EDuSeveralSegments = 0x00000004,

	/**
	Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers.
	The data must be written in decoding order. This data unit type can be used for playback if the client
	does not have information about the bitstream syntax, and just writes data in random-sized chunks. For
	recording this data unit type is useful if the client can handle arbitrarily split data units, giving the
	encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still
	belong to exactly one output picture.
	*/
	EDuArbitraryStreamSection = 0x00000008
	};
/**
Defines possible encapsulation types for coded video data units. The encapsulation information is
used both for encoded video input for playback as well as encoded video output from recording.

@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
	{
	/**
	The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4
	Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
	*/
	EDuElementaryStream = 0x00010000,

	/**
	The coded data units are encapsulated in a general-purpose packet payload format whose coded
	data units can be decoded independently but cannot be generally chained into a bitstream.
	For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category.
	*/
	EDuGenericPayload = 0x00020000,

	/**
	The coded data units are encapsulated in RTP packet payload format. The RTP payload header
	may contain codec-specific items, such as a redundant copy of a picture header in the H.263
	payload specification RFC2429.
	*/
	EDuRtpPayload = 0x00040000
	};
/**
Defines the HRD/VBV specification used in a stream.

@publishedAll
@released
*/
enum THrdVbvSpecification
	{
	/** No HRD/VBV specification. */
	EHrdVbvNone = 0x00000001,

	/** The HRD/VBV specification in the corresponding coding standard. */
	EHrdVbvCodingStandard = 0x00000002,

	/** Annex G of 3GPP TS 26.234 Release 5. */
	EHrdVbv3GPP = 0x00000004
	};
/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the
types are defined as bit values that can be combined as a bitfield.

@publishedAll
@released
*/
enum TPrePostProcessType
	{
	/**
	Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording.
	Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that
	only support image dimensions that are multiples of 16 pixels.
	*/
	EPpInputCrop = 0x00000001,

	/**
	Horizontal mirroring, flips the image data around a vertical line in its center.
	*/
	EPpMirror = 0x00000002,

	/**
	Picture rotation, supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
	*/
	EPpRotate = 0x00000004,

	/**
	Picture scaling to a new size, includes both upscaling and downscaling.
	The supported scaling types and scale factors depend on the pixel processor.
	*/
	EPpScale = 0x00000008,

	/**
	Crops the picture to a final output rectangle.
	*/
	EPpOutputCrop = 0x00000010,

	/**
	Pads the output picture to a defined size. Used in video recording to pad pictures to
	suit the encoder input requirements.
	*/
	EPpOutputPad = 0x00000020,

	/**
	YUV to RGB color space conversion. Supported only for video playback.
	*/
	EPpYuvToRgb = 0x00000040,

	/**
	RGB to YUV color space conversion. Supported only for video recording.
	*/
	EPpRgbToYuv = 0x00000080,

	/**
	YUV to YUV data format conversion. Supported only for video recording.
	*/
	EPpYuvToYuv = 0x00000100,

	/**
	Noise filtering. Noise filtering is typically used to enhance the input
	picture from the camera, and is usually only supported for video recording.
	*/
	EPpNoiseFilter = 0x00000200,

	/**
	Color enhancement. Color enhancement is typically used to enhance the input picture
	from the camera, and is usually only supported for video recording.
	*/
	EPpColorEnhancement = 0x00000400,

	/**
	Frame stabilisation. Supported only for video recording.
	*/
	EPpFrameStabilisation = 0x00000800,

	/**
	Deblocking is typically used to remove artefacts from the output picture that result from
	high compression or a noisy input signal. Only supported for video playback.
	*/
	EPpDeblocking = 0x00001000,

	/**
	Deringing is typically used to remove artefacts from the output picture that result from
	a noisy input signal corrupting motion estimates. Only supported for video playback.
	*/
	EPpDeringing = 0x00002000,

	/**
	Custom hardware device specific processing.
	*/
	EPpCustom = 0x10000000
	};
/**
Dithering types.

@publishedAll
@released
*/
enum TDitherType
	{
	/** No dithering. */
	EDitherNone = 0x00000001,

	/** Ordered dither. */
	EDitherOrdered = 0x00000002,

	/** Error diffusion dither. */
	EDitherErrorDiffusion = 0x00000004,

	/** Other hardware device specific dithering type. */
	EDitherOther = 0x00000008
	};
/**
Rotation types for pre-processors and post-processors.

@publishedAll
@released
*/
enum TRotationType
	{
	/** No rotation. */
	ERotateNone = 0x00000001,

	/** Rotate the picture 90 degrees clockwise. */
	ERotate90Clockwise = 0x00000002,

	/** Rotate the picture 90 degrees anticlockwise. */
	ERotate90Anticlockwise = 0x00000004,

	/** Rotate the picture 180 degrees. */
	ERotate180 = 0x00000008
	};
/**
Defines possible encoding bit-rate control modes.

@publishedAll
@released
*/
enum TBitrateControlType
	{
	/**
	The encoder does not control the bit-rate, but uses specified target picture quality and picture
	rate as such. The coded data stream must still remain compliant with the standard and buffer settings
	in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
	*/
	EBrControlNone = 0x00000001,

	/**
	The encoder controls the coded bit-rate of the stream. The caller indicates target bit-rate, target
	picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
	*/
	EBrControlStream = 0x00000002,

	/**
	The encoder controls the coded bit-rate of each picture. The caller gives the target amount of bits per
	frame. Each given input frame is coded. This type of operation is applicable only in memory-buffer-based
	input.
	*/
	EBrControlPicture = 0x00000004
	};
/**
Defines the scalability type for a single bit-rate scalability layer.

@publishedAll
@released
*/
enum TScalabilityType
	{
	/**
	The layer uses temporal scalability. Using the layer increases the picture rate.
	*/
	EScalabilityTemporal = 0x00000001,

	/**
	The layer uses quality scalability. Using the layer improves picture quality.
	*/
	EScalabilityQuality = 0x00000002,

	/**
	The layer uses spatial scalability. Using the layer increases picture resolution.
	*/
	EScalabilitySpatial = 0x00000004,

	/**
	The layer is a fine-granularity scalability layer. In fine granularity scalability, the output
	quality increases gradually as a function of decoded bits from the enhancement layer.
	*/
	EScalabilityFineGranularity = 0x10000000,

	/**
	The layer is a fine-granularity quality scalability layer.
	*/
	EScalabilityQualityFG = EScalabilityFineGranularity | EScalabilityQuality
	};
/**
Forward error control strength used for an unequal error protection level. Also other values between
EFecStrengthNone and EFecStrengthHigh can be used, the encoder will round the values to the levels
supported.

@publishedAll
@released
*/
enum TErrorControlStrength
	{
	/** No error control. */
	EFecStrengthNone = 0,

	/** Low error control strength. */
	EFecStrengthLow = 256,

	/** Normal error control strength. */
	EFecStrengthNormal = 512,

	/** High error control strength. */
	EFecStrengthHigh = 768
	};
/**
Defines the scalability type for in-layer bit-rate scalability.

@publishedAll
@released
*/
enum TInLayerScalabilityType
	{
	/** Temporal scalability, such as B-pictures. */
	EInLScalabilityTemporal = 1,

	/** Other scalability type. */
	EInLScalabilityOther
	};
/**
Defines what part of a frame is contained within a video buffer.

@publishedAll
@released
*/
enum TFramePortion
	{
	/** The frame portion is unknown. */
	EFramePortionUnknown,

	/** An entire frame. */
	EFramePortionWhole,

	/** A fragment of a frame containing the start but not the end. */
	EFramePortionStartFragment,

	/** A fragment of a frame containing neither the start nor the end. */
	EFramePortionMidFragment,

	/** A fragment of a frame containing the end but not the start. */
	EFramePortionEndFragment
	};