1 // Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
16 #ifndef __DEVVIDEOCONSTANTS_H__
17 #define __DEVVIDEOCONSTANTS_H__
20 #include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
21 #include <mm/conversioncoefficient.h>
24 DevVideo Panic Category
29 _LIT(KDevVideoPanicCategory, "DevVideo");
/**
DevVideo panic codes. Raised (with KDevVideoPanicCategory) on API contract violations.
*/
enum TDevVideoPanicCodes
    {
    /** A pre-condition on a method has been violated. */
    EDevVideoPanicPreConditionViolation = 1,
    /** A post-condition on a method has been violated. */
    EDevVideoPanicPostConditionViolation = 2,
    /** An invalid hardware device ID has been supplied. */
    EDevVideoPanicInvalidHwDeviceId = 3
    };
54 // DevVideo Plugin Interface UIDs
56 /** Video Decoder HW Device Plugin Interface UID
60 const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};
62 /** Video Post Processor HW Device Plugin Interface UID
66 const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};
68 /** Video Encoder HW Device Plugin Interface UID
72 const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};
74 /** Video Pre Processor HW Device Plugin Interface UID
78 const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};
80 // DevVideo Custom Interface Uids
82 /** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
86 const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};
89 Picture frame rate constants
91 Using these constants is recommended when the picture rate is known to match
92 one of them, to ensure that floating point equality comparisons work as expected.
94 Note that the MSL video APIs currently only deal with non-interlaced frames. For interlaced
95 video, all references to the term "picture" should be considered to refer to complete frames.
96 As such, the term "picture rate" here refers to the frame rate for interlaced video.
101 const TReal KPictureRate5 = 5.0;
102 const TReal KPictureRate75 = 7.5;
103 const TReal KPictureRate10 = 10.0;
104 const TReal KPictureRate15 = 15.0;
105 const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
106 const TReal KPictureRate25 = 25.0;
107 const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
108 const TReal KPictureRate30 = 30.0;
/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition constants.
*/
enum TImageDataFormat
    {
    /** Raw RGB picture data in a memory area. */
    ERgbRawData   = 0x01000000,
    /** RGB picture data stored in a Symbian OS CFbsBitmap object. */
    ERgbFbsBitmap = 0x02000000,
    /**
    Raw YUV picture data stored in a memory area. The data storage
    format depends on the YUV sampling pattern and data layout used.
    */
    EYuvRawData   = 0x04000000,
    /**
    Picture stored in a surface buffer.
    @see MMmfVideoSurfaceHandleControl::MmvshcSetSurfaceHandle
    */
    ESurfaceBuffer = 0x08000000
    };
139 RGB uncompressed image format alternatives.
146 16-bit RGB data format with four pixels per component.
147 The data format is the same as used in Symbian EColor4K bitmaps,
148 with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
149 where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords)
151 ERgb16bit444 = ERgbRawData | 0x00000001,
154 16-bit RGB data format with five bits per component for red and blue and
155 six bits for green. The data format is the same as used in Symbian EColor64K bitmaps,
156 with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg]
157 (This corresponds to "RGB" 16-bit little-endian halfwords)
159 ERgb16bit565 = ERgbRawData | 0x00000002,
162 32-bit RGB data format with eight bits per component.
163 This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
164 [bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits.
165 (This corresponds to "XRGB" 32-bit little-endian words)
167 ERgb32bit888 = ERgbRawData | 0x00000004,
170 CFbsBitmap object with EColor4K data format.
172 EFbsBitmapColor4K = ERgbFbsBitmap | 0x00000001,
175 CFbsBitmap object with EColor64K data format.
177 EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,
180 CFbsBitmap object with EColor16M data format.
182 EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,
185 CFbsBitmap object with EColor16MU data format.
187 EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
/**
YUV (YCbCr) uncompressed image data sampling pattern.
*/
enum TYuvSamplingPattern
    {
    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    is vertically half-way of the luminance sample positions and horizontally aligned with the left
    side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
    */
    EYuv420Chroma1 = 0x00000001,

    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
    */
    EYuv420Chroma2 = 0x00000002,

    /**
    4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
    The four luminance sample positions are on the corners of a square. The chrominance sample position
    colocates with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
    */
    EYuv420Chroma3 = 0x00000004,

    /**
    4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
    The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located
    with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
    */
    EYuv422Chroma1 = 0x00000008,

    /**
    4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
    The luminance sample positions reside on the same pixel row. The chrominance sample position is in the
    middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
    */
    EYuv422Chroma2 = 0x00000010
    };
/**
Defines the YUV data layout in a decoded picture.
*/
enum TYuvDataLayout
    {
    /**
    The data is stored in a plane mode. The memory buffer contains first all Y component
    data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0...
    For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API
    */
    EYuvDataPlanar = 0x00000001,

    /**
    The data is stored interleaved mode, all components interleaved in a single memory block.
    Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U,
    corresponding to "UY0VY1" little-endian 32-bit words.
    This is the same data format as EFormatYUV422Reversed in the Onboard Camera API
    */
    EYuvDataInterleavedLE = 0x00000002,

    /**
    The data is stored interleaved mode, all components interleaved in a single memory block.
    Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1,
    corresponding to "UY0VY1" big-endian 32-bit words.
    This is the same data format as EFormatYUV422 in the Onboard Camera API
    */
    EYuvDataInterleavedBE = 0x00000004,

    /**
    The data is stored in a semi-planar mode. The memory buffer contains first all Y component
    data for the whole picture, followed by U and V components, which are interlaced, making the data
    format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as
    FormatYUV420SemiPlanar in the Onboard Camera API
    */
    EYuvDataSemiPlanar = 0x00000008
    };
/**
Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC MPEG-4 AVC [] for the definitions of the transition effects.
*/
enum TPictureEffect
    {
    /** No effect. */
    EEffectNone = 0x00000001,

    /** Fade from black. */
    EEffectFadeFromBlack = 0x00000002,

    /** Fade to black. */
    EEffectFadeToBlack = 0x00000004,

    /** Unspecified transition from or to constant colour. */
    EEffectUnspecifiedThroughConstantColor = 0x00000008,

    /** Dissolve. */
    EEffectDissolve = 0x00000010,

    /** Wipe. */
    EEffectWipe = 0x00000020,

    /** Unspecified mixture of two scenes. */
    EEffectUnspecifiedMixOfTwoScenes = 0x00000040
    };
/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors.
*/
enum TRgbRange
    {
    /** The RGB data uses the full 8-bit range of [0…255]. */
    ERgbRangeFull = 0x00000001,

    /**
    The RGB data uses the nominal range of [16…235]. Individual samples can still contain
    values beyond that range, the rest of the 8-bit range is used for headroom and footroom.
    */
    ERgbRange16to235 = 0x00000002
    };
/**
Defines possible data unit types for encoded video data. The data unit types are used both
for encoded video input for playback as well as encoded video output from recording.
*/
enum TVideoDataUnitType
    {
    /** Each data unit is a single coded picture. */
    EDuCodedPicture = 0x00000001,

    /**
    Each data unit is a coded video segment.
    A coded video segment is a part of the coded video data that forms an independently
    decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2
    and slice in H.263 are coded video segments.
    */
    EDuVideoSegment = 0x00000002,

    /**
    Each data unit contains an integer number of video segments consecutive in decoding order,
    possibly more than one. The video segments shall be a subset of one coded picture.
    */
    EDuSeveralSegments = 0x00000004,

    /**
    Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers.
    The data must be written in decoding order. This data unit type can be used for playback if the client
    does not have information about the bitstream syntax, and just writes data in random-sized chunks. For
    recording this data unit type is useful if the client can handle arbitrarily split data units, giving the
    encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still
    belong to exactly one output picture.
    */
    EDuArbitraryStreamSection = 0x00000008
    };
/**
Defines possible encapsulation types for coded video data units. The encapsulation information is
used both for encoded video input for playback as well as encoded video output from recording.
*/
enum TVideoDataUnitEncapsulation
    {
    /**
    The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4
    Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
    */
    EDuElementaryStream = 0x00010000,

    /**
    The coded data units are encapsulated in a general-purpose packet payload format whose coded
    data units can be decoded independently but cannot be generally chained into a bitstream.
    For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category.
    */
    EDuGenericPayload = 0x00020000,

    /**
    The coded data units are encapsulated in RTP packet payload format. The RTP payload header
    may contain codec-specific items, such as a redundant copy of a picture header in the H.263
    payload specification RFC2429.
    */
    EDuRtpPayload = 0x00040000
    };
/**
Defines the HRD/VBV specification used in a stream.
*/
enum THrdVbvSpecification
    {
    /** No HRD/VBV specification. */
    EHrdVbvNone = 0x00000001,

    /** The HRD/VBV specification in the corresponding coding standard. */
    EHrdVbvCodingStandard = 0x00000002,

    /** Annex G of 3GPP TS 26.234 Release 5. */
    EHrdVbv3GPP = 0x00000004
    };
/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the
types are defined as bit values that can be combined as a bitfield.
*/
enum TPrePostProcessType
    {
    /**
    Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording.
    Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that
    only support image dimensions that are multiples of 16 pixels.
    */
    EPpInputCrop = 0x00000001,

    /** Horizontal mirroring, flips the image data around a vertical line in its center. */
    EPpMirror = 0x00000002,

    /** Picture rotation, supports rotation by 90 or 180 degrees, clockwise and anticlockwise. */
    EPpRotate = 0x00000004,

    /**
    Picture scaling to a new size, includes both upscaling and downscaling.
    The supported scaling types and scale factors depend on the pixel processor.
    */
    EPpScale = 0x00000008,

    /** Crops the picture to a final output rectangle. */
    EPpOutputCrop = 0x00000010,

    /**
    Pads the output picture to a defined size. Used in video recording to pad pictures to
    suit the encoder input requirements.
    */
    EPpOutputPad = 0x00000020,

    /** YUV to RGB color space conversion. Supported only for video playback. */
    EPpYuvToRgb = 0x00000040,

    /** RGB to YUV color space conversion. Supported only for video recording. */
    EPpRgbToYuv = 0x00000080,

    /** YUV to YUV data format conversion. Supported only for video recording. */
    EPpYuvToYuv = 0x00000100,

    /**
    Noise filtering. Noise filtering is typically used to enhance the input
    picture from the camera, and is usually only supported for video recording.
    */
    EPpNoiseFilter = 0x00000200,

    /**
    Color enhancement. Color enhancement is typically used to enhance the input picture
    from the camera, and is usually only supported for video recording.
    */
    EPpColorEnhancement = 0x00000400,

    /** Frame stabilisation. Supported only for video recording. */
    EPpFrameStabilisation = 0x00000800,

    /**
    Deblocking is typically used to remove artefacts from the output picture that result from
    high compression or a noisy input signal. Only supported for video playback.
    */
    EPpDeblocking = 0x00001000,

    /**
    Deringing is typically used to remove artefacts from the output picture that result from
    a noisy input signal corrupting motion estimates. Only supported for video playback.
    */
    EPpDeringing = 0x00002000,

    /** Custom hardware device specific processing. */
    EPpCustom = 0x10000000
    };
/**
Dithering types for pre-processors and post-processors.
*/
enum TDitherType
    {
    /** No dithering. */
    EDitherNone = 0x00000001,

    /** Ordered dither. */
    EDitherOrdered = 0x00000002,

    /** Error diffusion dither. */
    EDitherErrorDiffusion = 0x00000004,

    /** Other hardware device specific dithering type. */
    EDitherOther = 0x00000008
    };
/**
Rotation types for pre-processors and post-processors.
*/
enum TRotationType
    {
    /** No rotation. */
    ERotateNone = 0x00000001,

    /** Rotate the picture 90 degrees clockwise. */
    ERotate90Clockwise = 0x00000002,

    /** Rotate the picture 90 degrees anticlockwise. */
    ERotate90Anticlockwise = 0x00000004,

    /** Rotate the picture 180 degrees. */
    ERotate180 = 0x00000008
    };
/**
Defines possible encoding bit-rate control modes.
*/
enum TBitrateControlType
    {
    /**
    The encoder does not control the bit-rate, but uses specified target picture quality and picture
    rate as such. The coded data stream must still remain compliant with the standard and buffer settings
    in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
    */
    EBrControlNone = 0x00000001,

    /**
    The encoder controls the coded bit-rate of the stream. The caller indicates target bit-rate, target
    picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
    */
    EBrControlStream = 0x00000002,

    /**
    The encoder controls the coded bit-rate of each picture. The caller gives the target amount of bits per
    frame. Each given input frame is coded. This type of operation is applicable only in memory-buffer-based
    input.
    */
    EBrControlPicture = 0x00000004
    };
/**
Defines the scalability type for a single bit-rate scalability layer.
*/
enum TScalabilityType
    {
    /** The layer uses temporal scalability. Using the layer increases the picture rate. */
    EScalabilityTemporal = 0x00000001,

    /** The layer uses quality scalability. Using the layer improves picture quality. */
    EScalabilityQuality = 0x00000002,

    /** The layer uses spatial scalability. Using the layer increases picture resolution. */
    EScalabilitySpatial = 0x00000004,

    /**
    The layer is a fine-granularity scalability layer. In fine granularity scalability, the output
    quality increases gradually as a function of decoded bits from the enhancement layer.
    */
    EScalabilityFineGranularity = 0x10000000,

    /** The layer is a fine-granularity quality scalability layer. */
    EScalabilityQualityFG = EScalabilityFineGranularity | EScalabilityQuality
    };
/**
Forward error control strength used for an unequal error protection level. Also other values between
EFecStrengthNone and EFecStrengthHigh can be used, the encoder will round the values to the levels
it supports.
*/
enum TErrorControlStrength
    {
    /** No error control. */
    EFecStrengthNone = 0,

    /** Low error control strength. */
    EFecStrengthLow = 256,

    /** Normal error control strength. */
    EFecStrengthNormal = 512,

    /** High error control strength. */
    EFecStrengthHigh = 768
    };
/**
Defines the scalability type for in-layer bit-rate scalability.
*/
enum TInLayerScalabilityType
    {
    /** Temporal scalability, such as B-pictures. */
    EInLScalabilityTemporal = 1,

    /** Other scalability type. */
    EInLScalabilityOther
    };
/**
Defines what part of a frame is contained within a video buffer.
*/
enum TFramePortion
    {
    /** The frame portion is unknown. */
    EFramePortionUnknown,

    /** An entire frame. */
    EFramePortionWhole,

    /** A fragment of a frame containing the start but not the end. */
    EFramePortionStartFragment,

    /** A fragment of a frame containing neither the start nor the end. */
    EFramePortionMidFragment,

    /** A fragment of a frame containing the end but not the start. */
    EFramePortionEndFragment
    };