--- /dev/null
+/*\r
+ * GPAC - Multimedia Framework C SDK\r
+ *\r
+ * Copyright (c) Jean Le Feuvre 2000-2005 \r
+ * All rights reserved\r
+ *\r
+ * This file is part of GPAC / modules interfaces\r
+ *\r
+ * GPAC is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU Lesser General Public License as published by\r
+ * the Free Software Foundation; either version 2, or (at your option)\r
+ * any later version.\r
+ * \r
+ * GPAC is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU Lesser General Public License for more details.\r
+ * \r
+ * You should have received a copy of the GNU Lesser General Public\r
+ * License along with this library; see the file COPYING. If not, write to\r
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. \r
+ *\r
+ */\r
+\r
+\r
+\r
+#ifndef _GF_MODULE_CODEC_H_\r
+#define _GF_MODULE_CODEC_H_\r
+\r
+\r
+#include <gpac/module.h>\r
+#include <gpac/mpeg4_odf.h>\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+/*multimedia processing levels: hints passed to decoders (the mmlevel argument of ProcessData)\r
+so they can adapt their processing to the terminal load*/\r
+enum\r
+{\r
+ /*normal, full processing*/\r
+ GF_CODEC_LEVEL_NORMAL,\r
+ /*codec is late, should scale down processing*/\r
+ GF_CODEC_LEVEL_LATE,\r
+ /*codec is very late, should turn off post-processing, even drop*/\r
+ GF_CODEC_LEVEL_VERY_LATE,\r
+ /*input frames are already late before decoding*/\r
+ GF_CODEC_LEVEL_DROP,\r
+ /*this is a special level indicating that a seek is happening (decode but no dispatch)\r
+ it is set dynamically*/\r
+ GF_CODEC_LEVEL_SEEK\r
+};\r
+\r
+/*the structure for capabilities, exchanged through GetCapabilities/SetCapabilities below.\r
+CapCode selects the capability; whether cap is read as int or float depends on the CapCode*/\r
+typedef struct \r
+{\r
+ /*cap code cf below*/\r
+ u16 CapCode;\r
+ /*capability value - interpretation (int vs float) is defined per CapCode*/\r
+ union {\r
+ u32 valueInt;\r
+ Float valueFloat;\r
+ } cap;\r
+} GF_CodecCapability;\r
+\r
+\r
+/*\r
+ all codecs capabilities\r
+*/\r
+\r
+enum\r
+{\r
+ /*size of a single composition unit */\r
+ GF_CODEC_OUTPUT_SIZE = 0x01,\r
+ /*resiliency: if packets are lost within an AU, resilience means the AU won't be discarded and the codec\r
+ will try to decode */\r
+ GF_CODEC_RESILIENT,\r
+ /*critical level of composition memory - if below, media management for the object */\r
+ GF_CODEC_BUFFER_MIN,\r
+ /*maximum size in CU of composition memory */\r
+ GF_CODEC_BUFFER_MAX,\r
+ /*flags that all AUs should be discarded till next RAP (needed in case RAPs are not carried by the transport\r
+ protocol) */\r
+ GF_CODEC_WAIT_RAP,\r
+ /*number of padding bytes needed - if the decoder needs padding input cannot be pulled and data is duplicated*/\r
+ GF_CODEC_PADDING_BYTES,\r
+ /*codecs can be threaded at will - by default a single thread is used for all decoders and priority is handled\r
+ by the app, but a codec can configure itself to run in a dedicated thread*/\r
+ GF_CODEC_WANTS_THREAD,\r
+\r
+ /*video width and height and horizontal pitch (in YV12 we assume half Y pitch for U and V planes) */\r
+ GF_CODEC_WIDTH,\r
+ GF_CODEC_HEIGHT,\r
+ GF_CODEC_STRIDE,\r
+ /*video frame rate*/\r
+ GF_CODEC_FPS,\r
+ /*Pixel Aspect Ratio, expressed as (par.num<<16) | par.den*/\r
+ GF_CODEC_PAR,\r
+ /*video color mode - color modes are defined in constants.h*/\r
+ GF_CODEC_PIXEL_FORMAT,\r
+ /*signal decoder performs frame re-ordering in temporal scalability*/\r
+ GF_CODEC_REORDER,\r
+ \r
+ /*Audio sample rate*/\r
+ GF_CODEC_SAMPLERATE,\r
+ /*Audio num channels*/\r
+ GF_CODEC_NB_CHAN,\r
+ /*Audio bps*/\r
+ GF_CODEC_BITS_PER_SAMPLE,\r
+ /*audio frame format*/\r
+ GF_CODEC_CHANNEL_CONFIG,\r
+ /*this is only used for audio in case transport mapping relies on sampleRate (RTP)\r
+ gets the CU duration in samplerate unit (type: int) */\r
+ GF_CODEC_CU_DURATION,\r
+\r
+ /*This is only called on scene decoders to signal that potential overlay scene should be \r
+ shown (cap.valueInt=1) or hidden (cap.valueInt=0). Currently only used with SetCap*/\r
+ GF_CODEC_SHOW_SCENE,\r
+ /*This is only called on scene decoders, GetCap only. If the decoder may continue modifying the scene once the last AU is received,\r
+ it must set cap.valueInt to 1 (typically, text stream decoder will hold the scene for a given duration\r
+ after the last AU). Otherwise the decoder will be stopped and asked to remove any extra scene being displayed*/\r
+ GF_CODEC_MEDIA_NOT_OVER,\r
+};\r
+\r
+\r
+ /* Generic interface used by both media decoders and scene decoders\r
+@AttachStream:\r
+ Add a Stream to the codec. If DependsOnESID is NULL, the stream is a base layer\r
+ UpStream means that the decoder should send feedback on this channel. \r
+ WARNING: Feedback format is not standardized by MPEG\r
+ the same API is used for both encoder and decoder (decSpecInfo is ignored\r
+ for an encoder) \r
+@DetachStream:\r
+ Remove stream\r
+@GetCapabilities:\r
+ Get the desired capability given its code\r
+@SetCapabilities:\r
+ Set the desired capability given its code if possible\r
+ if the codec does not support the requested capability, return GF_NOT_SUPPORTED\r
+@CanHandleStream:\r
+ Can module handle this codec? Return 0 if No and !0 otherwise\r
+ decoderSpecificInfo is provided for MPEG4 audio/visual where a bunch of codecs are defined \r
+ with same objectType\r
+@GetName:\r
+ returns codec name - only called once the stream is successfully attached\r
+@privateStack:\r
+ user defined.\r
+*/\r
+\r
+#define GF_CODEC_BASE_INTERFACE(IFCE_NAME) \\r
+ GF_DECL_MODULE_INTERFACE \\r
+ GF_Err (*AttachStream)(IFCE_NAME, GF_ESD *esd);\\r
+ GF_Err (*DetachStream)(IFCE_NAME, u16 ES_ID);\\r
+ GF_Err (*GetCapabilities)(IFCE_NAME, GF_CodecCapability *capability);\\r
+ GF_Err (*SetCapabilities)(IFCE_NAME, GF_CodecCapability capability);\\r
+ Bool (*CanHandleStream)(IFCE_NAME, u32 StreamType, u32 ObjectType, char *decSpecInfo, u32 decSpecInfoSize, u32 PL);\\r
+ const char *(*GetName)(IFCE_NAME);\\r
+ void *privateStack; \\r
+\r
+\r
+/*base decoder object: declares only the members common to all decoder interfaces.\r
+Since GF_MediaDecoder and GF_SceneDecoder start with the same GF_CODEC_BASE_INTERFACE\r
+members, they can be safely accessed through a GF_BaseDecoder pointer*/\r
+typedef struct _basedecoder\r
+{\r
+ GF_CODEC_BASE_INTERFACE(struct _basedecoder *)\r
+} GF_BaseDecoder;\r
+\r
+/*interface name and version for media decoder */\r
+#define GF_MEDIA_DECODER_INTERFACE GF_4CC('G', 'M', 'D', 0x02)\r
+\r
+/*the media module interface. A media module MUST be implemented in synchronous mode as time \r
+and resources management is done by the terminal*/\r
+typedef struct _mediadecoder\r
+{\r
+ GF_CODEC_BASE_INTERFACE(struct _basedecoder *)\r
+\r
+ /*Process the media data in inAU. \r
+ @inBuffer, inBufferLength: encoded input data (complete framing of encoded data)\r
+ @ES_ID: stream this data belongs to (scalable object)\r
+ @outBuffer, outBufferLength: allocated data for decoding - if outBufferLength is not enough\r
+ you must set the needed size in outBufferLength and return GF_BUFFER_TOO_SMALL \r
+\r
+ @PaddingBits is the padding at the end of the buffer (some codecs need this info)\r
+ @mmlevel: speed indicator for the decoding - cf GF_CODEC_LEVEL_* above for values*/\r
+ GF_Err (*ProcessData)(struct _mediadecoder *, \r
+ char *inBuffer, u32 inBufferLength,\r
+ u16 ES_ID,\r
+ char *outBuffer, u32 *outBufferLength,\r
+ u8 PaddingBits, u32 mmlevel);\r
+} GF_MediaDecoder;\r
+\r
+\r
+\r
+/*opaque handle to the inline scene owning the graph - defined in intern/terminal_dev.h*/\r
+typedef struct _inline_scene *LPINLINESCENE;\r
+\r
+/*interface name and version for scene decoder */\r
+#define GF_SCENE_DECODER_INTERFACE GF_4CC('G', 'S', 'D', 0x02)\r
+\r
+/*the scene module interface: decodes streams that build or modify the scene graph (BIFS, OD, ...)*/\r
+typedef struct _scenedecoder\r
+{\r
+ GF_CODEC_BASE_INTERFACE(struct _basedecoder *)\r
+ \r
+ /*attaches scene to the decoder - a scene may be attached to several decoders of several types\r
+ (BIFS or other scene decoders, resource decoders (OD), etc.) \r
+ is: inline scene owning graph (and not just graph), defined in intern/terminal_dev.h. With inline scene\r
+ the complete terminal is exposed so there's pretty much everything doable in a scene decoder\r
+ @is_scene_root: set to true if this decoder is the root of the scene, false otherwise (either another decoder\r
+ or a re-entrant call, cf below)\r
+ This is called once upon creation of the decoder (several times if re-entrant)\r
+ */\r
+ GF_Err (*AttachScene)(struct _scenedecoder *, LPINLINESCENE is, Bool is_scene_root);\r
+ /*releases scene. If the decoder manages nodes / resources in the scene, \r
+ THESE MUST BE DESTROYED. May be NULL if decoder doesn't manage nodes but only create them (like BIFS, OD) and\r
+ doesn't have to be instructed the scene is about to be resumed\r
+ This is called each time the scene is about to be reset (eg, seek and destroy)\r
+ */\r
+ GF_Err (*ReleaseScene)(struct _scenedecoder *);\r
+ /*Process the scene data in inAU. \r
+ @inBuffer, inBufferLength: encoded input data (complete framing of encoded data)\r
+ @ES_ID: stream this data belongs to (scalable object)\r
+ @AU_Time: specifies the current AU time. This is usually unused, however is needed for decoders\r
+ handling the scene graph without input data (cf below). In this case the buffer passed is always NULL and the AU\r
+ time carries the time of the scene (or of the stream object attached to the scene decoder, cf below)\r
+ @mmlevel: speed indicator for the decoding - cf GF_CODEC_LEVEL_* above for values*/\r
+ GF_Err (*ProcessData)(struct _scenedecoder *, char *inBuffer, u32 inBufferLength,\r
+ u16 ES_ID, u32 AU_Time, u32 mmlevel);\r
+\r
+} GF_SceneDecoder;\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+#endif /*_GF_MODULE_CODEC_H_*/\r
+\r