author     Sebastian Dröge <slomo@circular-chaos.org>   2013-08-30 11:02:26 +0200
committer  Sebastian Dröge <slomo@circular-chaos.org>   2013-08-30 11:54:50 +0200
commit     807d998a1d2715b9af4ae116701b9f4d2040fae7 (patch)
tree       1a12ede5ec049faf1bf43f132c0d514a923588bc
parent     7c6bc31bc0a6e8f36d41a9f82467c135c9956236 (diff)

New upstream bugfix release. (debian/1.0.10-1)

-rw-r--r--  debian/changelog                            9
-rw-r--r--  debian/control                              6
-rw-r--r--  debian/patches/03_git-2013-04-26.patch   8453
-rw-r--r--  debian/patches/series                       1
4 files changed, 8466 insertions(+), 3 deletions(-)
diff --git a/debian/changelog b/debian/changelog
index 6cb7fc4..1ad9772 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,12 @@
+gst-libav1.0 (1.0.10-1) unstable; urgency=low
+
+ * New upstream bugfix release.
+ * debian/patches/03_git-2013-04-26.patch,
+ debian/control:
+ + Make it compile with libav 9.
+
+ -- Sebastian Dröge <slomo@debian.org> Fri, 30 Aug 2013 11:01:38 +0200
+
gst-libav1.0 (1.0.9-1) unstable; urgency=low
* New upstream bugfix release.
diff --git a/debian/control b/debian/control
index 826496b..39916ba 100644
--- a/debian/control
+++ b/debian/control
@@ -21,9 +21,9 @@ Build-Depends: debhelper (>= 9),
libgstreamer1.0-dev (>= 1.0.0),
libgstreamer-plugins-base1.0-dev (>= 1.0.0),
liborc-0.4-dev (>= 1:0.4.16),
- libavcodec-dev (>= 4:0.7),
- libavformat-dev (>= 4:0.7),
- libswscale-dev (>= 4:0.7),
+ libavcodec-dev (>= 6:9),
+ libavformat-dev (>= 6:9),
+ libswscale-dev (>= 6:9),
libbz2-dev,
yasm
Standards-Version: 3.9.3
diff --git a/debian/patches/03_git-2013-04-26.patch b/debian/patches/03_git-2013-04-26.patch
new file mode 100644
index 0000000..7abafc9
--- /dev/null
+++ b/debian/patches/03_git-2013-04-26.patch
@@ -0,0 +1,8453 @@
+diff --git a/ext/Makefile.am b/ext/Makefile.am
+index bbdb9f0..16718f6 100644
+--- a/ext/Makefile.am
++++ b/ext/Makefile.am
+@@ -1 +1,4 @@
+-SUBDIRS = libav libswscale
++# disable/skip avvideoscale until someone makes it work
++SUBDIRS = libav
++
++DIST_SUBDIRS = libav libswscale
+diff --git a/ext/libav/Makefile.am b/ext/libav/Makefile.am
+index a1e51f4..24d0eb9 100644
+--- a/ext/libav/Makefile.am
++++ b/ext/libav/Makefile.am
+@@ -10,9 +10,9 @@ libgstlibav_la_SOURCES = gstav.c \
+ gstavprotocol.c \
+ gstavcodecmap.c \
+ gstavutils.c \
+- gstavenc.c \
++ gstavaudenc.c \
+ gstavvidenc.c \
+- gstavdec.c \
++ gstavauddec.c \
+ gstavviddec.c \
+ gstavcfg.c \
+ gstavdemux.c \
+@@ -40,7 +40,9 @@ noinst_HEADERS = \
+ gstav.h \
+ gstavcodecmap.h \
+ gstavutils.h \
+- gstavenc.h \
++ gstavauddec.h \
++ gstavviddec.h \
++ gstavaudenc.h \
+ gstavvidenc.h \
+ gstavcfg.h \
+- gstavpipe.h
++ gstavprotocol.h
+diff --git a/ext/libav/gstav.c b/ext/libav/gstav.c
+index 8c88a13..7884a51 100644
+--- a/ext/libav/gstav.c
++++ b/ext/libav/gstav.c
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ /* First, include the header file for the plugin, to bring in the
+@@ -45,7 +45,7 @@ gst_ffmpeg_avcodec_open (AVCodecContext * avctx, AVCodec * codec)
+ int ret;
+
+ g_static_mutex_lock (&gst_avcodec_mutex);
+- ret = avcodec_open (avctx, codec);
++ ret = avcodec_open2 (avctx, codec, NULL);
+ g_static_mutex_unlock (&gst_avcodec_mutex);
+
+ return ret;
+@@ -69,7 +69,7 @@ gst_ffmpeg_av_find_stream_info (AVFormatContext * ic)
+ int ret;
+
+ g_static_mutex_lock (&gst_avcodec_mutex);
+- ret = av_find_stream_info (ic);
++ ret = avformat_find_stream_info (ic, NULL);
+ g_static_mutex_unlock (&gst_avcodec_mutex);
+
+ return ret;
+@@ -83,9 +83,6 @@ gst_ffmpeg_log_callback (void *ptr, int level, const char *fmt, va_list vl)
+ gint len = strlen (fmt);
+ gchar *fmt2 = NULL;
+
+- if (_shut_up_I_am_probing)
+- return;
+-
+ switch (level) {
+ case AV_LOG_QUIET:
+ gst_level = GST_LEVEL_NONE;
+@@ -117,10 +114,6 @@ gst_ffmpeg_log_callback (void *ptr, int level, const char *fmt, va_list vl)
+ }
+ #endif
+
+-#ifndef GST_DISABLE_GST_DEBUG
+-gboolean _shut_up_I_am_probing = FALSE;
+-#endif
+-
+ static gboolean
+ plugin_init (GstPlugin * plugin)
+ {
+@@ -145,13 +138,9 @@ plugin_init (GstPlugin * plugin)
+ gst_ffmpegscale_register (plugin);
+ #endif
+ #if 0
+- gst_ffmpegcsp_register (plugin);
+ gst_ffmpegaudioresample_register (plugin);
+ #endif
+
+- av_register_protocol2 (&gstreamer_protocol, sizeof (URLProtocol));
+- av_register_protocol2 (&gstpipe_protocol, sizeof (URLProtocol));
+-
+ /* Now we can return the pointer to the newly created Plugin object. */
+ return TRUE;
+ }
+diff --git a/ext/libav/gstav.h b/ext/libav/gstav.h
+index 5cd69a5..82e2972 100644
+--- a/ext/libav/gstav.h
++++ b/ext/libav/gstav.h
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ /* First, include the header file for the plugin, to bring in the
+@@ -36,21 +36,16 @@ GST_DEBUG_CATEGORY_EXTERN (ffmpeg_debug);
+
+ G_BEGIN_DECLS
+
+-#ifndef GST_DISABLE_GST_DEBUG
+-extern gboolean _shut_up_I_am_probing;
+-#endif
+-
+ extern gboolean gst_ffmpegdemux_register (GstPlugin * plugin);
+ extern gboolean gst_ffmpegauddec_register (GstPlugin * plugin);
+ extern gboolean gst_ffmpegviddec_register (GstPlugin * plugin);
+ extern gboolean gst_ffmpegaudenc_register (GstPlugin * plugin);
+ extern gboolean gst_ffmpegvidenc_register (GstPlugin * plugin);
+ extern gboolean gst_ffmpegmux_register (GstPlugin * plugin);
+-extern gboolean gst_ffmpegcsp_register (GstPlugin * plugin);
+ #if 0
+ extern gboolean gst_ffmpegscale_register (GstPlugin * plugin);
+-#endif
+ extern gboolean gst_ffmpegaudioresample_register (GstPlugin * plugin);
++#endif
+ extern gboolean gst_ffmpegdeinterlace_register (GstPlugin * plugin);
+
+ int gst_ffmpeg_avcodec_open (AVCodecContext *avctx, AVCodec *codec);
+@@ -59,9 +54,6 @@ int gst_ffmpeg_av_find_stream_info(AVFormatContext *ic);
+
+ G_END_DECLS
+
+-extern URLProtocol gstreamer_protocol;
+-extern URLProtocol gstpipe_protocol;
+-
+ /* use GST_FFMPEG URL_STREAMHEADER with URL_WRONLY if the first
+ * buffer should be used as streamheader property on the pad's caps. */
+ #define GST_FFMPEG_URL_STREAMHEADER 16
+diff --git a/ext/libav/gstavauddec.c b/ext/libav/gstavauddec.c
+new file mode 100644
+index 0000000..e6646af
+--- /dev/null
++++ b/ext/libav/gstavauddec.c
+@@ -0,0 +1,893 @@
++/* GStreamer
++ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
++ * Copyright (C) <2012> Collabora Ltd.
++ * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
++
++#include <assert.h>
++#include <string.h>
++
++#include <libavcodec/avcodec.h>
++
++#include <gst/gst.h>
++
++#include "gstav.h"
++#include "gstavcodecmap.h"
++#include "gstavutils.h"
++#include "gstavauddec.h"
++
++GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
++
++/* A number of function prototypes are given so we can refer to them later. */
++static void gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass);
++static void gst_ffmpegauddec_class_init (GstFFMpegAudDecClass * klass);
++static void gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec);
++static void gst_ffmpegauddec_finalize (GObject * object);
++
++static gboolean gst_ffmpegauddec_stop (GstAudioDecoder * decoder);
++static void gst_ffmpegauddec_flush (GstAudioDecoder * decoder, gboolean hard);
++static gboolean gst_ffmpegauddec_set_format (GstAudioDecoder * decoder,
++ GstCaps * caps);
++static GstFlowReturn gst_ffmpegauddec_handle_frame (GstAudioDecoder * decoder,
++ GstBuffer * inbuf);
++
++static gboolean gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec,
++ gboolean force);
++
++static void gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec);
++
++#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("avdec-params")
++
++static GstElementClass *parent_class = NULL;
++
++static void
++gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass)
++{
++ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
++ GstPadTemplate *sinktempl, *srctempl;
++ GstCaps *sinkcaps, *srccaps;
++ AVCodec *in_plugin;
++ gchar *longname, *description;
++
++ in_plugin =
++ (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
++ GST_FFDEC_PARAMS_QDATA);
++ g_assert (in_plugin != NULL);
++
++ /* construct the element details struct */
++ longname = g_strdup_printf ("libav %s decoder", in_plugin->long_name);
++ description = g_strdup_printf ("libav %s decoder", in_plugin->name);
++ gst_element_class_set_metadata (element_class, longname,
++ "Codec/Decoder/Audio", description,
++ "Wim Taymans <wim.taymans@gmail.com>, "
++ "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
++ "Edward Hervey <bilboed@bilboed.com>");
++ g_free (longname);
++ g_free (description);
++
++ /* get the caps */
++ sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
++ if (!sinkcaps) {
++ GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
++ sinkcaps = gst_caps_from_string ("unknown/unknown");
++ }
++ srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
++ in_plugin->id, FALSE, in_plugin);
++ if (!srccaps) {
++ GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
++ srccaps = gst_caps_from_string ("audio/x-raw");
++ }
++
++ /* pad templates */
++ sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
++ GST_PAD_ALWAYS, sinkcaps);
++ srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
++
++ gst_element_class_add_pad_template (element_class, srctempl);
++ gst_element_class_add_pad_template (element_class, sinktempl);
++
++ klass->in_plugin = in_plugin;
++ klass->srctempl = srctempl;
++ klass->sinktempl = sinktempl;
++}
++
++static void
++gst_ffmpegauddec_class_init (GstFFMpegAudDecClass * klass)
++{
++ GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
++ GstAudioDecoderClass *gstaudiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);
++
++ parent_class = g_type_class_peek_parent (klass);
++
++ gobject_class->finalize = gst_ffmpegauddec_finalize;
++
++ gstaudiodecoder_class->stop = GST_DEBUG_FUNCPTR (gst_ffmpegauddec_stop);
++ gstaudiodecoder_class->set_format =
++ GST_DEBUG_FUNCPTR (gst_ffmpegauddec_set_format);
++ gstaudiodecoder_class->handle_frame =
++ GST_DEBUG_FUNCPTR (gst_ffmpegauddec_handle_frame);
++ gstaudiodecoder_class->flush = GST_DEBUG_FUNCPTR (gst_ffmpegauddec_flush);
++}
++
++static void
++gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec)
++{
++ GstFFMpegAudDecClass *klass =
++ (GstFFMpegAudDecClass *) G_OBJECT_GET_CLASS (ffmpegdec);
++
++ /* some ffmpeg data */
++ ffmpegdec->context = avcodec_alloc_context3 (klass->in_plugin);
++ ffmpegdec->opened = FALSE;
++
++ gst_audio_decoder_set_drainable (GST_AUDIO_DECODER (ffmpegdec), TRUE);
++ gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (ffmpegdec), TRUE);
++}
++
++static void
++gst_ffmpegauddec_finalize (GObject * object)
++{
++ GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) object;
++
++ if (ffmpegdec->context != NULL)
++ av_free (ffmpegdec->context);
++ ffmpegdec->context = NULL;
++
++ G_OBJECT_CLASS (parent_class)->finalize (object);
++}
++
++/* With LOCK */
++static void
++gst_ffmpegauddec_close (GstFFMpegAudDec * ffmpegdec)
++{
++ GST_LOG_OBJECT (ffmpegdec, "closing libav codec");
++
++ gst_caps_replace (&ffmpegdec->last_caps, NULL);
++ gst_buffer_replace (&ffmpegdec->outbuf, NULL);
++
++ gst_ffmpeg_avcodec_close (ffmpegdec->context);
++ ffmpegdec->opened = FALSE;
++
++ if (ffmpegdec->context->extradata) {
++ av_free (ffmpegdec->context->extradata);
++ ffmpegdec->context->extradata = NULL;
++ }
++}
++
++static gboolean
++gst_ffmpegauddec_stop (GstAudioDecoder * decoder)
++{
++ GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) decoder;
++
++ GST_OBJECT_LOCK (ffmpegdec);
++ gst_ffmpegauddec_close (ffmpegdec);
++ GST_OBJECT_UNLOCK (ffmpegdec);
++ gst_audio_info_init (&ffmpegdec->info);
++ gst_caps_replace (&ffmpegdec->last_caps, NULL);
++
++ return TRUE;
++}
++
++/* with LOCK */
++static gboolean
++gst_ffmpegauddec_open (GstFFMpegAudDec * ffmpegdec)
++{
++ GstFFMpegAudDecClass *oclass;
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
++ goto could_not_open;
++
++ ffmpegdec->opened = TRUE;
++
++ GST_LOG_OBJECT (ffmpegdec, "Opened libav codec %s, id %d",
++ oclass->in_plugin->name, oclass->in_plugin->id);
++
++ gst_audio_info_init (&ffmpegdec->info);
++
++ return TRUE;
++
++ /* ERRORS */
++could_not_open:
++ {
++ gst_ffmpegauddec_close (ffmpegdec);
++ GST_DEBUG_OBJECT (ffmpegdec, "avdec_%s: Failed to open libav codec",
++ oclass->in_plugin->name);
++ return FALSE;
++ }
++}
++
++typedef struct
++{
++ GstBuffer *buffer;
++ GstMapInfo map;
++} BufferInfo;
++
++/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
++ * into. We try to give it memory from our pool */
++static int
++gst_ffmpegauddec_get_buffer (AVCodecContext * context, AVFrame * frame)
++{
++ GstFFMpegAudDec *ffmpegdec;
++ GstAudioInfo *info;
++ BufferInfo *buffer_info;
++
++ ffmpegdec = (GstFFMpegAudDec *) context->opaque;
++ if (G_UNLIKELY (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)))
++ goto negotiate_failed;
++
++ /* Always use the default allocator for planar audio formats because
++ * we will have to copy and deinterleave later anyway */
++ if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt))
++ goto fallback;
++
++ info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (ffmpegdec));
++
++ buffer_info = g_slice_new (BufferInfo);
++ buffer_info->buffer =
++ gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec),
++ frame->nb_samples * info->bpf);
++ gst_buffer_map (buffer_info->buffer, &buffer_info->map, GST_MAP_WRITE);
++ frame->opaque = buffer_info;
++ frame->data[0] = buffer_info->map.data;
++ frame->extended_data = frame->data;
++ frame->linesize[0] = buffer_info->map.size;
++ frame->type = FF_BUFFER_TYPE_USER;
++
++ return 0;
++ /* fallbacks */
++negotiate_failed:
++ {
++ GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
++ goto fallback;
++ }
++fallback:
++ {
++ return avcodec_default_get_buffer (context, frame);
++ }
++}
++
++static gboolean
++gst_ffmpegauddec_set_format (GstAudioDecoder * decoder, GstCaps * caps)
++{
++ GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) decoder;
++ GstFFMpegAudDecClass *oclass;
++ gboolean ret = TRUE;
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");
++
++ GST_OBJECT_LOCK (ffmpegdec);
++
++ if (ffmpegdec->last_caps && gst_caps_is_equal (ffmpegdec->last_caps, caps)) {
++ GST_DEBUG_OBJECT (ffmpegdec, "same caps");
++ GST_OBJECT_UNLOCK (ffmpegdec);
++ return TRUE;
++ }
++
++ gst_caps_replace (&ffmpegdec->last_caps, caps);
++
++ /* close old session */
++ if (ffmpegdec->opened) {
++ GST_OBJECT_UNLOCK (ffmpegdec);
++ gst_ffmpegauddec_drain (ffmpegdec);
++ GST_OBJECT_LOCK (ffmpegdec);
++ gst_ffmpegauddec_close (ffmpegdec);
++ }
++
++ /* get size and so */
++ gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
++ oclass->in_plugin->type, caps, ffmpegdec->context);
++
++ /* workaround encoder bugs */
++ ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
++ ffmpegdec->context->err_recognition = 1;
++
++ ffmpegdec->context->opaque = ffmpegdec;
++ ffmpegdec->context->get_buffer = gst_ffmpegauddec_get_buffer;
++ ffmpegdec->context->reget_buffer = NULL;
++ ffmpegdec->context->release_buffer = NULL;
++
++ /* open codec - we don't select an output pix_fmt yet,
++ * simply because we don't know! We only get it
++ * during playback... */
++ if (!gst_ffmpegauddec_open (ffmpegdec))
++ goto open_failed;
++
++done:
++ GST_OBJECT_UNLOCK (ffmpegdec);
++
++ return ret;
++
++ /* ERRORS */
++open_failed:
++ {
++ GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
++ ret = FALSE;
++ goto done;
++ }
++}
++
++static gboolean
++gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec, gboolean force)
++{
++ GstFFMpegAudDecClass *oclass;
++ gint depth;
++ GstAudioFormat format;
++ GstAudioChannelPosition pos[64] = { 0, };
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ depth = av_smp_format_depth (ffmpegdec->context->sample_fmt) * 8;
++ format = gst_ffmpeg_smpfmt_to_audioformat (ffmpegdec->context->sample_fmt);
++ if (format == GST_AUDIO_FORMAT_UNKNOWN)
++ goto no_caps;
++
++ if (!force && ffmpegdec->info.rate ==
++ ffmpegdec->context->sample_rate &&
++ ffmpegdec->info.channels == ffmpegdec->context->channels &&
++ ffmpegdec->info.finfo->depth == depth)
++ return TRUE;
++
++ GST_DEBUG_OBJECT (ffmpegdec,
++ "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
++ ffmpegdec->info.rate, ffmpegdec->info.channels,
++ ffmpegdec->info.finfo->depth,
++ ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
++
++ gst_ffmpeg_channel_layout_to_gst (ffmpegdec->context->channel_layout,
++ ffmpegdec->context->channels, pos);
++ memcpy (ffmpegdec->ffmpeg_layout, pos,
++ sizeof (GstAudioChannelPosition) * ffmpegdec->context->channels);
++
++ /* Get GStreamer channel layout */
++ gst_audio_channel_positions_to_valid_order (pos,
++ ffmpegdec->context->channels);
++ ffmpegdec->needs_reorder =
++ memcmp (pos, ffmpegdec->ffmpeg_layout,
++ sizeof (pos[0]) * ffmpegdec->context->channels) != 0;
++ gst_audio_info_set_format (&ffmpegdec->info, format,
++ ffmpegdec->context->sample_rate, ffmpegdec->context->channels, pos);
++
++ if (!gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (ffmpegdec),
++ &ffmpegdec->info))
++ goto caps_failed;
++
++ return TRUE;
++
++ /* ERRORS */
++no_caps:
++ {
++#ifdef HAVE_LIBAV_UNINSTALLED
++ /* using internal ffmpeg snapshot */
++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
++ ("Could not find GStreamer caps mapping for libav codec '%s'.",
++ oclass->in_plugin->name), (NULL));
++#else
++ /* using external ffmpeg */
++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
++ ("Could not find GStreamer caps mapping for libav codec '%s', and "
++ "you are using an external libavcodec. This is most likely due to "
++ "a packaging problem and/or libavcodec having been upgraded to a "
++ "version that is not compatible with this version of "
++ "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
++ "packages come from the same source/repository.",
++ oclass->in_plugin->name), (NULL));
++#endif
++ return FALSE;
++ }
++caps_failed:
++ {
++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
++ ("Could not set caps for libav decoder (%s), not fixed?",
++ oclass->in_plugin->name));
++
++ return FALSE;
++ }
++}
++
++static void
++gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
++{
++ memset (packet, 0, sizeof (AVPacket));
++ packet->data = data;
++ packet->size = size;
++}
++
++static gint
++gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
++ AVCodec * in_plugin, guint8 * data, guint size,
++ GstBuffer ** outbuf, GstFlowReturn * ret)
++{
++ gint len = -1;
++ gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
++ AVPacket packet;
++ AVFrame frame;
++
++ GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);
++
++ gst_avpacket_init (&packet, data, size);
++ memset (&frame, 0, sizeof (frame));
++ avcodec_get_frame_defaults (&frame);
++ len = avcodec_decode_audio4 (ffmpegdec->context, &frame, &have_data, &packet);
++
++ GST_DEBUG_OBJECT (ffmpegdec,
++ "Decode audio: len=%d, have_data=%d", len, have_data);
++
++ if (len >= 0 && have_data > 0) {
++ BufferInfo *buffer_info = frame.opaque;
++ gint nsamples, channels, byte_per_sample;
++ gsize output_size;
++
++ if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
++ *outbuf = NULL;
++ *ret = GST_FLOW_NOT_NEGOTIATED;
++ len = -1;
++ goto beach;
++ }
++
++ channels = ffmpegdec->info.channels;
++ nsamples = frame.nb_samples;
++ byte_per_sample = ffmpegdec->info.finfo->width / 8;
++
++ /* frame.linesize[0] might contain padding, allocate only what's needed */
++ output_size = nsamples * byte_per_sample * channels;
++
++ GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
++ if (buffer_info) {
++ *outbuf = buffer_info->buffer;
++ gst_buffer_unmap (buffer_info->buffer, &buffer_info->map);
++ g_slice_free (BufferInfo, buffer_info);
++ frame.opaque = NULL;
++ } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
++ && channels > 1) {
++ gint i, j;
++ GstMapInfo minfo;
++
++ /* note: linesize[0] might contain padding, allocate only what's needed */
++ *outbuf =
++ gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
++ (ffmpegdec), output_size);
++
++ gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);
++
++ switch (ffmpegdec->info.finfo->width) {
++ case 8:{
++ guint8 *odata = minfo.data;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ odata[j] = ((const guint8 *) frame.extended_data[j])[i];
++ }
++ odata += channels;
++ }
++ break;
++ }
++ case 16:{
++ guint16 *odata = (guint16 *) minfo.data;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ odata[j] = ((const guint16 *) frame.extended_data[j])[i];
++ }
++ odata += channels;
++ }
++ break;
++ }
++ case 32:{
++ guint32 *odata = (guint32 *) minfo.data;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ odata[j] = ((const guint32 *) frame.extended_data[j])[i];
++ }
++ odata += channels;
++ }
++ break;
++ }
++ case 64:{
++ guint64 *odata = (guint64 *) minfo.data;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ odata[j] = ((const guint64 *) frame.extended_data[j])[i];
++ }
++ odata += channels;
++ }
++ break;
++ }
++ default:
++ g_assert_not_reached ();
++ break;
++ }
++ gst_buffer_unmap (*outbuf, &minfo);
++ } else {
++ *outbuf =
++ gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
++ (ffmpegdec), output_size);
++ gst_buffer_fill (*outbuf, 0, frame.data[0], output_size);
++ }
++
++ GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data);
++
++ /* Reorder channels to the GStreamer channel order */
++ if (ffmpegdec->needs_reorder) {
++ *outbuf = gst_buffer_make_writable (*outbuf);
++ gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format,
++ ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout,
++ ffmpegdec->info.position);
++ }
++ } else {
++ *outbuf = NULL;
++ }
++
++beach:
++ GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
++ *ret, *outbuf, len);
++ return len;
++}
++
++/* gst_ffmpegauddec_frame:
++ * ffmpegdec:
++ * data: pointer to the data to decode
++ * size: size of data in bytes
++ * got_data: 0 if no data was decoded, != 0 otherwise.
++ * in_time: timestamp of data
++ * in_duration: duration of data
++ * ret: GstFlowReturn to return in the chain function
++ *
++ * Decode the given frame and pushes it downstream.
++ *
++ * Returns: Number of bytes used in decoding, -1 on error/failure.
++ */
++
++static gint
++gst_ffmpegauddec_frame (GstFFMpegAudDec * ffmpegdec,
++ guint8 * data, guint size, gint * got_data, GstFlowReturn * ret)
++{
++ GstFFMpegAudDecClass *oclass;
++ GstBuffer *outbuf = NULL;
++ gint have_data = 0, len = 0;
++
++ if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
++ goto no_codec;
++
++ GST_LOG_OBJECT (ffmpegdec, "data:%p, size:%d", data, size);
++
++ *ret = GST_FLOW_OK;
++ ffmpegdec->context->frame_number++;
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ len =
++ gst_ffmpegauddec_audio_frame (ffmpegdec, oclass->in_plugin, data, size,
++ &outbuf, ret);
++
++ if (outbuf)
++ have_data = 1;
++
++ if (len < 0 || have_data < 0) {
++ GST_WARNING_OBJECT (ffmpegdec,
++ "avdec_%s: decoding error (len: %d, have_data: %d)",
++ oclass->in_plugin->name, len, have_data);
++ *got_data = 0;
++ goto beach;
++ } else if (len == 0 && have_data == 0) {
++ *got_data = 0;
++ goto beach;
++ } else {
++ /* this is where I lost my last clue on ffmpeg... */
++ *got_data = 1;
++ }
++
++ if (outbuf) {
++ GST_LOG_OBJECT (ffmpegdec, "Decoded data, now storing buffer %p", outbuf);
++
++ if (ffmpegdec->outbuf)
++ ffmpegdec->outbuf = gst_buffer_append (ffmpegdec->outbuf, outbuf);
++ else
++ ffmpegdec->outbuf = outbuf;
++ } else {
++ GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
++ }
++
++beach:
++ return len;
++
++ /* ERRORS */
++no_codec:
++ {
++ GST_ERROR_OBJECT (ffmpegdec, "no codec context");
++ return -1;
++ }
++}
++
++static void
++gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec)
++{
++ GstFFMpegAudDecClass *oclass;
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
++ gint have_data, len, try = 0;
++
++ GST_LOG_OBJECT (ffmpegdec,
++ "codec has delay capabilities, calling until libav has drained everything");
++
++ do {
++ GstFlowReturn ret;
++
++ len = gst_ffmpegauddec_frame (ffmpegdec, NULL, 0, &have_data, &ret);
++ if (len < 0 || have_data == 0)
++ break;
++ } while (try++ < 10);
++ }
++
++ if (ffmpegdec->outbuf)
++ gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (ffmpegdec),
++ ffmpegdec->outbuf, 1);
++ ffmpegdec->outbuf = NULL;
++}
++
++static void
++gst_ffmpegauddec_flush (GstAudioDecoder * decoder, gboolean hard)
++{
++ GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) decoder;
++
++ if (ffmpegdec->opened) {
++ avcodec_flush_buffers (ffmpegdec->context);
++ }
++}
++
++static GstFlowReturn
++gst_ffmpegauddec_handle_frame (GstAudioDecoder * decoder, GstBuffer * inbuf)
++{
++ GstFFMpegAudDec *ffmpegdec;
++ GstFFMpegAudDecClass *oclass;
++ guint8 *data, *bdata;
++ GstMapInfo map;
++ gint size, bsize, len, have_data;
++ GstFlowReturn ret = GST_FLOW_OK;
++
++ ffmpegdec = (GstFFMpegAudDec *) decoder;
++
++ if (G_UNLIKELY (!ffmpegdec->opened))
++ goto not_negotiated;
++
++ if (inbuf == NULL) {
++ gst_ffmpegauddec_drain (ffmpegdec);
++ return GST_FLOW_OK;
++ }
++
++ inbuf = gst_buffer_ref (inbuf);
++
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++
++ GST_LOG_OBJECT (ffmpegdec,
++ "Received new data of size %" G_GSIZE_FORMAT ", offset:%" G_GUINT64_FORMAT
++ ", ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
++ gst_buffer_get_size (inbuf), GST_BUFFER_OFFSET (inbuf),
++ GST_TIME_ARGS (GST_BUFFER_PTS (inbuf)),
++ GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)));
++
++ /* workarounds, functions write to buffers:
++ * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
++ * libavcodec/svq3.c:svq3_decode_slice_header too.
++ * ffmpeg devs know about it and will fix it (they said). */
++ if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
++ oclass->in_plugin->id == CODEC_ID_SVQ3) {
++ inbuf = gst_buffer_make_writable (inbuf);
++ }
++
++ gst_buffer_map (inbuf, &map, GST_MAP_READ);
++
++ bdata = map.data;
++ bsize = map.size;
++
++ do {
++ data = bdata;
++ size = bsize;
++
++ /* decode a frame of audio now */
++ len = gst_ffmpegauddec_frame (ffmpegdec, data, size, &have_data, &ret);
++
++ if (ret != GST_FLOW_OK) {
++ GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
++ gst_flow_get_name (ret));
++ /* bad flow return, make sure we discard all data and exit */
++ bsize = 0;
++ break;
++ }
++
++ if (len == 0 && !have_data) {
++ /* nothing was decoded, this could be because no data was available or
++ * because we were skipping frames.
++ * If we have no context we must exit and wait for more data, we keep the
++ * data we tried. */
++ GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
++ break;
++ } else if (len < 0) {
++ /* a decoding error happened, we must break and try again with next data. */
++ GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
++ bsize = 0;
++ break;
++ }
++ /* prepare for the next round, for codecs with a context we did this
++ * already when using the parser. */
++ bsize -= len;
++ bdata += len;
++
++ GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
++ bsize, bdata);
++ } while (bsize > 0);
++
++ gst_buffer_unmap (inbuf, &map);
++ gst_buffer_unref (inbuf);
++
++ if (ffmpegdec->outbuf)
++ ret =
++ gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (ffmpegdec),
++ ffmpegdec->outbuf, 1);
++ ffmpegdec->outbuf = NULL;
++
++ if (bsize > 0) {
++ GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
++ }
++
++ return ret;
++
++ /* ERRORS */
++not_negotiated:
++ {
++ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
++ ("avdec_%s: input format was not set before data start",
++ oclass->in_plugin->name));
++ return GST_FLOW_NOT_NEGOTIATED;
++ }
++}
++
++gboolean
++gst_ffmpegauddec_register (GstPlugin * plugin)
++{
++ GTypeInfo typeinfo = {
++ sizeof (GstFFMpegAudDecClass),
++ (GBaseInitFunc) gst_ffmpegauddec_base_init,
++ NULL,
++ (GClassInitFunc) gst_ffmpegauddec_class_init,
++ NULL,
++ NULL,
++ sizeof (GstFFMpegAudDec),
++ 0,
++ (GInstanceInitFunc) gst_ffmpegauddec_init,
++ };
++ GType type;
++ AVCodec *in_plugin;
++ gint rank;
++
++ in_plugin = av_codec_next (NULL);
++
++ GST_LOG ("Registering decoders");
++
++ while (in_plugin) {
++ gchar *type_name;
++ gchar *plugin_name;
++
++ /* only decoders */
++ if (!av_codec_is_decoder (in_plugin)
++ || in_plugin->type != AVMEDIA_TYPE_AUDIO) {
++ goto next;
++ }
++
++ /* no quasi-codecs, please */
++ if (in_plugin->id >= CODEC_ID_PCM_S16LE &&
++ in_plugin->id <= CODEC_ID_PCM_BLURAY) {
++ goto next;
++ }
++
++ /* No decoders depending on external libraries (we don't build them, but
++ * people who build against an external ffmpeg might have them.
++ * We have native gstreamer plugins for all of those libraries anyway. */
++ if (!strncmp (in_plugin->name, "lib", 3)) {
++ GST_DEBUG
++ ("Not using external library decoder %s. Use the gstreamer-native ones instead.",
++ in_plugin->name);
++ goto next;
++ }
++
++ GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
++
++ /* no codecs for which we're GUARANTEED to have better alternatives */
++ /* MP1 : Use MP3 for decoding */
++ /* MP2 : Use MP3 for decoding */
++ /* Theora: Use libtheora based theoradec */
++ if (!strcmp (in_plugin->name, "vorbis") ||
++ !strcmp (in_plugin->name, "wavpack") ||
++ !strcmp (in_plugin->name, "mp1") ||
++ !strcmp (in_plugin->name, "mp2") ||
++ !strcmp (in_plugin->name, "libfaad") ||
++ !strcmp (in_plugin->name, "mpeg4aac") ||
++ !strcmp (in_plugin->name, "ass") ||
++ !strcmp (in_plugin->name, "srt") ||
++ !strcmp (in_plugin->name, "pgssub") ||
++ !strcmp (in_plugin->name, "dvdsub") ||
++ !strcmp (in_plugin->name, "dvbsub")) {
++ GST_LOG ("Ignoring decoder %s", in_plugin->name);
++ goto next;
++ }
++
++ /* construct the type */
++ plugin_name = g_strdup ((gchar *) in_plugin->name);
++ g_strdelimit (plugin_name, NULL, '_');
++ type_name = g_strdup_printf ("avdec_%s", plugin_name);
++ g_free (plugin_name);
++
++ type = g_type_from_name (type_name);
++
++ if (!type) {
++ /* create the gtype now */
++ type =
++ g_type_register_static (GST_TYPE_AUDIO_DECODER, type_name, &typeinfo,
++ 0);
++ g_type_set_qdata (type, GST_FFDEC_PARAMS_QDATA, (gpointer) in_plugin);
++ }
++
++ /* (Ronald) MPEG-4 gets a higher priority because it has been well-
++ * tested and by far outperforms divxdec/xviddec - so we prefer it.
++ * msmpeg4v3 same, as it outperforms divxdec for divx3 playback.
++ * VC1/WMV3 are not working and thus unpreferred for now. */
++ switch (in_plugin->id) {
++ case CODEC_ID_RA_144:
++ case CODEC_ID_RA_288:
++ case CODEC_ID_COOK:
++ rank = GST_RANK_PRIMARY;
++ break;
++ /* SIPR: decoder should have a higher rank than realaudiodec.
++ */
++ case CODEC_ID_SIPR:
++ rank = GST_RANK_SECONDARY;
++ break;
++ case CODEC_ID_MP3:
++ rank = GST_RANK_NONE;
++ break;
++ default:
++ rank = GST_RANK_MARGINAL;
++ break;
++ }
++ if (!gst_element_register (plugin, type_name, rank, type)) {
++ g_warning ("Failed to register %s", type_name);
++ g_free (type_name);
++ return FALSE;
++ }
++
++ g_free (type_name);
++
++ next:
++ in_plugin = av_codec_next (in_plugin);
++ }
++
++ GST_LOG ("Finished Registering decoders");
++
++ return TRUE;
++}
+diff --git a/ext/libav/gstavauddec.h b/ext/libav/gstavauddec.h
+new file mode 100644
+index 0000000..23d11fd
+--- /dev/null
++++ b/ext/libav/gstavauddec.h
+@@ -0,0 +1,73 @@
++/* GStreamer
++ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++#ifndef __GST_FFMPEGAUDDEC_H__
++#define __GST_FFMPEGAUDDEC_H__
++
++G_BEGIN_DECLS
++
++#include <gst/gst.h>
++#include <gst/audio/audio.h>
++#include <gst/audio/gstaudiodecoder.h>
++#include <libavcodec/avcodec.h>
++
++typedef struct _GstFFMpegAudDec GstFFMpegAudDec;
++struct _GstFFMpegAudDec
++{
++ GstAudioDecoder parent;
++
++ /* decoding */
++ AVCodecContext *context;
++ gboolean opened;
++
++ /* prevent reopening the decoder on GST_EVENT_CAPS when caps are same as last time. */
++ GstCaps *last_caps;
++
++ /* Stores current buffers to push as GstAudioDecoder wants 1:1 mapping for input/output buffers */
++ GstBuffer *outbuf;
++
++ /* current output format */
++ GstAudioInfo info;
++ GstAudioChannelPosition ffmpeg_layout[64];
++ gboolean needs_reorder;
++};
++
++typedef struct _GstFFMpegAudDecClass GstFFMpegAudDecClass;
++
++struct _GstFFMpegAudDecClass
++{
++ GstAudioDecoderClass parent_class;
++
++ AVCodec *in_plugin;
++ GstPadTemplate *srctempl, *sinktempl;
++};
++
++#define GST_TYPE_FFMPEGDEC \
++ (gst_ffmpegauddec_get_type())
++#define GST_FFMPEGDEC(obj) \
++ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegAudDec))
++#define GST_FFMPEGAUDDEC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegAudDecClass))
++#define GST_IS_FFMPEGDEC(obj) \
++ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
++#define GST_IS_FFMPEGAUDDEC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
++
++G_END_DECLS
++
++#endif
+diff --git a/ext/libav/gstavaudenc.c b/ext/libav/gstavaudenc.c
+new file mode 100644
+index 0000000..2db08bb
+--- /dev/null
++++ b/ext/libav/gstavaudenc.c
+@@ -0,0 +1,752 @@
++/* GStreamer
++ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
++ * Copyright (C) <2012> Collabora Ltd.
++ * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
++
++#include <assert.h>
++#include <string.h>
++/* for stats file handling */
++#include <stdio.h>
++#include <glib/gstdio.h>
++#include <errno.h>
++
++#include <libavcodec/avcodec.h>
++
++#include <gst/gst.h>
++
++#include "gstav.h"
++#include "gstavcodecmap.h"
++#include "gstavutils.h"
++#include "gstavaudenc.h"
++
++#define DEFAULT_AUDIO_BITRATE 128000
++
++enum
++{
++ /* FILL ME */
++ LAST_SIGNAL
++};
++
++enum
++{
++ PROP_0,
++ PROP_BIT_RATE,
++ PROP_RTP_PAYLOAD_SIZE,
++};
++
++/* A number of function prototypes are given so we can refer to them later. */
++static void gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass);
++static void gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass);
++static void gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc);
++static void gst_ffmpegaudenc_finalize (GObject * object);
++
++static GstCaps *gst_ffmpegaudenc_getcaps (GstAudioEncoder * encoder,
++ GstCaps * filter);
++static gboolean gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder,
++ GstAudioInfo * info);
++static GstFlowReturn gst_ffmpegaudenc_handle_frame (GstAudioEncoder * encoder,
++ GstBuffer * inbuf);
++static gboolean gst_ffmpegaudenc_stop (GstAudioEncoder * encoder);
++static void gst_ffmpegaudenc_flush (GstAudioEncoder * encoder);
++
++static void gst_ffmpegaudenc_set_property (GObject * object,
++ guint prop_id, const GValue * value, GParamSpec * pspec);
++static void gst_ffmpegaudenc_get_property (GObject * object,
++ guint prop_id, GValue * value, GParamSpec * pspec);
++
++#define GST_FFENC_PARAMS_QDATA g_quark_from_static_string("avenc-params")
++
++static GstElementClass *parent_class = NULL;
++
++/*static guint gst_ffmpegaudenc_signals[LAST_SIGNAL] = { 0 }; */
++
++static void
++gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass)
++{
++ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
++ AVCodec *in_plugin;
++ GstPadTemplate *srctempl = NULL, *sinktempl = NULL;
++ GstCaps *srccaps = NULL, *sinkcaps = NULL;
++ gchar *longname, *description;
++
++ in_plugin =
++ (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
++ GST_FFENC_PARAMS_QDATA);
++ g_assert (in_plugin != NULL);
++
++ /* construct the element details struct */
++ longname = g_strdup_printf ("libav %s encoder", in_plugin->long_name);
++ description = g_strdup_printf ("libav %s encoder", in_plugin->name);
++ gst_element_class_set_metadata (element_class, longname,
++ "Codec/Encoder/Audio", description,
++ "Wim Taymans <wim.taymans@gmail.com>, "
++ "Ronald Bultje <rbultje@ronald.bitfreak.net>");
++ g_free (longname);
++ g_free (description);
++
++ if (!(srccaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, TRUE))) {
++ GST_DEBUG ("Couldn't get source caps for encoder '%s'", in_plugin->name);
++ srccaps = gst_caps_new_empty_simple ("unknown/unknown");
++ }
++
++ sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
++ in_plugin->id, TRUE, in_plugin);
++ if (!sinkcaps) {
++ GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
++ sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
++ }
++
++ /* pad templates */
++ sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
++ GST_PAD_ALWAYS, sinkcaps);
++ srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
++
++ gst_element_class_add_pad_template (element_class, srctempl);
++ gst_element_class_add_pad_template (element_class, sinktempl);
++
++ klass->in_plugin = in_plugin;
++ klass->srctempl = srctempl;
++ klass->sinktempl = sinktempl;
++
++ return;
++}
++
++static void
++gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass)
++{
++ GObjectClass *gobject_class;
++ GstAudioEncoderClass *gstaudioencoder_class;
++
++ gobject_class = (GObjectClass *) klass;
++ gstaudioencoder_class = (GstAudioEncoderClass *) klass;
++
++ parent_class = g_type_class_peek_parent (klass);
++
++ gobject_class->set_property = gst_ffmpegaudenc_set_property;
++ gobject_class->get_property = gst_ffmpegaudenc_get_property;
++
++ /* FIXME: could use -1 for a sensible per-codec defaults */
++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BIT_RATE,
++ g_param_spec_int ("bitrate", "Bit Rate",
++ "Target Audio Bitrate", 0, G_MAXINT, DEFAULT_AUDIO_BITRATE,
++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
++
++ gobject_class->finalize = gst_ffmpegaudenc_finalize;
++
++ gstaudioencoder_class->stop = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_stop);
++ gstaudioencoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_getcaps);
++ gstaudioencoder_class->flush = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_flush);
++ gstaudioencoder_class->set_format =
++ GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_set_format);
++ gstaudioencoder_class->handle_frame =
++ GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_handle_frame);
++}
++
++static void
++gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc)
++{
++ GstFFMpegAudEncClass *klass =
++ (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
++
++ /* ffmpeg objects */
++ ffmpegaudenc->context = avcodec_alloc_context3 (klass->in_plugin);
++ ffmpegaudenc->opened = FALSE;
++
++ gst_audio_encoder_set_drainable (GST_AUDIO_ENCODER (ffmpegaudenc), TRUE);
++}
++
++static void
++gst_ffmpegaudenc_finalize (GObject * object)
++{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) object;
++
++ /* clean up remaining allocated data */
++ av_free (ffmpegaudenc->context);
++
++ G_OBJECT_CLASS (parent_class)->finalize (object);
++}
++
++static gboolean
++gst_ffmpegaudenc_stop (GstAudioEncoder * encoder)
++{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
++
++ /* close old session */
++ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
++ ffmpegaudenc->opened = FALSE;
++
++ return TRUE;
++}
++
++static void
++gst_ffmpegaudenc_flush (GstAudioEncoder * encoder)
++{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
++
++ if (ffmpegaudenc->opened) {
++ avcodec_flush_buffers (ffmpegaudenc->context);
++ }
++}
++
++static GstCaps *
++gst_ffmpegaudenc_getcaps (GstAudioEncoder * encoder, GstCaps * filter)
++{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
++ GstCaps *caps = NULL;
++
++ GST_DEBUG_OBJECT (ffmpegaudenc, "getting caps");
++
++ /* audio needs no special care */
++ caps = gst_audio_encoder_proxy_getcaps (encoder, NULL, filter);
++
++ GST_DEBUG_OBJECT (ffmpegaudenc, "audio caps, return %" GST_PTR_FORMAT, caps);
++
++ return caps;
++}
++
++static gboolean
++gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
++{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
++ GstCaps *other_caps;
++ GstCaps *allowed_caps;
++ GstCaps *icaps;
++ gsize frame_size;
++ GstFFMpegAudEncClass *oclass =
++ (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
++
++ /* close old session */
++ if (ffmpegaudenc->opened) {
++ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
++ ffmpegaudenc->opened = FALSE;
++ }
++
++ /* if we set it in _getcaps we should set it also in _link */
++ ffmpegaudenc->context->strict_std_compliance = -1;
++
++ /* user defined properties */
++ if (ffmpegaudenc->bitrate > 0) {
++ GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
++ ffmpegaudenc->bitrate);
++ ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
++ ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
++ } else {
++ GST_INFO_OBJECT (ffmpegaudenc, "Using avcontext default bitrate %d",
++ ffmpegaudenc->context->bit_rate);
++ }
++
++ /* RTP payload used for GOB production (for Asterisk) */
++ if (ffmpegaudenc->rtp_payload_size) {
++ ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
++ }
++
++ /* some other defaults */
++ ffmpegaudenc->context->rc_strategy = 2;
++ ffmpegaudenc->context->b_frame_strategy = 0;
++ ffmpegaudenc->context->coder_type = 0;
++ ffmpegaudenc->context->context_model = 0;
++ ffmpegaudenc->context->scenechange_threshold = 0;
++ ffmpegaudenc->context->inter_threshold = 0;
++
++ /* fetch pix_fmt and so on */
++ gst_ffmpeg_audioinfo_to_context (info, ffmpegaudenc->context);
++ if (!ffmpegaudenc->context->time_base.den) {
++ ffmpegaudenc->context->time_base.den = GST_AUDIO_INFO_RATE (info);
++ ffmpegaudenc->context->time_base.num = 1;
++ ffmpegaudenc->context->ticks_per_frame = 1;
++ }
++
++ if (ffmpegaudenc->context->channel_layout) {
++ gst_ffmpeg_channel_layout_to_gst (ffmpegaudenc->context->channel_layout,
++ ffmpegaudenc->context->channels, ffmpegaudenc->ffmpeg_layout);
++ ffmpegaudenc->needs_reorder =
++ (memcmp (ffmpegaudenc->ffmpeg_layout, info->position,
++ sizeof (GstAudioChannelPosition) *
++ ffmpegaudenc->context->channels) != 0);
++ }
++
++ /* open codec */
++ if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
++ if (ffmpegaudenc->context->priv_data)
++ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
++ GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
++ oclass->in_plugin->name);
++ return FALSE;
++ }
++
++ /* some codecs support more than one format, first auto-choose one */
++ GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
++ allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
++ if (!allowed_caps) {
++ GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
++ /* we need to copy because get_allowed_caps returns a ref, and
++ * get_pad_template_caps doesn't */
++ allowed_caps =
++ gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
++ }
++ GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
++ gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
++ oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);
++
++ /* try to set this caps on the other side */
++ other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
++ ffmpegaudenc->context, TRUE);
++
++ if (!other_caps) {
++ gst_caps_unref (allowed_caps);
++ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
++ GST_DEBUG ("Unsupported codec - no caps found");
++ return FALSE;
++ }
++
++ icaps = gst_caps_intersect (allowed_caps, other_caps);
++ gst_caps_unref (allowed_caps);
++ gst_caps_unref (other_caps);
++ if (gst_caps_is_empty (icaps)) {
++ gst_caps_unref (icaps);
++ return FALSE;
++ }
++ icaps = gst_caps_truncate (icaps);
++
++ if (!gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (ffmpegaudenc),
++ icaps)) {
++ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
++ gst_caps_unref (icaps);
++ return FALSE;
++ }
++ gst_caps_unref (icaps);
++
++ frame_size = ffmpegaudenc->context->frame_size;
++ if (frame_size > 1) {
++ gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
++ frame_size);
++ gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
++ frame_size);
++ gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 1);
++ } else {
++ gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
++ 0);
++ gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
++ 0);
++ gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 0);
++ }
++
++ /* success! */
++ ffmpegaudenc->opened = TRUE;
++
++ return TRUE;
++}
++
++
++static GstFlowReturn
++gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
++ guint8 * audio_in, guint in_size, gint * have_data)
++{
++ GstAudioEncoder *enc;
++ AVCodecContext *ctx;
++ gint res;
++ GstFlowReturn ret;
++ GstAudioInfo *info;
++ AVPacket pkt;
++ AVFrame frame;
++ gboolean planar;
++
++ enc = GST_AUDIO_ENCODER (ffmpegaudenc);
++
++ ctx = ffmpegaudenc->context;
++
++ GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer ");
++
++ memset (&pkt, 0, sizeof (pkt));
++ memset (&frame, 0, sizeof (frame));
++ avcodec_get_frame_defaults (&frame);
++
++ info = gst_audio_encoder_get_audio_info (enc);
++ planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
++
++ if (planar && info->channels > 1) {
++ gint channels, nsamples;
++ gint i, j;
++
++ nsamples = frame.nb_samples = in_size / info->bpf;
++ channels = info->channels;
++
++ if (info->channels > AV_NUM_DATA_POINTERS) {
++ frame.extended_data = g_new (uint8_t *, info->channels);
++ } else {
++ frame.extended_data = frame.data;
++ }
++
++ frame.extended_data[0] = g_malloc (in_size);
++ frame.linesize[0] = in_size / channels;
++ for (i = 1; i < channels; i++)
++ frame.extended_data[i] = frame.extended_data[i - 1] + frame.linesize[0];
++
++ switch (info->finfo->width) {
++ case 8:{
++ const guint8 *idata = (const guint8 *) audio_in;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ ((guint8 *) frame.extended_data[j])[i] = idata[j];
++ }
++ idata += channels;
++ }
++ break;
++ }
++ case 16:{
++ const guint16 *idata = (const guint16 *) audio_in;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ ((guint16 *) frame.extended_data[j])[i] = idata[j];
++ }
++ idata += channels;
++ }
++ break;
++ }
++ case 32:{
++ const guint32 *idata = (const guint32 *) audio_in;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ ((guint32 *) frame.extended_data[j])[i] = idata[j];
++ }
++ idata += channels;
++ }
++
++ break;
++ }
++ case 64:{
++ const guint64 *idata = (const guint64 *) audio_in;
++
++ for (i = 0; i < nsamples; i++) {
++ for (j = 0; j < channels; j++) {
++ ((guint64 *) frame.extended_data[j])[i] = idata[j];
++ }
++ idata += channels;
++ }
++
++ break;
++ }
++ default:
++ g_assert_not_reached ();
++ break;
++ }
++
++ } else {
++ frame.data[0] = audio_in;
++ frame.extended_data = frame.data;
++ frame.linesize[0] = in_size;
++ frame.nb_samples = in_size / info->bpf;
++ }
++
++ res = avcodec_encode_audio2 (ctx, &pkt, &frame, have_data);
++ if (planar && info->channels > 1)
++ g_free (frame.data[0]);
++ if (frame.extended_data != frame.data)
++ g_free (frame.extended_data);
++
++ if (res < 0) {
++ char error_str[128] = { 0, };
++
++ av_strerror (res, error_str, sizeof (error_str));
++ GST_ERROR_OBJECT (enc, "Failed to encode buffer: %d - %s", res, error_str);
++ return GST_FLOW_OK;
++ }
++ GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);
++
++ if (*have_data) {
++ GstBuffer *outbuf;
++ const AVCodec *codec;
++
++ GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d", pkt.size);
++
++ outbuf =
++ gst_buffer_new_wrapped_full (0, pkt.data, pkt.size, 0, pkt.size,
++ pkt.data, av_free);
++
++ codec = ffmpegaudenc->context->codec;
++ if ((codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
++ ret = gst_audio_encoder_finish_frame (enc, outbuf, -1);
++ } else {
++ ret = gst_audio_encoder_finish_frame (enc, outbuf, frame.nb_samples);
++ }
++ } else {
++ GST_LOG_OBJECT (ffmpegaudenc, "no output produced");
++ ret = GST_FLOW_OK;
++ }
++
++ return ret;
++}
++
++static void
++gst_ffmpegaudenc_drain (GstFFMpegAudEnc * ffmpegaudenc)
++{
++ GstFFMpegAudEncClass *oclass;
++
++ oclass = (GstFFMpegAudEncClass *) (G_OBJECT_GET_CLASS (ffmpegaudenc));
++
++ if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
++ gint have_data, try = 0;
++
++ GST_LOG_OBJECT (ffmpegaudenc,
++ "codec has delay capabilities, calling until libav has drained everything");
++
++ do {
++ GstFlowReturn ret;
++
++ ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, NULL, 0, &have_data);
++ if (ret != GST_FLOW_OK || have_data == 0)
++ break;
++ } while (try++ < 10);
++ }
++}
++
++static GstFlowReturn
++gst_ffmpegaudenc_handle_frame (GstAudioEncoder * encoder, GstBuffer * inbuf)
++{
++ GstFFMpegAudEnc *ffmpegaudenc;
++ gsize size;
++ GstFlowReturn ret;
++ guint8 *in_data;
++ GstMapInfo map;
++ gint have_data;
++
++ ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
++
++ if (G_UNLIKELY (!ffmpegaudenc->opened))
++ goto not_negotiated;
++
++ if (!inbuf) {
++ gst_ffmpegaudenc_drain (ffmpegaudenc);
++ return GST_FLOW_OK;
++ }
++
++ inbuf = gst_buffer_ref (inbuf);
++
++ GST_DEBUG_OBJECT (ffmpegaudenc,
++ "Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
++ ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)),
++ GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)), gst_buffer_get_size (inbuf));
++
++ /* Reorder channels to the GStreamer channel order */
++ if (ffmpegaudenc->needs_reorder) {
++ GstAudioInfo *info = gst_audio_encoder_get_audio_info (encoder);
++
++ inbuf = gst_buffer_make_writable (inbuf);
++ gst_audio_buffer_reorder_channels (inbuf, info->finfo->format,
++ info->channels, info->position, ffmpegaudenc->ffmpeg_layout);
++ }
++
++ gst_buffer_map (inbuf, &map, GST_MAP_READ);
++ in_data = map.data;
++ size = map.size;
++ ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, size, &have_data);
++ gst_buffer_unmap (inbuf, &map);
++ gst_buffer_unref (inbuf);
++
++ if (ret != GST_FLOW_OK)
++ goto push_failed;
++
++ return GST_FLOW_OK;
++
++ /* ERRORS */
++not_negotiated:
++ {
++ GST_ELEMENT_ERROR (ffmpegaudenc, CORE, NEGOTIATION, (NULL),
++ ("not configured to input format before data start"));
++ gst_buffer_unref (inbuf);
++ return GST_FLOW_NOT_NEGOTIATED;
++ }
++push_failed:
++ {
++ GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to push buffer %d (%s)", ret,
++ gst_flow_get_name (ret));
++ return ret;
++ }
++}
++
++static void
++gst_ffmpegaudenc_set_property (GObject * object,
++ guint prop_id, const GValue * value, GParamSpec * pspec)
++{
++ GstFFMpegAudEnc *ffmpegaudenc;
++
++ /* Get a pointer of the right type. */
++ ffmpegaudenc = (GstFFMpegAudEnc *) (object);
++
++ if (ffmpegaudenc->opened) {
++ GST_WARNING_OBJECT (ffmpegaudenc,
++ "Can't change properties once decoder is setup !");
++ return;
++ }
++
++ /* Check the argument id to see which argument we're setting. */
++ switch (prop_id) {
++ case PROP_BIT_RATE:
++ ffmpegaudenc->bitrate = g_value_get_int (value);
++ break;
++ case PROP_RTP_PAYLOAD_SIZE:
++ ffmpegaudenc->rtp_payload_size = g_value_get_int (value);
++ break;
++ default:
++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
++ break;
++ }
++}
++
++/* The set function is simply the inverse of the get fuction. */
++static void
++gst_ffmpegaudenc_get_property (GObject * object,
++ guint prop_id, GValue * value, GParamSpec * pspec)
++{
++ GstFFMpegAudEnc *ffmpegaudenc;
++
++ /* It's not null if we got it, but it might not be ours */
++ ffmpegaudenc = (GstFFMpegAudEnc *) (object);
++
++ switch (prop_id) {
++ case PROP_BIT_RATE:
++ g_value_set_int (value, ffmpegaudenc->bitrate);
++ break;
++ break;
++ case PROP_RTP_PAYLOAD_SIZE:
++ g_value_set_int (value, ffmpegaudenc->rtp_payload_size);
++ break;
++ default:
++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
++ break;
++ }
++}
++
++gboolean
++gst_ffmpegaudenc_register (GstPlugin * plugin)
++{
++ GTypeInfo typeinfo = {
++ sizeof (GstFFMpegAudEncClass),
++ (GBaseInitFunc) gst_ffmpegaudenc_base_init,
++ NULL,
++ (GClassInitFunc) gst_ffmpegaudenc_class_init,
++ NULL,
++ NULL,
++ sizeof (GstFFMpegAudEnc),
++ 0,
++ (GInstanceInitFunc) gst_ffmpegaudenc_init,
++ };
++ GType type;
++ AVCodec *in_plugin;
++
++
++ GST_LOG ("Registering encoders");
++
++ in_plugin = av_codec_next (NULL);
++ while (in_plugin) {
++ gchar *type_name;
++ guint rank;
++
++ /* Skip non-AV codecs */
++ if (in_plugin->type != AVMEDIA_TYPE_AUDIO)
++ goto next;
++
++ /* no quasi codecs, please */
++ if ((in_plugin->id >= CODEC_ID_PCM_S16LE &&
++ in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
++ goto next;
++ }
++
++ /* No encoders depending on external libraries (we don't build them, but
++ * people who build against an external ffmpeg might have them.
++ * We have native gstreamer plugins for all of those libraries anyway. */
++ if (!strncmp (in_plugin->name, "lib", 3)) {
++ GST_DEBUG
++ ("Not using external library encoder %s. Use the gstreamer-native ones instead.",
++ in_plugin->name);
++ goto next;
++ }
++
++ /* only encoders */
++ if (!av_codec_is_encoder (in_plugin)) {
++ goto next;
++ }
++
++ /* FIXME : We should have a method to know cheaply whether we have a mapping
++ * for the given plugin or not */
++
++ GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
++
++ /* no codecs for which we're GUARANTEED to have better alternatives */
++ if (!strcmp (in_plugin->name, "vorbis")
++ || !strcmp (in_plugin->name, "flac")) {
++ GST_LOG ("Ignoring encoder %s", in_plugin->name);
++ goto next;
++ }
++
++ /* construct the type */
++ type_name = g_strdup_printf ("avenc_%s", in_plugin->name);
++
++ type = g_type_from_name (type_name);
++
++ if (!type) {
++
++ /* create the glib type now */
++ type =
++ g_type_register_static (GST_TYPE_AUDIO_ENCODER, type_name, &typeinfo,
++ 0);
++ g_type_set_qdata (type, GST_FFENC_PARAMS_QDATA, (gpointer) in_plugin);
++
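++      /* Expose the GstPreset interface so encoder settings can be stored and restored as presets */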
++ {
++ static const GInterfaceInfo preset_info = {
++ NULL,
++ NULL,
++ NULL
++ };
++ g_type_add_interface_static (type, GST_TYPE_PRESET, &preset_info);
++ }
++ }
++
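++    /* Lower the rank of encoders with known problems so they are not autoplugged by default */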
++ switch (in_plugin->id) {
++ /* avenc_aac: see https://bugzilla.gnome.org/show_bug.cgi?id=691617 */
++ case CODEC_ID_AAC:
++ rank = GST_RANK_NONE;
++ break;
++ default:
++ rank = GST_RANK_SECONDARY;
++ break;
++ }
++
++ if (!gst_element_register (plugin, type_name, rank, type)) {
++ g_free (type_name);
++ return FALSE;
++ }
++
++ g_free (type_name);
++
++ next:
++ in_plugin = av_codec_next (in_plugin);
++ }
++
++ GST_LOG ("Finished registering encoders");
++
++ return TRUE;
++}
+diff --git a/ext/libav/gstavaudenc.h b/ext/libav/gstavaudenc.h
+new file mode 100644
+index 0000000..b01184f
+--- /dev/null
++++ b/ext/libav/gstavaudenc.h
+@@ -0,0 +1,77 @@
++/* GStreamer
++ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++
++/* First, include the header file for the plugin, to bring in the
++ * object definition and other useful things.
++ */
++
++#ifndef __GST_FFMPEGAUDENC_H__
++#define __GST_FFMPEGAUDENC_H__
++
++G_BEGIN_DECLS
++
++#include <gst/gst.h>
++#include <gst/audio/gstaudioencoder.h>
++#include <libavcodec/avcodec.h>
++
++typedef struct _GstFFMpegAudEnc GstFFMpegAudEnc;
++
++struct _GstFFMpegAudEnc
++{
++ GstAudioEncoder parent;
++
++ AVCodecContext *context;
++ gboolean opened;
++
++ /* cache */
++ gint bitrate;
++ gint rtp_payload_size;
++
++ /* other settings are copied over straight,
++   * include a context here, rather than copy-and-paste it from avcodec.h */
++ AVCodecContext config;
++
++ GstAudioChannelPosition ffmpeg_layout[64];
++ gboolean needs_reorder;
++};
++
++typedef struct _GstFFMpegAudEncClass GstFFMpegAudEncClass;
++
++struct _GstFFMpegAudEncClass
++{
++ GstAudioEncoderClass parent_class;
++
++ AVCodec *in_plugin;
++ GstPadTemplate *srctempl, *sinktempl;
++};
++
++#define GST_TYPE_FFMPEGAUDENC \
++ (gst_ffmpegaudenc_get_type())
++#define GST_FFMPEGAUDENC(obj) \
++ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGAUDENC,GstFFMpegAudEnc))
++#define GST_FFMPEGAUDENC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGAUDENC,GstFFMpegAudEncClass))
++#define GST_IS_FFMPEGAUDENC(obj) \
++ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGAUDENC))
++#define GST_IS_FFMPEGAUDENC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGAUDENC))
++
++G_END_DECLS
++
++#endif /* __GST_FFMPEGAUDENC_H__ */
+diff --git a/ext/libav/gstavcfg.c b/ext/libav/gstavcfg.c
+index 5ee23dd..1d7c9d7 100644
+--- a/ext/libav/gstavcfg.c
++++ b/ext/libav/gstavcfg.c
+@@ -16,8 +16,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+
+@@ -147,7 +147,6 @@ gst_ffmpeg_dct_algo_get_type (void)
+ {FF_DCT_FASTINT, "Fast Integer", "fastint"},
+ {FF_DCT_INT, "Accurate Integer", "int"},
+ {FF_DCT_MMX, "MMX", "mmx"},
+- {FF_DCT_MLIB, "MLIB", "mlib"},
+ {FF_DCT_ALTIVEC, "ALTIVEC", "altivec"},
+ {FF_DCT_FAAN, "FAAN", "faan"},
+ {0, NULL, NULL},
+@@ -173,8 +172,6 @@ gst_ffmpeg_idct_algo_get_type (void)
+ {FF_IDCT_SIMPLE, "Simple", "simple"},
+ {FF_IDCT_SIMPLEMMX, "Simple MMX", "simplemmx"},
+ {FF_IDCT_LIBMPEG2MMX, "LIBMPEG2MMX", "libmpeg2mmx"},
+- {FF_IDCT_PS2, "PS2", "ps2"},
+- {FF_IDCT_MLIB, "MLIB", "mlib"},
+ {FF_IDCT_ARM, "ARM", "arm"},
+ {FF_IDCT_ALTIVEC, "ALTIVEC", "altivec"},
+ {FF_IDCT_SH4, "SH4", "sh4"},
+@@ -263,16 +260,11 @@ gst_ffmpeg_flags_get_type (void)
+
+ if (!ffmpeg_flags_type) {
+ static const GFlagsValue ffmpeg_flags[] = {
+- {CODEC_FLAG_OBMC, "Use overlapped block motion compensation (h263+)",
+- "obmc"},
+ {CODEC_FLAG_QSCALE, "Use fixed qscale", "qscale"},
+ {CODEC_FLAG_4MV, "Allow 4 MV per MB", "4mv"},
+- {CODEC_FLAG_H263P_AIV, "H.263 alternative inter VLC", "aiv"},
+ {CODEC_FLAG_QPEL, "Quartel Pel Motion Compensation", "qpel"},
+ {CODEC_FLAG_GMC, "GMC", "gmc"},
+ {CODEC_FLAG_MV0, "Always try a MB with MV (0,0)", "mv0"},
+- {CODEC_FLAG_PART,
+- "Store MV, DC and AC coefficients in seperate partitions", "part"},
+ {CODEC_FLAG_LOOP_FILTER, "Loop filter", "loop-filter"},
+ {CODEC_FLAG_GRAY, "Only decode/encode grayscale", "gray"},
+ {CODEC_FLAG_NORMALIZE_AQP,
+@@ -282,13 +274,9 @@ gst_ffmpeg_flags_get_type (void)
+ "global-headers"},
+ {CODEC_FLAG_AC_PRED, "H263 Advanced Intra Coding / MPEG4 AC prediction",
+ "aic"},
+- {CODEC_FLAG_H263P_UMV, "Unlimited Motion Vector", "umv"},
+ {CODEC_FLAG_CBP_RD, "Rate Distoration Optimization for CBP", "cbp-rd"},
+ {CODEC_FLAG_QP_RD, "Rate Distoration Optimization for QP selection",
+ "qp-rd"},
+- {CODEC_FLAG_H263P_SLICE_STRUCT, "H263 slice struct", "ss"},
+- {CODEC_FLAG_SVCD_SCAN_OFFSET,
+- "Reserve space for SVCD scan offset user data", "scanoffset"},
+ {CODEC_FLAG_CLOSED_GOP, "Closed GOP", "closedgop"},
+ {0, NULL, NULL},
+ };
+@@ -750,10 +738,8 @@ gst_ffmpeg_cfg_install_property (GstFFMpegVidEncClass * klass, guint base)
+ prop_id = base;
+ g_return_if_fail (base > 0);
+
+- ctx = avcodec_alloc_context ();
+- if (ctx)
+- avcodec_get_context_defaults (ctx);
+- else
++ ctx = avcodec_alloc_context3 (klass->in_plugin);
++ if (!ctx)
+ g_warning ("could not get context");
+
+ for (list = property_list; list; list = list->next) {
+diff --git a/ext/libav/gstavcfg.h b/ext/libav/gstavcfg.h
+index 5251eb2..2aef665 100644
+--- a/ext/libav/gstavcfg.h
++++ b/ext/libav/gstavcfg.h
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+
+diff --git a/ext/libav/gstavcodecmap.c b/ext/libav/gstavcodecmap.c
+index 7b28800..cac15ee 100644
+--- a/ext/libav/gstavcodecmap.c
++++ b/ext/libav/gstavcodecmap.c
+@@ -15,8 +15,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -27,6 +27,7 @@
+
+ #include <gst/gst.h>
+ #include <libavcodec/avcodec.h>
++#include <libavutil/channel_layout.h>
+
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+@@ -35,44 +36,6 @@
+ #include <gst/audio/audio.h>
+ #include <gst/pbutils/codec-utils.h>
+
+-/*
+- * Read a palette from a caps.
+- */
+-
+-static void
+-gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
+-{
+- GstStructure *str = gst_caps_get_structure (caps, 0);
+- const GValue *palette_v;
+- GstBuffer *palette;
+-
+- /* do we have a palette? */
+- if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
+- palette = gst_value_get_buffer (palette_v);
+- GST_DEBUG ("got palette data %p", palette);
+- if (gst_buffer_get_size (palette) >= AVPALETTE_SIZE) {
+- if (context->palctrl)
+- av_free (context->palctrl);
+- context->palctrl = av_malloc (sizeof (AVPaletteControl));
+- context->palctrl->palette_changed = 1;
+- gst_buffer_extract (palette, 0, context->palctrl->palette,
+- AVPALETTE_SIZE);
+- GST_DEBUG ("extracted palette data");
+- }
+- }
+-}
+-
+-static void
+-gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
+-{
+- if (context->palctrl) {
+- GstBuffer *palette = gst_buffer_new_and_alloc (AVPALETTE_SIZE);
+-
+- gst_buffer_fill (palette, 0, context->palctrl->palette, AVPALETTE_SIZE);
+- gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
+- }
+-}
+-
+ /* IMPORTANT: Keep this sorted by the ffmpeg channel masks */
+ static const struct
+ {
+@@ -80,34 +43,60 @@ static const struct
+ GstAudioChannelPosition gst;
+ } _ff_to_gst_layout[] = {
+ {
+- CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
+- CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
+- CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
+- CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE1}, {
+- CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
+- CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
+- CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
+- CH_FRONT_RIGHT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
+- CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
+- CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
+- CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
+- CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_CENTER}, {
+- CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_LEFT}, {
+- CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_CENTER}, {
+- CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_RIGHT}, {
+- CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_LEFT}, {
+- CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_CENTER}, {
+- CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_RIGHT}, {
+- CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
+- CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
++ AV_CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
++ AV_CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
++ AV_CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
++ AV_CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE1}, {
++ AV_CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
++ AV_CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
++ AV_CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
++ AV_CH_FRONT_RIGHT_OF_CENTER,
++ GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
++ AV_CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
++ AV_CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
++ AV_CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
++ AV_CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_CENTER}, {
++ AV_CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_LEFT}, {
++ AV_CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_CENTER}, {
++ AV_CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_RIGHT}, {
++ AV_CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_LEFT}, {
++ AV_CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_CENTER}, {
++ AV_CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_RIGHT}, {
++ AV_CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
++ AV_CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
+ };
+
++static guint64
++gst_ffmpeg_channel_positions_to_layout (GstAudioChannelPosition * pos,
++ gint channels)
++{
++ gint i, j;
++ guint64 ret = 0;
++ gint channels_found = 0;
++
++ if (!pos)
++ return 0;
++
++ for (i = 0; i < channels; i++) {
++ for (j = 0; j < G_N_ELEMENTS (_ff_to_gst_layout); j++) {
++ if (_ff_to_gst_layout[j].gst == pos[i]) {
++ ret |= _ff_to_gst_layout[j].ff;
++ channels_found++;
++ break;
++ }
++ }
++ }
++
++ if (channels_found != channels)
++ return 0;
++ return ret;
++}
++
+ gboolean
+-gst_ffmpeg_channel_layout_to_gst (AVCodecContext * context,
++gst_ffmpeg_channel_layout_to_gst (guint64 channel_layout, gint channels,
+ GstAudioChannelPosition * pos)
+ {
+- guint nchannels = 0, channels = context->channels;
+- guint64 channel_layout = context->channel_layout;
++ guint nchannels = 0;
+ gboolean none_layout = FALSE;
+
+ if (channel_layout == 0) {
+@@ -170,6 +159,52 @@ gst_ffmpeg_channel_layout_to_gst (AVCodecContext * context,
+ return TRUE;
+ }
+
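++/* Set the "format" field of the caps to the pixel formats the codec declares,
++ * or to all mappable pixel formats when the codec does not provide a list */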
++static void
++gst_ffmpeg_video_set_pix_fmts (GstCaps * caps, const enum AVPixelFormat *fmts)
++{
++ GValue va = { 0, };
++ GValue v = { 0, };
++ GstVideoFormat format;
++
++ if (!fmts || fmts[0] == -1) {
++ gint i;
++
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, G_TYPE_STRING);
++ for (i = 0; i <= PIX_FMT_NB; i++) {
++ format = gst_ffmpeg_pixfmt_to_videoformat (i);
++ if (format == GST_VIDEO_FORMAT_UNKNOWN)
++ continue;
++ g_value_set_string (&v, gst_video_format_to_string (format));
++ gst_value_list_append_value (&va, &v);
++ }
++ gst_caps_set_value (caps, "format", &va);
++ g_value_unset (&v);
++ g_value_unset (&va);
++ return;
++ }
++
++  /* The codec provides an explicit list of formats */
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, G_TYPE_STRING);
++ while (*fmts != -1) {
++ format = gst_ffmpeg_pixfmt_to_videoformat (*fmts);
++ if (format != GST_VIDEO_FORMAT_UNKNOWN) {
++ g_value_set_string (&v, gst_video_format_to_string (format));
++ gst_value_list_append_value (&va, &v);
++ }
++ fmts++;
++ }
++ if (gst_value_list_get_size (&va) == 1) {
++ /* The single value is still in v */
++ gst_caps_set_value (caps, "format", &v);
++ } else if (gst_value_list_get_size (&va) > 1) {
++ gst_caps_set_value (caps, "format", &va);
++ }
++ g_value_unset (&v);
++ g_value_unset (&va);
++}
++
+ /* this macro makes a caps width fixed or unfixed width/height
+ * properties depending on whether we've got a context.
+ *
+@@ -179,10 +214,10 @@ gst_ffmpeg_channel_layout_to_gst (AVCodecContext * context,
+ * but I'm too lazy today. Maybe later.
+ */
+ static GstCaps *
+-gst_ff_vid_caps_new (AVCodecContext * context, enum CodecID codec_id,
+- gboolean encode, const char *mimetype, const char *fieldname, ...)
++gst_ff_vid_caps_new (AVCodecContext * context, AVCodec * codec,
++ enum CodecID codec_id, gboolean encode, const char *mimetype,
++ const char *fieldname, ...)
+ {
+- GstStructure *structure = NULL;
+ GstCaps *caps = NULL;
+ va_list var_args;
+ gint i;
+@@ -252,32 +287,58 @@ gst_ff_vid_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ {
+ static struct
+ {
+- guint32 csp;
++ const gchar *csp;
+ gint width, height;
+ gint par_n, par_d;
+ gint framerate_n, framerate_d;
+ } profiles[] = {
+ {
+- GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 480, 10, 11, 30000, 1001}, {
+- GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 480, 40, 33, 30000, 1001}, {
+- GST_MAKE_FOURCC ('I', '4', '2', '0'), 720, 576, 59, 54, 25, 1}, {
+- GST_MAKE_FOURCC ('I', '4', '2', '0'), 720, 576, 118, 81, 25, 1}, {
+- GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 576, 59, 54, 25, 1}, {
+- GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 576, 118, 81, 25, 1}
+- };
++ "Y41B", 720, 480, 8, 9, 30000, 1001}, {
++ "Y41B", 720, 480, 32, 27, 30000, 1001}, {
++ "Y42B", 720, 480, 8, 9, 30000, 1001}, {
++ "Y42B", 720, 480, 32, 27, 30000, 1001}, {
++ "I420", 720, 576, 16, 15, 25, 1}, {
++ "I420", 720, 576, 64, 45, 25, 1}, {
++ "Y41B", 720, 576, 16, 15, 25, 1}, {
++ "Y41B", 720, 576, 64, 45, 25, 1}, {
++ "Y42B", 720, 576, 16, 15, 25, 1}, {
++ "Y42B", 720, 576, 64, 45, 25, 1}, {
++ "Y42B", 1280, 1080, 1, 1, 30000, 1001}, {
++ "Y42B", 1280, 1080, 3, 2, 30000, 1001}, {
++ "Y42B", 1440, 1080, 1, 1, 25, 1}, {
++ "Y42B", 1440, 1080, 4, 3, 25, 1}, {
++ "Y42B", 960, 720, 1, 1, 60000, 1001}, {
++ "Y42B", 960, 720, 4, 3, 60000, 1001}, {
++ "Y42B", 960, 720, 1, 1, 50, 1}, {
++ "Y42B", 960, 720, 4, 3, 50, 1},};
+ GstCaps *temp;
+ gint n_sizes = G_N_ELEMENTS (profiles);
+
+- caps = gst_caps_new_empty ();
+- for (i = 0; i < n_sizes; i++) {
+- temp = gst_caps_new_simple (mimetype,
+- "width", G_TYPE_INT, profiles[i].width,
+- "height", G_TYPE_INT, profiles[i].height,
+- "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
+- profiles[i].framerate_d, "pixel-aspect-ratio", GST_TYPE_FRACTION,
+- profiles[i].par_n, profiles[i].par_d, NULL);
+-
+- gst_caps_append (caps, temp);
++ if (strcmp (mimetype, "video/x-raw") == 0) {
++ caps = gst_caps_new_empty ();
++ for (i = 0; i < n_sizes; i++) {
++ temp = gst_caps_new_simple (mimetype,
++ "format", G_TYPE_STRING, profiles[i].csp,
++ "width", G_TYPE_INT, profiles[i].width,
++ "height", G_TYPE_INT, profiles[i].height,
++ "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
++ profiles[i].framerate_d, "pixel-aspect-ratio",
++ GST_TYPE_FRACTION, profiles[i].par_n, profiles[i].par_d, NULL);
++
++ gst_caps_append (caps, temp);
++ }
++ } else {
++ caps = gst_caps_new_empty ();
++ for (i = 0; i < n_sizes; i++) {
++ temp = gst_caps_new_simple (mimetype,
++ "width", G_TYPE_INT, profiles[i].width,
++ "height", G_TYPE_INT, profiles[i].height,
++ "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
++ profiles[i].framerate_d, "pixel-aspect-ratio",
++ GST_TYPE_FRACTION, profiles[i].par_n, profiles[i].par_d, NULL);
++
++ gst_caps_append (caps, temp);
++ }
+ }
+ break;
+ }
+@@ -294,7 +355,40 @@ gst_ff_vid_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ break;
+ }
+ default:
++ {
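++        /* If the codec lists the framerates it supports, restrict the caps to
++         * those; otherwise leave the framerate unconstrained */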
++ if (codec && codec->supported_framerates
++ && codec->supported_framerates[0].num != 0
++ && codec->supported_framerates[0].den != 0) {
++ GValue va = { 0, };
++ GValue v = { 0, };
++ const AVRational *rates = codec->supported_framerates;
++
++ if (rates[1].num == 0 && rates[1].den == 0) {
++ caps =
++ gst_caps_new_simple (mimetype, "framerate", GST_TYPE_FRACTION,
++ rates[0].num, rates[0].den, NULL);
++ } else {
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, GST_TYPE_FRACTION);
++
++ while (rates->num != 0 && rates->den != 0) {
++ gst_value_set_fraction (&v, rates->num, rates->den);
++ gst_value_list_append_value (&va, &v);
++ rates++;
++ }
++
++ caps = gst_caps_new_simple (mimetype, NULL, NULL, NULL);
++ gst_caps_set_value (caps, "framerate", &va);
++ g_value_unset (&va);
++ g_value_unset (&v);
++ }
++
++ } else {
++ caps = gst_caps_new_empty_simple (mimetype);
++ }
++
+ break;
++ }
+ }
+ }
+
+@@ -302,27 +396,84 @@ gst_ff_vid_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ * default unfixed setting */
+ if (!caps) {
+ GST_DEBUG ("Creating default caps");
+- caps = gst_caps_new_simple (mimetype, NULL, NULL, NULL);
++ caps = gst_caps_new_empty_simple (mimetype);
+ }
+
+- for (i = 0; i < gst_caps_get_size (caps); i++) {
+- va_start (var_args, fieldname);
+- structure = gst_caps_get_structure (caps, i);
+- gst_structure_set_valist (structure, fieldname, var_args);
+- va_end (var_args);
+- }
++ va_start (var_args, fieldname);
++ gst_caps_set_simple_valist (caps, fieldname, var_args);
++ va_end (var_args);
+
+ return caps;
+ }
+
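++/* Count the bits set in a channel-layout mask, i.e. the number of channels it describes */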
++static gint
++get_nbits_set (guint64 n)
++{
++ gint i, x;
++
++ x = 0;
++ for (i = 0; i < 64; i++) {
++ if ((n & (G_GUINT64_CONSTANT (1) << i)))
++ x++;
++ }
++
++ return x;
++}
++
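++/* Same idea for audio: expose the sample formats the codec declares, or all
++ * mappable sample formats when no list is provided */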
++static void
++gst_ffmpeg_audio_set_sample_fmts (GstCaps * caps,
++ const enum AVSampleFormat *fmts)
++{
++ GValue va = { 0, };
++ GValue v = { 0, };
++ GstAudioFormat format;
++
++ if (!fmts || fmts[0] == -1) {
++ gint i;
++
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, G_TYPE_STRING);
++ for (i = 0; i <= AV_SAMPLE_FMT_DBL; i++) {
++ format = gst_ffmpeg_smpfmt_to_audioformat (i);
++ if (format == GST_AUDIO_FORMAT_UNKNOWN)
++ continue;
++ g_value_set_string (&v, gst_audio_format_to_string (format));
++ gst_value_list_append_value (&va, &v);
++ }
++ gst_caps_set_value (caps, "format", &va);
++ g_value_unset (&v);
++ g_value_unset (&va);
++ return;
++ }
++
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, G_TYPE_STRING);
++ while (*fmts != -1) {
++ format = gst_ffmpeg_smpfmt_to_audioformat (*fmts);
++ if (format != GST_AUDIO_FORMAT_UNKNOWN) {
++ g_value_set_string (&v, gst_audio_format_to_string (format));
++ gst_value_list_append_value (&va, &v);
++ }
++ fmts++;
++ }
++ if (gst_value_list_get_size (&va) == 1) {
++ /* The single value is still in v */
++ gst_caps_set_value (caps, "format", &v);
++ } else if (gst_value_list_get_size (&va) > 1) {
++ gst_caps_set_value (caps, "format", &va);
++ }
++ g_value_unset (&v);
++ g_value_unset (&va);
++}
++
+ /* same for audio - now with channels/sample rate
+ */
+ static GstCaps *
+-gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
+- gboolean encode, const char *mimetype, const char *fieldname, ...)
++gst_ff_aud_caps_new (AVCodecContext * context, AVCodec * codec,
++ enum CodecID codec_id, gboolean encode, const char *mimetype,
++ const char *fieldname, ...)
+ {
+ GstCaps *caps = NULL;
+- GstStructure *structure = NULL;
+ gint i;
+ va_list var_args;
+
+@@ -334,7 +485,8 @@ gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ "rate", G_TYPE_INT, context->sample_rate,
+ "channels", G_TYPE_INT, context->channels, NULL);
+
+- if (gst_ffmpeg_channel_layout_to_gst (context, pos)) {
++ if (gst_ffmpeg_channel_layout_to_gst (context->channel_layout,
++ context->channels, pos)) {
+ guint64 mask;
+
+ if (gst_audio_channel_positions_to_mask (pos, context->channels, FALSE,
+@@ -422,10 +574,6 @@ gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ break;
+ }
+
+- /* TODO: handle context->channel_layouts here to set
+- * the list of channel layouts supported by the encoder.
+- * Unfortunately no encoder uses this yet....
+- */
+ /* regardless of encode/decode, open up channels if applicable */
+ /* Until decoders/encoders expose the maximum number of channels
+ * they support, we whitelist them here. */
+@@ -438,15 +586,40 @@ gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ break;
+ }
+
+- if (maxchannels == 1)
+- caps = gst_caps_new_simple (mimetype,
+- "channels", G_TYPE_INT, maxchannels, NULL);
+- else
+- caps = gst_caps_new_simple (mimetype,
+- "channels", GST_TYPE_INT_RANGE, 1, maxchannels, NULL);
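++    /* When the encoder advertises its supported channel layouts, add one caps
++     * structure per layout with the matching channel-mask */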
++ if (codec && codec->channel_layouts) {
++ const uint64_t *layouts = codec->channel_layouts;
++ GstAudioChannelPosition pos[64];
++
++ caps = gst_caps_new_empty ();
++ while (*layouts) {
++ gint nbits_set = get_nbits_set (*layouts);
++
++ if (gst_ffmpeg_channel_layout_to_gst (*layouts, nbits_set, pos)) {
++ guint64 mask;
++
++ if (gst_audio_channel_positions_to_mask (pos, nbits_set, FALSE,
++ &mask)) {
++ GstCaps *tmp =
++ gst_caps_new_simple (mimetype, "channel-mask", GST_TYPE_BITMASK,
++ mask,
++ "channels", G_TYPE_INT, nbits_set, NULL);
++
++ gst_caps_append (caps, tmp);
++ }
++ }
++ layouts++;
++ }
++ } else {
++ if (maxchannels == 1)
++ caps = gst_caps_new_simple (mimetype,
++ "channels", G_TYPE_INT, maxchannels, NULL);
++ else
++ caps = gst_caps_new_simple (mimetype,
++ "channels", GST_TYPE_INT_RANGE, 1, maxchannels, NULL);
++ }
++
+ if (n_rates) {
+ GValue list = { 0, };
+- GstStructure *structure;
+
+ g_value_init (&list, GST_TYPE_LIST);
+ for (i = 0; i < n_rates; i++) {
+@@ -457,21 +630,41 @@ gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
+ gst_value_list_append_value (&list, &v);
+ g_value_unset (&v);
+ }
+- structure = gst_caps_get_structure (caps, 0);
+- gst_structure_set_value (structure, "rate", &list);
++ gst_caps_set_value (caps, "rate", &list);
+ g_value_unset (&list);
+- } else
++ } else if (codec && codec->supported_samplerates
++ && codec->supported_samplerates[0]) {
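++    /* Use the sample rates advertised by the encoder instead of the generic 4000-96000 range */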
++ GValue va = { 0, };
++ GValue v = { 0, };
++
++ if (!codec->supported_samplerates[1]) {
++ gst_caps_set_simple (caps, "rate", G_TYPE_INT,
++ codec->supported_samplerates[0], NULL);
++ } else {
++ const int *rates = codec->supported_samplerates;
++
++ g_value_init (&va, GST_TYPE_LIST);
++ g_value_init (&v, G_TYPE_INT);
++
++ while (*rates) {
++ g_value_set_int (&v, *rates);
++ gst_value_list_append_value (&va, &v);
++ rates++;
++ }
++ gst_caps_set_value (caps, "rate", &va);
++ g_value_unset (&va);
++ g_value_unset (&v);
++ }
++ } else {
+ gst_caps_set_simple (caps, "rate", GST_TYPE_INT_RANGE, 4000, 96000, NULL);
++ }
+ } else {
+ caps = gst_caps_new_empty_simple (mimetype);
+ }
+
+- for (i = 0; i < gst_caps_get_size (caps); i++) {
+- va_start (var_args, fieldname);
+- structure = gst_caps_get_structure (caps, i);
+- gst_structure_set_valist (structure, fieldname, var_args);
+- va_end (var_args);
+- }
++ va_start (var_args, fieldname);
++ gst_caps_set_simple_valist (caps, fieldname, var_args);
++ va_end (var_args);
+
+ return caps;
+ }
+@@ -504,7 +697,7 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ switch (codec_id) {
+ case CODEC_ID_MPEG1VIDEO:
+ /* FIXME: bitrate */
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/mpeg",
++ caps = gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
+ "mpegversion", G_TYPE_INT, 1,
+ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
+ break;
+@@ -512,9 +705,10 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_MPEG2VIDEO:
+ if (encode) {
+ /* FIXME: bitrate */
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/mpeg",
+- "mpegversion", G_TYPE_INT, 2,
+- "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
++ "mpegversion", G_TYPE_INT, 2, "systemstream", G_TYPE_BOOLEAN, FALSE,
++ NULL);
+ } else {
+ /* decode both MPEG-1 and MPEG-2; width/height/fps are all in
+ * the MPEG video stream headers, so may be omitted from caps. */
+@@ -530,23 +724,25 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_H263:
+ if (encode) {
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-h263",
+- "variant", G_TYPE_STRING, "itu",
+- "h263version", G_TYPE_STRING, "h263", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-h263", "variant", G_TYPE_STRING, "itu", "h263version",
++ G_TYPE_STRING, "h263", NULL);
+ } else {
+ /* don't pass codec_id, we can decode other variants with the H263
+ * decoder that don't have specific size requirements
+ */
+ caps =
+- gst_ff_vid_caps_new (context, CODEC_ID_NONE, encode, "video/x-h263",
+- "variant", G_TYPE_STRING, "itu", NULL);
++ gst_ff_vid_caps_new (context, NULL, CODEC_ID_NONE, encode,
++ "video/x-h263", "variant", G_TYPE_STRING, "itu", NULL);
+ }
+ break;
+
+ case CODEC_ID_H263P:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-h263",
+- "variant", G_TYPE_STRING, "itu",
+- "h263version", G_TYPE_STRING, "h263p", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h263",
++ "variant", G_TYPE_STRING, "itu", "h263version", G_TYPE_STRING,
++ "h263p", NULL);
+ if (encode && context) {
+
+ gst_caps_set_simple (caps,
+@@ -560,13 +756,14 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_H263I:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-intel-h263",
+- "variant", G_TYPE_STRING, "intel", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-intel-h263", "variant", G_TYPE_STRING, "intel", NULL);
+ break;
+
+ case CODEC_ID_H261:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-h261", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h261",
++ NULL);
+ break;
+
+ case CODEC_ID_RV10:
+@@ -593,7 +790,7 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ /* FIXME: context->sub_id must be filled in during decoding */
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-pn-realvideo", "systemstream", G_TYPE_BOOLEAN, FALSE,
+ "rmversion", G_TYPE_INT, version, NULL);
+ if (context) {
+@@ -609,20 +806,21 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_MP1:
+ /* FIXME: bitrate */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/mpeg",
++ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_MP2:
+ /* FIXME: bitrate */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/mpeg",
++ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 2, NULL);
+ break;
+
+ case CODEC_ID_MP3:
+ if (encode) {
+ /* FIXME: bitrate */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/mpeg",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 3, NULL);
+ } else {
+ /* Decodes MPEG-1 layer 1/2/3. Samplerate, channels et al are
+@@ -635,14 +833,14 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_MUSEPACK7:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 7,
+ NULL);
+ break;
+
+ case CODEC_ID_MUSEPACK8:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 8,
+ NULL);
+ break;
+@@ -650,41 +848,44 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_AC3:
+ /* FIXME: bitrate */
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-ac3", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-ac3",
++ NULL);
+ break;
+
+ case CODEC_ID_EAC3:
+ /* FIXME: bitrate */
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-eac3", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-eac3",
++ NULL);
+ break;
+
+ case CODEC_ID_TRUEHD:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-true-hd",
+- NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
++ "audio/x-true-hd", NULL);
+ break;
+
+ case CODEC_ID_ATRAC1:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-vnd.sony.atrac1", NULL);
+ break;
+
+ case CODEC_ID_ATRAC3:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-vnd.sony.atrac3", NULL);
+ break;
+
+ case CODEC_ID_DTS:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-dts", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dts",
++ NULL);
+ break;
+
+ case CODEC_ID_APE:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-ape", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+@@ -694,12 +895,14 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_MLP:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-mlp", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mlp",
++ NULL);
+ break;
+
+ case CODEC_ID_IMC:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-imc", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-imc",
++ NULL);
+ break;
+
+ /* MJPEG is normal JPEG, Motion-JPEG and Quicktime MJPEG-A. MJPEGB
+@@ -710,18 +913,20 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_MJPEG:
+ case CODEC_ID_LJPEG:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/jpeg", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/jpeg",
++ NULL);
+ break;
+
+ case CODEC_ID_SP5X:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/sp5x", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/sp5x",
++ NULL);
+ break;
+
+ case CODEC_ID_MJPEGB:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-mjpeg-b",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-mjpeg-b", NULL);
+ break;
+
+ case CODEC_ID_MPEG4:
+@@ -731,41 +936,42 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ switch (context->codec_tag) {
+ case GST_MAKE_FOURCC ('D', 'I', 'V', 'X'):
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-divx",
+- "divxversion", G_TYPE_INT, 5, NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL);
+ break;
+ case GST_MAKE_FOURCC ('m', 'p', '4', 'v'):
+ default:
+ /* FIXME: bitrate */
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/mpeg",
+- "systemstream", G_TYPE_BOOLEAN, FALSE,
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/mpeg", "systemstream", G_TYPE_BOOLEAN, FALSE,
+ "mpegversion", G_TYPE_INT, 4, NULL);
+ break;
+ }
+ } else {
+ /* The trick here is to separate xvid, divx, mpeg4, 3ivx et al */
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/mpeg",
+- "mpegversion", G_TYPE_INT, 4,
+- "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
++ "mpegversion", G_TYPE_INT, 4, "systemstream", G_TYPE_BOOLEAN, FALSE,
++ NULL);
+ if (encode) {
+- gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id, encode,
+- "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL));
++ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
++ encode, "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL));
+ } else {
+- gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id, encode,
+- "video/x-divx", "divxversion", GST_TYPE_INT_RANGE, 4, 5,
+- NULL));
+- gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id, encode,
+- "video/x-xvid", NULL));
+- gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id, encode,
+- "video/x-3ivx", NULL));
++ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
++ encode, "video/x-divx", "divxversion", GST_TYPE_INT_RANGE, 4,
++ 5, NULL));
++ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
++ encode, "video/x-xvid", NULL));
++ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
++ encode, "video/x-3ivx", NULL));
+ }
+ }
+ break;
+
+ case CODEC_ID_RAWVIDEO:
+ caps =
+- gst_ffmpeg_codectype_to_caps (AVMEDIA_TYPE_VIDEO, context, codec_id,
+- encode);
++ gst_ffmpeg_codectype_to_video_caps (context, codec_id, encode, NULL);
+ break;
+
+ case CODEC_ID_MSMPEG4V1:
+@@ -775,11 +981,12 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ gint version = 41 + codec_id - CODEC_ID_MSMPEG4V1;
+
+ /* encode-FIXME: bitrate */
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-msmpeg",
+- "msmpegversion", G_TYPE_INT, version, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-msmpeg", "msmpegversion", G_TYPE_INT, version, NULL);
+ if (!encode && codec_id == CODEC_ID_MSMPEG4V3) {
+- gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id, encode,
+- "video/x-divx", "divxversion", G_TYPE_INT, 3, NULL));
++ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
++ encode, "video/x-divx", "divxversion", G_TYPE_INT, 3, NULL));
+ }
+ }
+ break;
+@@ -789,30 +996,34 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ {
+ gint version = (codec_id == CODEC_ID_WMV1) ? 1 : 2;
+
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-wmv",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, version, NULL);
+ }
+ break;
+
+ case CODEC_ID_FLV1:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-flash-video",
+- "flvversion", G_TYPE_INT, 1, NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-flash-video", "flvversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_SVQ1:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-svq",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-svq",
+ "svqversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_SVQ3:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-svq",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-svq",
+ "svqversion", G_TYPE_INT, 3, NULL);
+ break;
+
+ case CODEC_ID_DVAUDIO:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-dv", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dv",
++ NULL);
+ break;
+
+ case CODEC_ID_DVVIDEO:
+@@ -846,11 +1057,13 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ format = "I420";
+ break;
+ }
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-dv",
+- "systemstream", G_TYPE_BOOLEAN, FALSE,
+- "format", G_TYPE_STRING, format, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dv",
++ "systemstream", G_TYPE_BOOLEAN, FALSE, "format", G_TYPE_STRING,
++ format, NULL);
+ } else {
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-dv",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dv",
+ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
+ }
+ }
+@@ -862,21 +1075,24 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ gint version = (codec_id == CODEC_ID_WMAV1) ? 1 : 2;
+
+ if (context) {
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-wma",
+- "wmaversion", G_TYPE_INT, version,
+- "block_align", G_TYPE_INT, context->block_align,
+- "bitrate", G_TYPE_INT, context->bit_rate, NULL);
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
++ "wmaversion", G_TYPE_INT, version, "block_align", G_TYPE_INT,
++ context->block_align, "bitrate", G_TYPE_INT, context->bit_rate,
++ NULL);
+ } else {
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-wma",
+- "wmaversion", G_TYPE_INT, version,
+- "block_align", GST_TYPE_INT_RANGE, 0, G_MAXINT,
+- "bitrate", GST_TYPE_INT_RANGE, 0, G_MAXINT, NULL);
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
++ "wmaversion", G_TYPE_INT, version, "block_align",
++ GST_TYPE_INT_RANGE, 0, G_MAXINT, "bitrate", GST_TYPE_INT_RANGE, 0,
++ G_MAXINT, NULL);
+ }
+ }
+ break;
+ case CODEC_ID_WMAPRO:
+ {
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-wma",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
+ "wmaversion", G_TYPE_INT, 3, NULL);
+ break;
+ }
+@@ -884,7 +1100,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_WMAVOICE:
+ {
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-wms", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wms",
++ NULL);
+ break;
+ }
+
+@@ -893,15 +1110,16 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ {
+ gint version = (codec_id == CODEC_ID_MACE3) ? 3 : 6;
+
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-mace",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mace",
+ "maceversion", G_TYPE_INT, version, NULL);
+ }
+ break;
+
+ case CODEC_ID_HUFFYUV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-huffyuv",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-huffyuv", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "bpp", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+@@ -910,84 +1128,93 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_CYUV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-compressed-yuv", NULL);
+ break;
+
+ case CODEC_ID_H264:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-h264",
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h264",
+ "alignment", G_TYPE_STRING, "au", NULL);
+ break;
+
+ case CODEC_ID_INDEO5:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-indeo",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 5, NULL);
+ break;
+
+ case CODEC_ID_INDEO4:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-indeo",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 4, NULL);
+ break;
+
+ case CODEC_ID_INDEO3:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-indeo",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 3, NULL);
+ break;
+
+ case CODEC_ID_INDEO2:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-indeo",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case CODEC_ID_FLASHSV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-flash-screen", NULL);
+ break;
+
+ case CODEC_ID_VP3:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp3", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp3",
++ NULL);
+ break;
+
+ case CODEC_ID_VP5:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp5", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp5",
++ NULL);
+ break;
+
+ case CODEC_ID_VP6:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp6", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp6",
++ NULL);
+ break;
+
+ case CODEC_ID_VP6F:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp6-flash",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-vp6-flash", NULL);
+ break;
+
+ case CODEC_ID_VP6A:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp6-alpha",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-vp6-alpha", NULL);
+ break;
+
+ case CODEC_ID_VP8:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vp8", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp8",
++ NULL);
+ break;
+
+ case CODEC_ID_THEORA:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-theora",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-theora", NULL);
+ break;
+
+ case CODEC_ID_AAC:
+ {
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/mpeg", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
++ NULL);
+
+ if (!encode) {
+ GValue arr = { 0, };
+@@ -1029,45 +1256,50 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+ }
+ case CODEC_ID_AAC_LATM: /* LATM/LOAS AAC syntax */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/mpeg",
++ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 4, "stream-format", G_TYPE_STRING, "loas",
+ NULL);
+ break;
+
+ case CODEC_ID_ASV1:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-asus",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-asus",
+ "asusversion", G_TYPE_INT, 1, NULL);
+ break;
+ case CODEC_ID_ASV2:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-asus",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-asus",
+ "asusversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case CODEC_ID_FFV1:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-ffv",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-ffv",
+ "ffvversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_4XM:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-4xm", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-4xm",
++ NULL);
+ break;
+
+ case CODEC_ID_XAN_WC3:
+ case CODEC_ID_XAN_WC4:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-xan",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-xan",
+ "wcversion", G_TYPE_INT, 3 - CODEC_ID_XAN_WC3 + codec_id, NULL);
+ break;
+
+ case CODEC_ID_CLJR:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-cirrus-logic-accupak", NULL);
+ break;
+
+ case CODEC_ID_FRAPS:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-fraps",
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-fraps",
+ NULL);
+ break;
+
+@@ -1078,26 +1310,28 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+
+ case CODEC_ID_VCR1:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-ati-vcr",
+- "vcrversion", G_TYPE_INT, 1, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-ati-vcr", "vcrversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_RPZA:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-apple-video",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-apple-video", NULL);
+ break;
+
+ case CODEC_ID_CINEPAK:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-cinepak",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-cinepak", NULL);
+ break;
+
+ /* WS_VQA belogns here (order) */
+
+ case CODEC_ID_MSRLE:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-rle",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-rle",
+ "layout", G_TYPE_STRING, "microsoft", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+@@ -1108,7 +1342,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+
+ case CODEC_ID_QTRLE:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-rle",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-rle",
+ "layout", G_TYPE_STRING, "quicktime", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+@@ -1120,54 +1355,59 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_MSVIDEO1:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-msvideocodec", "msvideoversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case CODEC_ID_WMV3:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-wmv",
+- "wmvversion", G_TYPE_INT, 3, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
++ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WMV3", NULL);
+ break;
+ case CODEC_ID_VC1:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-wmv",
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WVC1", NULL);
+ break;
+ case CODEC_ID_QDM2:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-qdm2", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-qdm2",
++ NULL);
+ break;
+
+ case CODEC_ID_MSZH:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-mszh", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-mszh",
++ NULL);
+ break;
+
+ case CODEC_ID_ZLIB:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-zlib", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-zlib",
++ NULL);
+ break;
+
+ case CODEC_ID_TRUEMOTION1:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-truemotion",
+- "trueversion", G_TYPE_INT, 1, NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-truemotion", "trueversion", G_TYPE_INT, 1, NULL);
+ break;
+ case CODEC_ID_TRUEMOTION2:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-truemotion",
+- "trueversion", G_TYPE_INT, 2, NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-truemotion", "trueversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case CODEC_ID_ULTI:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-ultimotion",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-ultimotion", NULL);
+ break;
+
+ case CODEC_ID_TSCC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-camtasia",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-camtasia", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+@@ -1178,142 +1418,164 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_KMVC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-kmvc", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-kmvc",
++ NULL);
+ break;
+
+ case CODEC_ID_NUV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-nuv", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-nuv",
++ NULL);
+ break;
+
+ case CODEC_ID_GIF:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "image/gif", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/gif",
++ NULL);
+ break;
+
+ case CODEC_ID_PNG:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "image/png", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/png",
++ NULL);
+ break;
+
+ case CODEC_ID_PPM:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "image/ppm", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/ppm",
++ NULL);
+ break;
+
+ case CODEC_ID_PBM:
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, "image/pbm", NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/pbm",
++ NULL);
+ break;
+
+ case CODEC_ID_PAM:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/x-portable-anymap", NULL);
+ break;
+
+ case CODEC_ID_PGM:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode,
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/x-portable-graymap", NULL);
+ break;
+
+ case CODEC_ID_PCX:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/x-pcx", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-pcx",
++ NULL);
+ break;
+
+ case CODEC_ID_SGI:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/x-sgi", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-sgi",
++ NULL);
+ break;
+
+ case CODEC_ID_TARGA:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/x-tga", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-tga",
++ NULL);
+ break;
+
+ case CODEC_ID_TIFF:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/tiff", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/tiff",
++ NULL);
+ break;
+
+ case CODEC_ID_SUNRAST:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "image/x-sun-raster",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "image/x-sun-raster", NULL);
+ break;
+
+ case CODEC_ID_SMC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-smc", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-smc",
++ NULL);
+ break;
+
+ case CODEC_ID_QDRAW:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-qdrw", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-qdrw",
++ NULL);
+ break;
+
+ case CODEC_ID_DNXHD:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-dnxhd",
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dnxhd",
+ NULL);
+ break;
+
+ case CODEC_ID_PRORES:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-prores",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-prores", NULL);
+ break;
+
+ case CODEC_ID_MIMIC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-mimic",
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-mimic",
+ NULL);
+ break;
+
+ case CODEC_ID_VMNC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-vmnc", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vmnc",
++ NULL);
+ break;
+
+ case CODEC_ID_TRUESPEECH:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-truespeech",
+- NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
++ "audio/x-truespeech", NULL);
+ break;
+
+ case CODEC_ID_QCELP:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/qcelp", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/qcelp",
++ NULL);
+ break;
+
+ case CODEC_ID_AMV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-amv", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-amv",
++ NULL);
+ break;
+
+ case CODEC_ID_AASC:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-aasc", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-aasc",
++ NULL);
+ break;
+
+ case CODEC_ID_LOCO:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-loco", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-loco",
++ NULL);
+ break;
+
+ case CODEC_ID_ZMBV:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-zmbv", NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-zmbv",
++ NULL);
+ break;
+
+ case CODEC_ID_LAGARITH:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-lagarith",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-lagarith", NULL);
+ break;
+
+ case CODEC_ID_CSCD:
+ caps =
+- gst_ff_vid_caps_new (context, codec_id, encode, "video/x-camstudio",
+- NULL);
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
++ "video/x-camstudio", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+@@ -1328,8 +1590,6 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_FLIC:
+ case CODEC_ID_VMDVIDEO:
+ case CODEC_ID_VMDAUDIO:
+- case CODEC_ID_SONIC:
+- case CODEC_ID_SONIC_LS:
+ case CODEC_ID_SNOW:
+ case CODEC_ID_VIXL:
+ case CODEC_ID_QPEG:
+@@ -1379,7 +1639,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+ }
+
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-raw",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format),
+ "layout", G_TYPE_STRING, "interleaved", NULL);
+ }
+@@ -1387,18 +1648,20 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_PCM_MULAW:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-mulaw",
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mulaw",
+ NULL);
+ break;
+
+ case CODEC_ID_PCM_ALAW:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-alaw", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-alaw",
++ NULL);
+ break;
+
+ case CODEC_ID_ADPCM_G722:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/G722", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/G722",
++ NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+@@ -1408,7 +1671,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ case CODEC_ID_ADPCM_G726:
+ {
+ /* the G726 decoder can also handle G721 */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-adpcm",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-adpcm",
+ "layout", G_TYPE_STRING, "g726", NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+@@ -1541,7 +1805,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ /* FIXME: someone please check whether we need additional properties
+ * in this caps definition. */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-adpcm",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-adpcm",
+ "layout", G_TYPE_STRING, layout, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+@@ -1551,34 +1816,39 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+
+ case CODEC_ID_AMR_NB:
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/AMR", NULL);
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/AMR",
++ NULL);
+ break;
+
+ case CODEC_ID_AMR_WB:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/AMR-WB", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/AMR-WB",
++ NULL);
+ break;
+
+ case CODEC_ID_GSM:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-gsm", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-gsm",
++ NULL);
+ break;
+
+ case CODEC_ID_GSM_MS:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/ms-gsm", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/ms-gsm",
++ NULL);
+ break;
+
+ case CODEC_ID_NELLYMOSER:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-nellymoser",
+- NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
++ "audio/x-nellymoser", NULL);
+ break;
+
+ case CODEC_ID_SIPR:
+ {
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-sipro",
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-sipro",
+ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+@@ -1610,7 +1880,7 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ /* FIXME: properties? */
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode,
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-pn-realaudio", "raversion", G_TYPE_INT, version, NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+@@ -1647,7 +1917,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ /* FIXME: someone please check whether we need additional properties
+ * in this caps definition. */
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-dpcm",
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dpcm",
+ "layout", G_TYPE_STRING, layout, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+@@ -1662,7 +1933,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+
+ case CODEC_ID_ALAC:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-alac", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-alac",
++ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+@@ -1677,6 +1949,10 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ }
+ break;
+
++ case CODEC_ID_S302M:
++ caps = gst_caps_new_empty_simple ("audio/x-smpte-302m");
++ break;
++
+ case CODEC_ID_DVD_SUBTITLE:
+ case CODEC_ID_DVB_SUBTITLE:
+ caps = NULL;
+@@ -1686,7 +1962,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+ case CODEC_ID_TTA:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-tta", NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-tta",
++ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+@@ -1694,8 +1971,8 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ break;
+ case CODEC_ID_TWINVQ:
+ caps =
+- gst_ff_aud_caps_new (context, codec_id, encode, "audio/x-twin-vq",
+- NULL);
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
++ "audio/x-twin-vq", NULL);
+ break;
+ default:
+ GST_DEBUG ("Unknown codec ID %d, please add mapping here", codec_id);
+@@ -1714,12 +1991,14 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ switch (codec->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ mime = g_strdup_printf ("video/x-gst-av-%s", codec->name);
+- caps = gst_ff_vid_caps_new (context, codec_id, encode, mime, NULL);
++ caps =
++ gst_ff_vid_caps_new (context, NULL, codec_id, encode, mime, NULL);
+ g_free (mime);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ mime = g_strdup_printf ("audio/x-gst-av-%s", codec->name);
+- caps = gst_ff_aud_caps_new (context, codec_id, encode, mime, NULL);
++ caps =
++ gst_ff_aud_caps_new (context, NULL, codec_id, encode, mime, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+@@ -1743,11 +2022,6 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ gst_buffer_unref (data);
+ }
+
+- /* palette */
+- if (context) {
+- gst_ffmpeg_set_palette (caps, context);
+- }
+-
+ GST_LOG ("caps for codec_id=%d: %" GST_PTR_FORMAT, codec_id, caps);
+
+ } else {
+@@ -1764,7 +2038,7 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ * See below for usefullness
+ */
+
+-GstCaps *
++static GstCaps *
+ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
+ enum CodecID codec_id)
+ {
+@@ -1774,7 +2048,7 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
+ format = gst_ffmpeg_pixfmt_to_videoformat (pix_fmt);
+
+ if (format != GST_VIDEO_FORMAT_UNKNOWN) {
+- caps = gst_ff_vid_caps_new (context, codec_id, TRUE, "video/x-raw",
++ caps = gst_ff_vid_caps_new (context, NULL, codec_id, TRUE, "video/x-raw",
+ "format", G_TYPE_STRING, gst_video_format_to_string (format), NULL);
+ }
+
+@@ -1787,41 +2061,55 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
+ return caps;
+ }
+
+-/* Convert a FFMPEG Sample Format and optional AVCodecContext
+- * to a GstCaps. If the context is ommitted, no fixed values
+- * for video/audio size will be included in the GstCaps
+- *
+- * See below for usefullness
+- */
+-
+-static GstCaps *
+-gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
+- AVCodecContext * context, enum CodecID codec_id)
++GstAudioFormat
++gst_ffmpeg_smpfmt_to_audioformat (enum AVSampleFormat sample_fmt)
+ {
+- GstCaps *caps = NULL;
+- GstAudioFormat format;
+-
+ switch (sample_fmt) {
++ case AV_SAMPLE_FMT_U8:
++ case AV_SAMPLE_FMT_U8P:
++ return GST_AUDIO_FORMAT_U8;
++ break;
+ case AV_SAMPLE_FMT_S16:
+- format = GST_AUDIO_FORMAT_S16;
++ case AV_SAMPLE_FMT_S16P:
++ return GST_AUDIO_FORMAT_S16;
+ break;
+ case AV_SAMPLE_FMT_S32:
+- format = GST_AUDIO_FORMAT_S32;
++ case AV_SAMPLE_FMT_S32P:
++ return GST_AUDIO_FORMAT_S32;
+ break;
+ case AV_SAMPLE_FMT_FLT:
+- format = GST_AUDIO_FORMAT_F32;
++ case AV_SAMPLE_FMT_FLTP:
++ return GST_AUDIO_FORMAT_F32;
+ break;
+ case AV_SAMPLE_FMT_DBL:
+- format = GST_AUDIO_FORMAT_F64;
++ case AV_SAMPLE_FMT_DBLP:
++ return GST_AUDIO_FORMAT_F64;
+ break;
+ default:
+ /* .. */
+- format = GST_AUDIO_FORMAT_UNKNOWN;
++ return GST_AUDIO_FORMAT_UNKNOWN;
+ break;
+ }
++}
++
++/* Convert a FFMPEG Sample Format and optional AVCodecContext
++ * to a GstCaps. If the context is omitted, no fixed values
++ * for video/audio size will be included in the GstCaps
++ *
++ * See below for usefulness
++ */
++
++static GstCaps *
++gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
++ AVCodecContext * context, AVCodec * codec, enum CodecID codec_id)
++{
++ GstCaps *caps = NULL;
++ GstAudioFormat format;
++
++ format = gst_ffmpeg_smpfmt_to_audioformat (sample_fmt);
+
+ if (format != GST_AUDIO_FORMAT_UNKNOWN) {
+- caps = gst_ff_aud_caps_new (context, codec_id, TRUE, "audio/x-raw",
++ caps = gst_ff_aud_caps_new (context, codec, codec_id, TRUE, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format),
+ "layout", G_TYPE_STRING, "interleaved", NULL);
+ GST_LOG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
+@@ -1832,6 +2120,22 @@ gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
+ return caps;
+ }
+
++static gboolean
++caps_has_field (GstCaps * caps, const gchar * field)
++{
++ guint i, n;
++
++ n = gst_caps_get_size (caps);
++ for (i = 0; i < n; i++) {
++ GstStructure *s = gst_caps_get_structure (caps, i);
++
++ if (gst_structure_has_field (s, field))
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
+ GstCaps *
+ gst_ffmpeg_codectype_to_audio_caps (AVCodecContext * context,
+ enum CodecID codec_id, gboolean encode, AVCodec * codec)
+@@ -1846,32 +2150,17 @@ gst_ffmpeg_codectype_to_audio_caps (AVCodecContext * context,
+
+ if (context) {
+ /* Specific codec context */
+- caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context, codec_id);
+- } else if (codec && codec->sample_fmts) {
+- GstCaps *temp;
+- int i;
+-
+- caps = gst_caps_new_empty ();
+- for (i = 0; codec->sample_fmts[i] != -1; i++) {
+- temp =
+- gst_ffmpeg_smpfmt_to_caps (codec->sample_fmts[i], context, codec_id);
+- if (temp != NULL)
+- gst_caps_append (caps, temp);
+- }
++ caps =
++ gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context, codec,
++ codec_id);
+ } else {
+- GstCaps *temp;
+- enum AVSampleFormat i;
+- AVCodecContext ctx = { 0, };
+-
+- ctx.channels = -1;
+- caps = gst_caps_new_empty ();
+- for (i = 0; i <= AV_SAMPLE_FMT_DBL; i++) {
+- temp = gst_ffmpeg_smpfmt_to_caps (i, encode ? &ctx : NULL, codec_id);
+- if (temp != NULL) {
+- gst_caps_append (caps, temp);
+- }
+- }
++ caps = gst_ff_aud_caps_new (context, codec, codec_id, encode, "audio/x-raw",
++ "layout", G_TYPE_STRING, "interleaved", NULL);
++ if (!caps_has_field (caps, "format"))
++ gst_ffmpeg_audio_set_sample_fmts (caps,
++ codec ? codec->sample_fmts : NULL);
+ }
++
+ return caps;
+ }
+
+@@ -1887,50 +2176,12 @@ gst_ffmpeg_codectype_to_video_caps (AVCodecContext * context,
+ if (context) {
+ caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt, context, codec_id);
+ } else {
+- GstCaps *temp;
+- enum PixelFormat i;
+- AVCodecContext ctx = { 0, };
+-
+- caps = gst_caps_new_empty ();
+- for (i = 0; i < PIX_FMT_NB; i++) {
+- ctx.width = -1;
+- ctx.pix_fmt = i;
+- temp = gst_ffmpeg_pixfmt_to_caps (i, encode ? &ctx : NULL, codec_id);
+- if (temp != NULL) {
+- gst_caps_append (caps, temp);
+- }
+- }
+- }
+- return caps;
+-}
+-
+-/* Convert a FFMPEG codec Type and optional AVCodecContext
+- * to a GstCaps. If the context is ommitted, no fixed values
+- * for video/audio size will be included in the GstCaps
+- *
+- * AVMediaType is primarily meant for uncompressed data GstCaps!
+- */
+-
+-GstCaps *
+-gst_ffmpeg_codectype_to_caps (enum AVMediaType codec_type,
+- AVCodecContext * context, enum CodecID codec_id, gboolean encode)
+-{
+- GstCaps *caps;
+-
+- switch (codec_type) {
+- case AVMEDIA_TYPE_VIDEO:
+- caps =
+- gst_ffmpeg_codectype_to_video_caps (context, codec_id, encode, NULL);
+- break;
+- case AVMEDIA_TYPE_AUDIO:
+- caps =
+- gst_ffmpeg_codectype_to_audio_caps (context, codec_id, encode, NULL);
+- break;
+- default:
+- caps = NULL;
+- break;
++ caps =
++ gst_ff_vid_caps_new (context, codec, codec_id, encode, "video/x-raw",
++ NULL);
++ if (!caps_has_field (caps, "format"))
++ gst_ffmpeg_video_set_pix_fmts (caps, codec ? codec->pix_fmts : NULL);
+ }
+-
+ return caps;
+ }
+
+@@ -2033,8 +2284,6 @@ gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
+ context->sample_aspect_ratio.num);
+ }
+
+- gst_ffmpeg_get_palette (caps, context);
+-
+ if (!raw)
+ return;
+
+@@ -2230,6 +2484,112 @@ gst_ffmpeg_videoinfo_to_context (GstVideoInfo * info, AVCodecContext * context)
+ gst_ffmpeg_videoformat_to_pixfmt (GST_VIDEO_INFO_FORMAT (info));
+ }
+
++void
++gst_ffmpeg_audioinfo_to_context (GstAudioInfo * info, AVCodecContext * context)
++{
++ const AVCodec *codec;
++ const enum AVSampleFormat *smpl_fmts;
++ enum AVSampleFormat smpl_fmt = -1;
++
++ context->channels = info->channels;
++ context->sample_rate = info->rate;
++ context->channel_layout =
++ gst_ffmpeg_channel_positions_to_layout (info->position, info->channels);
++
++ codec = context->codec;
++
++ smpl_fmts = codec->sample_fmts;
++
++ switch (info->finfo->format) {
++ case GST_AUDIO_FORMAT_F32:
++ if (smpl_fmts) {
++ while (*smpl_fmts != -1) {
++ if (*smpl_fmts == AV_SAMPLE_FMT_FLT) {
++ smpl_fmt = *smpl_fmts;
++ break;
++ } else if (*smpl_fmts == AV_SAMPLE_FMT_FLTP) {
++ smpl_fmt = *smpl_fmts;
++ }
++
++ smpl_fmts++;
++ }
++ } else {
++ smpl_fmt = AV_SAMPLE_FMT_FLT;
++ }
++ break;
++ case GST_AUDIO_FORMAT_F64:
++ if (smpl_fmts) {
++ while (*smpl_fmts != -1) {
++ if (*smpl_fmts == AV_SAMPLE_FMT_DBL) {
++ smpl_fmt = *smpl_fmts;
++ break;
++ } else if (*smpl_fmts == AV_SAMPLE_FMT_DBLP) {
++ smpl_fmt = *smpl_fmts;
++ }
++
++ smpl_fmts++;
++ }
++ } else {
++ smpl_fmt = AV_SAMPLE_FMT_DBL;
++ }
++ break;
++ case GST_AUDIO_FORMAT_S32:
++ if (smpl_fmts) {
++ while (*smpl_fmts != -1) {
++ if (*smpl_fmts == AV_SAMPLE_FMT_S32) {
++ smpl_fmt = *smpl_fmts;
++ break;
++ } else if (*smpl_fmts == AV_SAMPLE_FMT_S32P) {
++ smpl_fmt = *smpl_fmts;
++ }
++
++ smpl_fmts++;
++ }
++ } else {
++ smpl_fmt = AV_SAMPLE_FMT_S32;
++ }
++ break;
++ case GST_AUDIO_FORMAT_S16:
++ if (smpl_fmts) {
++ while (*smpl_fmts != -1) {
++ if (*smpl_fmts == AV_SAMPLE_FMT_S16) {
++ smpl_fmt = *smpl_fmts;
++ break;
++ } else if (*smpl_fmts == AV_SAMPLE_FMT_S16P) {
++ smpl_fmt = *smpl_fmts;
++ }
++
++ smpl_fmts++;
++ }
++ } else {
++ smpl_fmt = AV_SAMPLE_FMT_S16;
++ }
++ break;
++ case GST_AUDIO_FORMAT_U8:
++ if (smpl_fmts) {
++ while (*smpl_fmts != -1) {
++ if (*smpl_fmts == AV_SAMPLE_FMT_U8) {
++ smpl_fmt = *smpl_fmts;
++ break;
++ } else if (*smpl_fmts == AV_SAMPLE_FMT_U8P) {
++ smpl_fmt = *smpl_fmts;
++ }
++
++ smpl_fmts++;
++ }
++ } else {
++ smpl_fmt = AV_SAMPLE_FMT_U8;
++ }
++ break;
++ default:
++ break;
++ }
++
++ g_assert (smpl_fmt != -1);
++
++ context->sample_fmt = smpl_fmt;
++}
++
+ /* Convert a GstCaps and a FFMPEG codec Type to a
+ * AVCodecContext. If the context is ommitted, no fixed values
+ * for video/audio size will be included in the context
+diff --git a/ext/libav/gstavcodecmap.h b/ext/libav/gstavcodecmap.h
+index 52e5bec..01ce9b1 100644
+--- a/ext/libav/gstavcodecmap.h
++++ b/ext/libav/gstavcodecmap.h
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifndef __GST_FFMPEG_CODECMAP_H__
+@@ -41,11 +41,6 @@ gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
+ */
+
+ GstCaps *
+-gst_ffmpeg_codectype_to_caps (enum AVMediaType codec_type,
+- AVCodecContext *context,
+- enum CodecID codec_id,
+- gboolean encode);
+-GstCaps *
+ gst_ffmpeg_codectype_to_audio_caps (AVCodecContext *context,
+ enum CodecID codec_id,
+ gboolean encode,
+@@ -91,9 +86,15 @@ void
+ gst_ffmpeg_videoinfo_to_context (GstVideoInfo *info,
+ AVCodecContext *context);
+
++void
++gst_ffmpeg_audioinfo_to_context (GstAudioInfo *info,
++ AVCodecContext *context);
++
+ GstVideoFormat gst_ffmpeg_pixfmt_to_videoformat (enum PixelFormat pixfmt);
+ enum PixelFormat gst_ffmpeg_videoformat_to_pixfmt (GstVideoFormat format);
+
++GstAudioFormat gst_ffmpeg_smpfmt_to_audioformat (enum AVSampleFormat sample_fmt);
++
+ /*
+ * _formatid_to_caps () is meant for muxers/demuxers, it
+ * transforms a name (ffmpeg way of ID'ing these, why don't
+@@ -104,19 +105,6 @@ enum PixelFormat gst_ffmpeg_videoformat_to_pixfmt (GstVideoFormat format);
+ GstCaps *
+ gst_ffmpeg_formatid_to_caps (const gchar *format_name);
+
+-GstVideoFormat
+-gst_ffmpeg_pixfmt_to_video_format (enum PixelFormat pix_fmt);
+-
+-/* Convert a FFMPEG Pixel Format and optional AVCodecContext
+- * to a GstCaps. If the context is ommitted, no fixed values
+- * for video/audio size will be included in the GstCaps
+- *
+- * See below for usefullness
+- */
+-
+-GstCaps *
+-gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context, enum CodecID codec_id);
+-
+ /*
+ * _formatid_get_codecids () can be used to get the codecIDs
+ * (CODEC_ID_NONE-terminated list) that fit that specific
+@@ -131,7 +119,7 @@ gst_ffmpeg_formatid_get_codecids (const gchar *format_name,
+
+
+ gboolean
+-gst_ffmpeg_channel_layout_to_gst (AVCodecContext * context,
++gst_ffmpeg_channel_layout_to_gst (guint64 channel_layout, gint channels,
+ GstAudioChannelPosition * pos);
+
+ #endif /* __GST_FFMPEG_CODECMAP_H__ */
+diff --git a/ext/libav/gstavdec.c b/ext/libav/gstavdec.c
+deleted file mode 100644
+index 8291099..0000000
+--- a/ext/libav/gstavdec.c
++++ /dev/null
+@@ -1,1457 +0,0 @@
+-/* GStreamer
+- * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Library General Public
+- * License as published by the Free Software Foundation; either
+- * version 2 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Library General Public License for more details.
+- *
+- * You should have received a copy of the GNU Library General Public
+- * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
+- */
+-
+-#ifdef HAVE_CONFIG_H
+-#include "config.h"
+-#endif
+-
+-#include <assert.h>
+-#include <string.h>
+-
+-#include <libavcodec/avcodec.h>
+-
+-#include <gst/gst.h>
+-
+-#include "gstav.h"
+-#include "gstavcodecmap.h"
+-#include "gstavutils.h"
+-
+-GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
+-
+-typedef struct _GstFFMpegAudDec GstFFMpegAudDec;
+-
+-#define MAX_TS_MASK 0xff
+-
+-/* for each incomming buffer we keep all timing info in a structure like this.
+- * We keep a circular array of these structures around to store the timing info.
+- * The index in the array is what we pass as opaque data (to pictures) and
+- * pts (to parsers) so that ffmpeg can remember them for us. */
+-typedef struct
+-{
+- gint idx;
+- GstClockTime dts;
+- GstClockTime pts;
+- GstClockTime duration;
+- gint64 offset;
+-} GstTSInfo;
+-
+-struct _GstFFMpegAudDec
+-{
+- GstElement element;
+-
+- /* We need to keep track of our pads, so we do so here. */
+- GstPad *srcpad;
+- GstPad *sinkpad;
+-
+- /* decoding */
+- AVCodecContext *context;
+- gboolean opened;
+-
+- /* current output format */
+- gint channels, samplerate, depth;
+- GstAudioChannelPosition ffmpeg_layout[64], gst_layout[64];
+-
+- gboolean discont;
+- gboolean clear_ts;
+-
+- /* for tracking DTS/PTS */
+- GstClockTime next_out;
+-
+- /* parsing */
+- gboolean turnoff_parser; /* used for turning off aac raw parsing
+- * See bug #566250 */
+- AVCodecParserContext *pctx;
+- GstBuffer *pcache;
+-
+- /* clipping segment */
+- GstSegment segment;
+-
+- GstTSInfo ts_info[MAX_TS_MASK + 1];
+- gint ts_idx;
+-
+- /* reverse playback queue */
+- GList *queued;
+-
+- /* prevent reopening the decoder on GST_EVENT_CAPS when caps are same as last time. */
+- GstCaps *last_caps;
+-};
+-
+-typedef struct _GstFFMpegAudDecClass GstFFMpegAudDecClass;
+-
+-struct _GstFFMpegAudDecClass
+-{
+- GstElementClass parent_class;
+-
+- AVCodec *in_plugin;
+- GstPadTemplate *srctempl, *sinktempl;
+-};
+-
+-#define GST_TS_INFO_NONE &ts_info_none
+-static const GstTSInfo ts_info_none = { -1, -1, -1, -1 };
+-
+-static const GstTSInfo *
+-gst_ts_info_store (GstFFMpegAudDec * dec, GstClockTime dts, GstClockTime pts,
+- GstClockTime duration, gint64 offset)
+-{
+- gint idx = dec->ts_idx;
+- dec->ts_info[idx].idx = idx;
+- dec->ts_info[idx].dts = dts;
+- dec->ts_info[idx].pts = pts;
+- dec->ts_info[idx].duration = duration;
+- dec->ts_info[idx].offset = offset;
+- dec->ts_idx = (idx + 1) & MAX_TS_MASK;
+-
+- return &dec->ts_info[idx];
+-}
+-
+-static const GstTSInfo *
+-gst_ts_info_get (GstFFMpegAudDec * dec, gint idx)
+-{
+- if (G_UNLIKELY (idx < 0 || idx > MAX_TS_MASK))
+- return GST_TS_INFO_NONE;
+-
+- return &dec->ts_info[idx];
+-}
+-
+-#define GST_TYPE_FFMPEGDEC \
+- (gst_ffmpegauddec_get_type())
+-#define GST_FFMPEGDEC(obj) \
+- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegAudDec))
+-#define GST_FFMPEGAUDDEC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegAudDecClass))
+-#define GST_IS_FFMPEGDEC(obj) \
+- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
+-#define GST_IS_FFMPEGAUDDEC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
+-
+-/* A number of function prototypes are given so we can refer to them later. */
+-static void gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass);
+-static void gst_ffmpegauddec_class_init (GstFFMpegAudDecClass * klass);
+-static void gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec);
+-static void gst_ffmpegauddec_finalize (GObject * object);
+-
+-static gboolean gst_ffmpegauddec_setcaps (GstFFMpegAudDec * ffmpegdec,
+- GstCaps * caps);
+-static gboolean gst_ffmpegauddec_sink_event (GstPad * pad, GstObject * parent,
+- GstEvent * event);
+-static gboolean gst_ffmpegauddec_sink_query (GstPad * pad, GstObject * parent,
+- GstQuery * query);
+-static GstFlowReturn gst_ffmpegauddec_chain (GstPad * pad, GstObject * parent,
+- GstBuffer * buf);
+-
+-static GstStateChangeReturn gst_ffmpegauddec_change_state (GstElement * element,
+- GstStateChange transition);
+-
+-static gboolean gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec,
+- gboolean force);
+-
+-static void gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec);
+-
+-#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("avdec-params")
+-
+-static GstElementClass *parent_class = NULL;
+-
+-static void
+-gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass)
+-{
+- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
+- GstPadTemplate *sinktempl, *srctempl;
+- GstCaps *sinkcaps, *srccaps;
+- AVCodec *in_plugin;
+- gchar *longname, *description;
+-
+- in_plugin =
+- (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
+- GST_FFDEC_PARAMS_QDATA);
+- g_assert (in_plugin != NULL);
+-
+- /* construct the element details struct */
+- longname = g_strdup_printf ("libav %s decoder", in_plugin->long_name);
+- description = g_strdup_printf ("libav %s decoder", in_plugin->name);
+- gst_element_class_set_metadata (element_class, longname,
+- "Codec/Decoder/Audio", description,
+- "Wim Taymans <wim.taymans@gmail.com>, "
+- "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
+- "Edward Hervey <bilboed@bilboed.com>");
+- g_free (longname);
+- g_free (description);
+-
+- /* get the caps */
+- sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
+- if (!sinkcaps) {
+- GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
+- sinkcaps = gst_caps_from_string ("unknown/unknown");
+- }
+- srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
+- in_plugin->id, FALSE, in_plugin);
+- if (!srccaps) {
+- GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
+- srccaps = gst_caps_from_string ("unknown/unknown");
+- }
+-
+- /* pad templates */
+- sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
+- GST_PAD_ALWAYS, sinkcaps);
+- srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
+-
+- gst_element_class_add_pad_template (element_class, srctempl);
+- gst_element_class_add_pad_template (element_class, sinktempl);
+-
+- klass->in_plugin = in_plugin;
+- klass->srctempl = srctempl;
+- klass->sinktempl = sinktempl;
+-}
+-
+-static void
+-gst_ffmpegauddec_class_init (GstFFMpegAudDecClass * klass)
+-{
+- GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
+- GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
+-
+- parent_class = g_type_class_peek_parent (klass);
+-
+- gobject_class->finalize = gst_ffmpegauddec_finalize;
+-
+- gstelement_class->change_state = gst_ffmpegauddec_change_state;
+-}
+-
+-static void
+-gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec)
+-{
+- GstFFMpegAudDecClass *oclass;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- /* setup pads */
+- ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
+- gst_pad_set_query_function (ffmpegdec->sinkpad,
+- GST_DEBUG_FUNCPTR (gst_ffmpegauddec_sink_query));
+- gst_pad_set_event_function (ffmpegdec->sinkpad,
+- GST_DEBUG_FUNCPTR (gst_ffmpegauddec_sink_event));
+- gst_pad_set_chain_function (ffmpegdec->sinkpad,
+- GST_DEBUG_FUNCPTR (gst_ffmpegauddec_chain));
+- gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
+-
+- ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
+- gst_pad_use_fixed_caps (ffmpegdec->srcpad);
+- gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
+-
+- /* some ffmpeg data */
+- ffmpegdec->context = avcodec_alloc_context ();
+- ffmpegdec->pctx = NULL;
+- ffmpegdec->pcache = NULL;
+- ffmpegdec->opened = FALSE;
+-
+- gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
+-}
+-
+-static void
+-gst_ffmpegauddec_finalize (GObject * object)
+-{
+- GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) object;
+-
+- if (ffmpegdec->context != NULL)
+- av_free (ffmpegdec->context);
+-
+- G_OBJECT_CLASS (parent_class)->finalize (object);
+-}
+-
+-static void
+-gst_ffmpegauddec_reset_ts (GstFFMpegAudDec * ffmpegdec)
+-{
+- ffmpegdec->next_out = GST_CLOCK_TIME_NONE;
+-}
+-
+-/* with LOCK */
+-static void
+-gst_ffmpegauddec_close (GstFFMpegAudDec * ffmpegdec)
+-{
+- if (!ffmpegdec->opened)
+- return;
+-
+- GST_LOG_OBJECT (ffmpegdec, "closing libav codec");
+-
+- gst_caps_replace (&ffmpegdec->last_caps, NULL);
+-
+- if (ffmpegdec->context->priv_data)
+- gst_ffmpeg_avcodec_close (ffmpegdec->context);
+- ffmpegdec->opened = FALSE;
+-
+- if (ffmpegdec->context->palctrl) {
+- av_free (ffmpegdec->context->palctrl);
+- ffmpegdec->context->palctrl = NULL;
+- }
+-
+- if (ffmpegdec->context->extradata) {
+- av_free (ffmpegdec->context->extradata);
+- ffmpegdec->context->extradata = NULL;
+- }
+-
+- if (ffmpegdec->pctx) {
+- if (ffmpegdec->pcache) {
+- gst_buffer_unref (ffmpegdec->pcache);
+- ffmpegdec->pcache = NULL;
+- }
+- av_parser_close (ffmpegdec->pctx);
+- ffmpegdec->pctx = NULL;
+- }
+-}
+-
+-/* with LOCK */
+-static gboolean
+-gst_ffmpegauddec_open (GstFFMpegAudDec * ffmpegdec)
+-{
+- GstFFMpegAudDecClass *oclass;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
+- goto could_not_open;
+-
+- ffmpegdec->opened = TRUE;
+-
+- GST_LOG_OBJECT (ffmpegdec, "Opened libav codec %s, id %d",
+- oclass->in_plugin->name, oclass->in_plugin->id);
+-
+- if (!ffmpegdec->turnoff_parser) {
+- ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
+- if (ffmpegdec->pctx)
+- GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
+- else
+- GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
+- } else {
+- GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
+- }
+-
+- ffmpegdec->samplerate = 0;
+- ffmpegdec->channels = 0;
+- ffmpegdec->depth = 0;
+-
+- gst_ffmpegauddec_reset_ts (ffmpegdec);
+-
+- return TRUE;
+-
+- /* ERRORS */
+-could_not_open:
+- {
+- gst_ffmpegauddec_close (ffmpegdec);
+- GST_DEBUG_OBJECT (ffmpegdec, "avdec_%s: Failed to open libav codec",
+- oclass->in_plugin->name);
+- return FALSE;
+- }
+-}
+-
+-static gboolean
+-gst_ffmpegauddec_setcaps (GstFFMpegAudDec * ffmpegdec, GstCaps * caps)
+-{
+- GstFFMpegAudDecClass *oclass;
+- GstStructure *structure;
+- gboolean ret = TRUE;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");
+-
+- GST_OBJECT_LOCK (ffmpegdec);
+-
+- /* close old session */
+- if (ffmpegdec->opened) {
+- GST_OBJECT_UNLOCK (ffmpegdec);
+- gst_ffmpegauddec_drain (ffmpegdec);
+- GST_OBJECT_LOCK (ffmpegdec);
+- gst_ffmpegauddec_close (ffmpegdec);
+-
+- /* and reset the defaults that were set when a context is created */
+- avcodec_get_context_defaults (ffmpegdec->context);
+- }
+-
+- /* default is to let format decide if it needs a parser */
+- ffmpegdec->turnoff_parser = FALSE;
+-
+- /* get size and so */
+- gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
+- oclass->in_plugin->type, caps, ffmpegdec->context);
+-
+- /* get pixel aspect ratio if it's set */
+- structure = gst_caps_get_structure (caps, 0);
+-
+- /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
+- if (oclass->in_plugin->id == CODEC_ID_AAC
+- || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
+- const gchar *format = gst_structure_get_string (structure, "stream-format");
+-
+- if (format == NULL || strcmp (format, "raw") == 0) {
+- ffmpegdec->turnoff_parser = TRUE;
+- }
+- }
+-
+- /* for FLAC, don't parse if it's already parsed */
+- if (oclass->in_plugin->id == CODEC_ID_FLAC) {
+- if (gst_structure_has_field (structure, "streamheader"))
+- ffmpegdec->turnoff_parser = TRUE;
+- }
+-
+- /* workaround encoder bugs */
+- ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
+- ffmpegdec->context->error_recognition = 1;
+-
+- /* open codec - we don't select an output pix_fmt yet,
+- * simply because we don't know! We only get it
+- * during playback... */
+- if (!gst_ffmpegauddec_open (ffmpegdec))
+- goto open_failed;
+-
+-done:
+- GST_OBJECT_UNLOCK (ffmpegdec);
+-
+- return ret;
+-
+- /* ERRORS */
+-open_failed:
+- {
+- GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
+- ret = FALSE;
+- goto done;
+- }
+-}
+-
+-static gboolean
+-gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec, gboolean force)
+-{
+- GstFFMpegAudDecClass *oclass;
+- GstCaps *caps;
+- gint depth;
+- GstAudioChannelPosition pos[64] = { 0, };
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- depth = av_smp_format_depth (ffmpegdec->context->sample_fmt);
+- gst_ffmpeg_channel_layout_to_gst (ffmpegdec->context, pos);
+-
+- if (!force && ffmpegdec->samplerate ==
+- ffmpegdec->context->sample_rate &&
+- ffmpegdec->channels == ffmpegdec->context->channels &&
+- ffmpegdec->depth == depth)
+- return TRUE;
+-
+- GST_DEBUG_OBJECT (ffmpegdec,
+- "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
+- ffmpegdec->samplerate, ffmpegdec->channels,
+- ffmpegdec->depth,
+- ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
+-
+- ffmpegdec->samplerate = ffmpegdec->context->sample_rate;
+- ffmpegdec->channels = ffmpegdec->context->channels;
+- ffmpegdec->depth = depth;
+- memcpy (ffmpegdec->ffmpeg_layout, pos,
+- sizeof (GstAudioChannelPosition) * ffmpegdec->context->channels);
+-
+- /* Get GStreamer channel layout */
+- memcpy (ffmpegdec->gst_layout,
+- ffmpegdec->ffmpeg_layout,
+- sizeof (GstAudioChannelPosition) * ffmpegdec->channels);
+- gst_audio_channel_positions_to_valid_order (ffmpegdec->gst_layout,
+- ffmpegdec->channels);
+-
+- caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
+- ffmpegdec->context, oclass->in_plugin->id, FALSE);
+-
+- if (caps == NULL)
+- goto no_caps;
+-
+- GST_LOG_OBJECT (ffmpegdec, "output caps %" GST_PTR_FORMAT, caps);
+-
+- if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
+- goto caps_failed;
+-
+- gst_caps_unref (caps);
+-
+- return TRUE;
+-
+- /* ERRORS */
+-no_caps:
+- {
+-#ifdef HAVE_LIBAV_UNINSTALLED
+- /* using internal ffmpeg snapshot */
+- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
+- ("Could not find GStreamer caps mapping for libav codec '%s'.",
+- oclass->in_plugin->name), (NULL));
+-#else
+- /* using external ffmpeg */
+- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
+- ("Could not find GStreamer caps mapping for libav codec '%s', and "
+- "you are using an external libavcodec. This is most likely due to "
+- "a packaging problem and/or libavcodec having been upgraded to a "
+- "version that is not compatible with this version of "
+- "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
+- "packages come from the same source/repository.",
+- oclass->in_plugin->name), (NULL));
+-#endif
+- return FALSE;
+- }
+-caps_failed:
+- {
+- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
+- ("Could not set caps for libav decoder (%s), not fixed?",
+- oclass->in_plugin->name));
+- gst_caps_unref (caps);
+-
+- return FALSE;
+- }
+-}
+-
+-static void
+-clear_queued (GstFFMpegAudDec * ffmpegdec)
+-{
+- g_list_foreach (ffmpegdec->queued, (GFunc) gst_mini_object_unref, NULL);
+- g_list_free (ffmpegdec->queued);
+- ffmpegdec->queued = NULL;
+-}
+-
+-static GstFlowReturn
+-flush_queued (GstFFMpegAudDec * ffmpegdec)
+-{
+- GstFlowReturn res = GST_FLOW_OK;
+-
+- while (ffmpegdec->queued) {
+- GstBuffer *buf = GST_BUFFER_CAST (ffmpegdec->queued->data);
+-
+- GST_LOG_OBJECT (ffmpegdec, "pushing buffer %p, offset %"
+- G_GUINT64_FORMAT ", timestamp %"
+- GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, buf,
+- GST_BUFFER_OFFSET (buf),
+- GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+- GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
+-
+- /* iterate ouput queue an push downstream */
+- res = gst_pad_push (ffmpegdec->srcpad, buf);
+-
+- ffmpegdec->queued =
+- g_list_delete_link (ffmpegdec->queued, ffmpegdec->queued);
+- }
+- return res;
+-}
+-
+-static void
+-gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
+-{
+- memset (packet, 0, sizeof (AVPacket));
+- packet->data = data;
+- packet->size = size;
+-}
+-
+-/* returns TRUE if buffer is within segment, else FALSE.
+- * if Buffer is on segment border, it's timestamp and duration will be clipped */
+-static gboolean
+-clip_audio_buffer (GstFFMpegAudDec * dec, GstBuffer * buf, GstClockTime in_ts,
+- GstClockTime in_dur)
+-{
+- GstClockTime stop;
+- gint64 diff;
+- guint64 ctime, cstop;
+- gboolean res = TRUE;
+- gsize size, offset;
+-
+- size = gst_buffer_get_size (buf);
+- offset = 0;
+-
+- GST_LOG_OBJECT (dec,
+- "timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
+- ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
+- size);
+-
+- /* can't clip without TIME segment */
+- if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
+- goto beach;
+-
+- /* we need a start time */
+- if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
+- goto beach;
+-
+- /* trust duration */
+- stop = in_ts + in_dur;
+-
+- res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &ctime,
+- &cstop);
+- if (G_UNLIKELY (!res))
+- goto out_of_segment;
+-
+- /* see if some clipping happened */
+- if (G_UNLIKELY ((diff = ctime - in_ts) > 0)) {
+- /* bring clipped time to bytes */
+- diff =
+- gst_util_uint64_scale_int (diff, dec->samplerate,
+- GST_SECOND) * (dec->depth * dec->channels);
+-
+- GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
+- G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
+-
+- offset += diff;
+- size -= diff;
+- }
+- if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
+- /* bring clipped time to bytes */
+- diff =
+- gst_util_uint64_scale_int (diff, dec->samplerate,
+- GST_SECOND) * (dec->depth * dec->channels);
+-
+- GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
+- G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
+-
+- size -= diff;
+- }
+- gst_buffer_resize (buf, offset, size);
+- GST_BUFFER_TIMESTAMP (buf) = ctime;
+- GST_BUFFER_DURATION (buf) = cstop - ctime;
+-
+-beach:
+- GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
+- return res;
+-
+- /* ERRORS */
+-out_of_segment:
+- {
+- GST_LOG_OBJECT (dec, "out of segment");
+- goto beach;
+- }
+-}
+-
+-static gint
+-gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
+- AVCodec * in_plugin, guint8 * data, guint size,
+- const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
+-{
+- gint len = -1;
+- gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+- GstClockTime out_pts, out_duration;
+- GstMapInfo map;
+- gint64 out_offset;
+- int16_t *odata;
+- AVPacket packet;
+-
+- GST_DEBUG_OBJECT (ffmpegdec,
+- "size:%d, offset:%" G_GINT64_FORMAT ", dts:%" GST_TIME_FORMAT ", pts:%"
+- GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", ffmpegdec->next_out:%"
+- GST_TIME_FORMAT, size, dec_info->offset, GST_TIME_ARGS (dec_info->dts),
+- GST_TIME_ARGS (dec_info->pts), GST_TIME_ARGS (dec_info->duration),
+- GST_TIME_ARGS (ffmpegdec->next_out));
+-
+- *outbuf = new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE);
+-
+- gst_buffer_map (*outbuf, &map, GST_MAP_WRITE);
+- odata = (int16_t *) map.data;
+-
+- gst_avpacket_init (&packet, data, size);
+- len = avcodec_decode_audio3 (ffmpegdec->context, odata, &have_data, &packet);
+-
+- GST_DEBUG_OBJECT (ffmpegdec,
+- "Decode audio: len=%d, have_data=%d", len, have_data);
+-
+- if (len >= 0 && have_data > 0) {
+- GstAudioFormat fmt;
+-
+- /* Buffer size */
+- gst_buffer_unmap (*outbuf, &map);
+- gst_buffer_resize (*outbuf, 0, have_data);
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
+- if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
+- gst_buffer_unref (*outbuf);
+- *outbuf = NULL;
+- len = -1;
+- goto beach;
+- }
+-
+- /*
+- * Timestamps:
+- *
+- * 1) Copy input timestamp if valid
+- * 2) else interpolate from previous input timestamp
+- */
+- /* always take timestamps from the input buffer if any */
+- if (GST_CLOCK_TIME_IS_VALID (dec_info->pts)) {
+- out_pts = dec_info->pts;
+- } else {
+- out_pts = ffmpegdec->next_out;
+- }
+-
+- /*
+- * Duration:
+- *
+- * 1) calculate based on number of samples
+- */
+- out_duration = gst_util_uint64_scale (have_data, GST_SECOND,
+- ffmpegdec->depth * ffmpegdec->channels * ffmpegdec->samplerate);
+-
+- /* offset:
+- *
+- * Just copy
+- */
+- out_offset = dec_info->offset;
+-
+- GST_DEBUG_OBJECT (ffmpegdec,
+- "Buffer created. Size:%d , pts:%" GST_TIME_FORMAT " , duration:%"
+- GST_TIME_FORMAT, have_data,
+- GST_TIME_ARGS (out_pts), GST_TIME_ARGS (out_duration));
+-
+- GST_BUFFER_PTS (*outbuf) = out_pts;
+- GST_BUFFER_DURATION (*outbuf) = out_duration;
+- GST_BUFFER_OFFSET (*outbuf) = out_offset;
+-
+- /* the next timestamp we'll use when interpolating */
+- if (GST_CLOCK_TIME_IS_VALID (out_pts))
+- ffmpegdec->next_out = out_pts + out_duration;
+-
+- /* now see if we need to clip the buffer against the segment boundaries. */
+- if (G_UNLIKELY (!clip_audio_buffer (ffmpegdec, *outbuf, out_pts,
+- out_duration)))
+- goto clipped;
+-
+-
+- /* Reorder channels to the GStreamer channel order */
+- /* Only the width really matters here... and it's stored as depth */
+- fmt =
+- gst_audio_format_build_integer (TRUE, G_BYTE_ORDER,
+- ffmpegdec->depth * 8, ffmpegdec->depth * 8);
+-
+- gst_audio_buffer_reorder_channels (*outbuf, fmt,
+- ffmpegdec->channels, ffmpegdec->ffmpeg_layout, ffmpegdec->gst_layout);
+- } else {
+- gst_buffer_unmap (*outbuf, &map);
+- gst_buffer_unref (*outbuf);
+- *outbuf = NULL;
+- }
+-
+-beach:
+- GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
+- *ret, *outbuf, len);
+- return len;
+-
+- /* ERRORS */
+-clipped:
+- {
+- GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
+- gst_buffer_unref (*outbuf);
+- *outbuf = NULL;
+- goto beach;
+- }
+-}
+-
+-/* gst_ffmpegauddec_frame:
+- * ffmpegdec:
+- * data: pointer to the data to decode
+- * size: size of data in bytes
+- * got_data: 0 if no data was decoded, != 0 otherwise.
+- * in_time: timestamp of data
+- * in_duration: duration of data
+- * ret: GstFlowReturn to return in the chain function
+- *
+- * Decode the given frame and pushes it downstream.
+- *
+- * Returns: Number of bytes used in decoding, -1 on error/failure.
+- */
+-
+-static gint
+-gst_ffmpegauddec_frame (GstFFMpegAudDec * ffmpegdec,
+- guint8 * data, guint size, gint * got_data, const GstTSInfo * dec_info,
+- GstFlowReturn * ret)
+-{
+- GstFFMpegAudDecClass *oclass;
+- GstBuffer *outbuf = NULL;
+- gint have_data = 0, len = 0;
+-
+- if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
+- goto no_codec;
+-
+- GST_LOG_OBJECT (ffmpegdec, "data:%p, size:%d, id:%d", data, size,
+- dec_info->idx);
+-
+- *ret = GST_FLOW_OK;
+- ffmpegdec->context->frame_number++;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- len =
+- gst_ffmpegauddec_audio_frame (ffmpegdec, oclass->in_plugin, data, size,
+- dec_info, &outbuf, ret);
+-
+- /* if we did not get an output buffer and we have a pending discont, don't
+- * clear the input timestamps, we will put them on the next buffer because
+- * else we might create the first buffer with a very big timestamp gap. */
+- if (outbuf == NULL && ffmpegdec->discont) {
+- GST_DEBUG_OBJECT (ffmpegdec, "no buffer but keeping timestamp");
+- ffmpegdec->clear_ts = FALSE;
+- }
+-
+- if (outbuf)
+- have_data = 1;
+-
+- if (len < 0 || have_data < 0) {
+- GST_WARNING_OBJECT (ffmpegdec,
+- "avdec_%s: decoding error (len: %d, have_data: %d)",
+- oclass->in_plugin->name, len, have_data);
+- *got_data = 0;
+- goto beach;
+- } else if (len == 0 && have_data == 0) {
+- *got_data = 0;
+- goto beach;
+- } else {
+- /* this is where I lost my last clue on ffmpeg... */
+- *got_data = 1;
+- }
+-
+- if (outbuf) {
+- GST_LOG_OBJECT (ffmpegdec,
+- "Decoded data, now pushing buffer %p with offset %" G_GINT64_FORMAT
+- ", timestamp %" GST_TIME_FORMAT " and duration %" GST_TIME_FORMAT,
+- outbuf, GST_BUFFER_OFFSET (outbuf),
+- GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
+- GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
+-
+- /* mark pending discont */
+- if (ffmpegdec->discont) {
+- GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
+- ffmpegdec->discont = FALSE;
+- }
+- if (ffmpegdec->segment.rate > 0.0) {
+- /* and off we go */
+- *ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
+- } else {
+- /* reverse playback, queue frame till later when we get a discont. */
+- GST_DEBUG_OBJECT (ffmpegdec, "queued frame");
+- ffmpegdec->queued = g_list_prepend (ffmpegdec->queued, outbuf);
+- *ret = GST_FLOW_OK;
+- }
+- } else {
+- GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
+- }
+-
+-beach:
+- return len;
+-
+- /* ERRORS */
+-no_codec:
+- {
+- GST_ERROR_OBJECT (ffmpegdec, "no codec context");
+- return -1;
+- }
+-}
+-
+-static void
+-gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec)
+-{
+- GstFFMpegAudDecClass *oclass;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
+- gint have_data, len, try = 0;
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "codec has delay capabilities, calling until libav has drained everything");
+-
+- do {
+- GstFlowReturn ret;
+-
+- len =
+- gst_ffmpegauddec_frame (ffmpegdec, NULL, 0, &have_data, &ts_info_none,
+- &ret);
+- if (len < 0 || have_data == 0)
+- break;
+- } while (try++ < 10);
+- }
+- if (ffmpegdec->segment.rate < 0.0) {
+- /* if we have some queued frames for reverse playback, flush them now */
+- flush_queued (ffmpegdec);
+- }
+-}
+-
+-static void
+-gst_ffmpegauddec_flush_pcache (GstFFMpegAudDec * ffmpegdec)
+-{
+- if (ffmpegdec->pctx) {
+- gint size, bsize;
+- guint8 *data;
+- guint8 bdata[FF_INPUT_BUFFER_PADDING_SIZE];
+-
+- bsize = FF_INPUT_BUFFER_PADDING_SIZE;
+- memset (bdata, 0, bsize);
+-
+- /* parse some dummy data to work around some ffmpeg weirdness where it keeps
+- * the previous pts around */
+- av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
+- &data, &size, bdata, bsize, -1, -1, -1);
+- ffmpegdec->pctx->pts = -1;
+- ffmpegdec->pctx->dts = -1;
+- }
+-
+- if (ffmpegdec->pcache) {
+- gst_buffer_unref (ffmpegdec->pcache);
+- ffmpegdec->pcache = NULL;
+- }
+-}
+-
+-static gboolean
+-gst_ffmpegauddec_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
+-{
+- GstFFMpegAudDec *ffmpegdec;
+- gboolean ret = FALSE;
+-
+- ffmpegdec = (GstFFMpegAudDec *) parent;
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
+- GST_EVENT_TYPE_NAME (event));
+-
+- switch (GST_EVENT_TYPE (event)) {
+- case GST_EVENT_EOS:
+- {
+- gst_ffmpegauddec_drain (ffmpegdec);
+- break;
+- }
+- case GST_EVENT_FLUSH_STOP:
+- {
+- if (ffmpegdec->opened) {
+- avcodec_flush_buffers (ffmpegdec->context);
+- }
+- gst_ffmpegauddec_reset_ts (ffmpegdec);
+- gst_ffmpegauddec_flush_pcache (ffmpegdec);
+- gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
+- clear_queued (ffmpegdec);
+- break;
+- }
+- case GST_EVENT_CAPS:
+- {
+- GstCaps *caps;
+-
+- gst_event_parse_caps (event, &caps);
+-
+- if (!ffmpegdec->last_caps
+- || !gst_caps_is_equal (ffmpegdec->last_caps, caps)) {
+- ret = gst_ffmpegauddec_setcaps (ffmpegdec, caps);
+- if (ret) {
+- gst_caps_replace (&ffmpegdec->last_caps, caps);
+- }
+- } else {
+- ret = TRUE;
+- }
+-
+- gst_event_unref (event);
+- goto done;
+- }
+- case GST_EVENT_SEGMENT:
+- {
+- GstSegment segment;
+-
+- gst_event_copy_segment (event, &segment);
+-
+- switch (segment.format) {
+- case GST_FORMAT_TIME:
+- /* fine, our native segment format */
+- break;
+- case GST_FORMAT_BYTES:
+- {
+- gint bit_rate;
+-
+- bit_rate = ffmpegdec->context->bit_rate;
+-
+- /* convert to time or fail */
+- if (!bit_rate)
+- goto no_bitrate;
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
+-
+- /* convert values to TIME */
+- if (segment.start != -1)
+- segment.start =
+- gst_util_uint64_scale_int (segment.start, GST_SECOND, bit_rate);
+- if (segment.stop != -1)
+- segment.stop =
+- gst_util_uint64_scale_int (segment.stop, GST_SECOND, bit_rate);
+- if (segment.time != -1)
+- segment.time =
+- gst_util_uint64_scale_int (segment.time, GST_SECOND, bit_rate);
+-
+- /* unref old event */
+- gst_event_unref (event);
+-
+- /* create new converted time segment */
+- segment.format = GST_FORMAT_TIME;
+- /* FIXME, bitrate is not good enough too find a good stop, let's
+- * hope start and time were 0... meh. */
+- segment.stop = -1;
+- event = gst_event_new_segment (&segment);
+- break;
+- }
+- default:
+- /* invalid format */
+- goto invalid_format;
+- }
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "SEGMENT in time %" GST_SEGMENT_FORMAT,
+- &segment);
+-
+- /* and store the values */
+- gst_segment_copy_into (&segment, &ffmpegdec->segment);
+- break;
+- }
+- default:
+- break;
+- }
+-
+- /* and push segment downstream */
+- ret = gst_pad_push_event (ffmpegdec->srcpad, event);
+-
+-done:
+-
+- return ret;
+-
+- /* ERRORS */
+-no_bitrate:
+- {
+- GST_WARNING_OBJECT (ffmpegdec, "no bitrate to convert BYTES to TIME");
+- gst_event_unref (event);
+- goto done;
+- }
+-invalid_format:
+- {
+- GST_WARNING_OBJECT (ffmpegdec, "unknown format received in NEWSEGMENT");
+- gst_event_unref (event);
+- goto done;
+- }
+-}
+-
+-static gboolean
+-gst_ffmpegauddec_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
+-{
+- GstFFMpegAudDec *ffmpegdec;
+- gboolean ret = FALSE;
+-
+- ffmpegdec = (GstFFMpegAudDec *) parent;
+-
+- GST_DEBUG_OBJECT (ffmpegdec, "Handling %s query",
+- GST_QUERY_TYPE_NAME (query));
+-
+- switch (GST_QUERY_TYPE (query)) {
+- case GST_QUERY_ACCEPT_CAPS:
+- {
+- GstPadTemplate *templ;
+-
+- ret = FALSE;
+- if ((templ = GST_PAD_PAD_TEMPLATE (pad))) {
+- GstCaps *tcaps;
+-
+- if ((tcaps = GST_PAD_TEMPLATE_CAPS (templ))) {
+- GstCaps *caps;
+-
+- gst_query_parse_accept_caps (query, &caps);
+- gst_query_set_accept_caps_result (query,
+- gst_caps_is_subset (caps, tcaps));
+- ret = TRUE;
+- }
+- }
+- break;
+- }
+- default:
+- ret = gst_pad_query_default (pad, parent, query);
+- break;
+- }
+- return ret;
+-}
+-
+-static GstFlowReturn
+-gst_ffmpegauddec_chain (GstPad * pad, GstObject * parent, GstBuffer * inbuf)
+-{
+- GstFFMpegAudDec *ffmpegdec;
+- GstFFMpegAudDecClass *oclass;
+- guint8 *data, *bdata;
+- GstMapInfo map;
+- gint size, bsize, len, have_data;
+- GstFlowReturn ret = GST_FLOW_OK;
+- GstClockTime in_pts, in_dts, in_duration;
+- gboolean discont;
+- gint64 in_offset;
+- const GstTSInfo *in_info;
+- const GstTSInfo *dec_info;
+-
+- ffmpegdec = (GstFFMpegAudDec *) parent;
+-
+- if (G_UNLIKELY (!ffmpegdec->opened))
+- goto not_negotiated;
+-
+- discont = GST_BUFFER_IS_DISCONT (inbuf);
+-
+- /* The discont flags marks a buffer that is not continuous with the previous
+- * buffer. This means we need to clear whatever data we currently have. We let
+- * ffmpeg continue with the data that it has. We currently drain the old
+- * frames that might be inside the decoder and we clear any partial data in
+- * the pcache, we might be able to remove the drain and flush too. */
+- if (G_UNLIKELY (discont)) {
+- GST_DEBUG_OBJECT (ffmpegdec, "received DISCONT");
+- /* drain what we have queued */
+- gst_ffmpegauddec_drain (ffmpegdec);
+- gst_ffmpegauddec_flush_pcache (ffmpegdec);
+- ffmpegdec->discont = TRUE;
+- gst_ffmpegauddec_reset_ts (ffmpegdec);
+- }
+- /* by default we clear the input timestamp after decoding each frame so that
+- * interpollation can work. */
+- ffmpegdec->clear_ts = TRUE;
+-
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+-
+- /* parse cache joining. If there is cached data */
+- if (ffmpegdec->pcache) {
+- /* join with previous data */
+- GST_LOG_OBJECT (ffmpegdec, "join parse cache");
+- inbuf = gst_buffer_append (ffmpegdec->pcache, inbuf);
+- /* no more cached data, we assume we can consume the complete cache */
+- ffmpegdec->pcache = NULL;
+- }
+-
+- in_dts = GST_BUFFER_DTS (inbuf);
+- in_pts = GST_BUFFER_PTS (inbuf);
+- in_duration = GST_BUFFER_DURATION (inbuf);
+- in_offset = GST_BUFFER_OFFSET (inbuf);
+-
+- /* get handle to timestamp info, we can pass this around to ffmpeg */
+- in_info =
+- gst_ts_info_store (ffmpegdec, in_dts, in_pts, in_duration, in_offset);
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
+- GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
+- gst_buffer_get_size (inbuf), GST_BUFFER_OFFSET (inbuf),
+- GST_TIME_ARGS (in_pts), GST_TIME_ARGS (in_duration), in_info->idx);
+-
+- /* workarounds, functions write to buffers:
+- * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
+- * libavcodec/svq3.c:svq3_decode_slice_header too.
+- * ffmpeg devs know about it and will fix it (they said). */
+- if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
+- oclass->in_plugin->id == CODEC_ID_SVQ3) {
+- inbuf = gst_buffer_make_writable (inbuf);
+- }
+-
+- gst_buffer_map (inbuf, &map, GST_MAP_READ);
+-
+- bdata = map.data;
+- bsize = map.size;
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", dts:%"
+- GST_TIME_FORMAT ", pts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT
+- ", info %d", bsize, in_offset, GST_TIME_ARGS (in_dts),
+- GST_TIME_ARGS (in_pts), GST_TIME_ARGS (in_duration), in_info->idx);
+-
+- do {
+- /* parse, if at all possible */
+- if (ffmpegdec->pctx) {
+- gint res;
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "Calling av_parser_parse2 with offset %" G_GINT64_FORMAT ", ts:%"
+- GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_pts), bsize);
+-
+- /* feed the parser. We pass the timestamp info so that we can recover all
+- * info again later */
+- res = av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
+- &data, &size, bdata, bsize, in_info->idx, in_info->idx, in_offset);
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "parser returned res %d and size %d, id %" G_GINT64_FORMAT, res, size,
+- (gint64) ffmpegdec->pctx->pts);
+-
+- /* store pts for decoding */
+- if (ffmpegdec->pctx->pts != AV_NOPTS_VALUE && ffmpegdec->pctx->pts != -1)
+- dec_info = gst_ts_info_get (ffmpegdec, ffmpegdec->pctx->pts);
+- else {
+- /* ffmpeg sometimes loses track after a flush, help it by feeding a
+- * valid start time */
+- ffmpegdec->pctx->pts = in_info->idx;
+- ffmpegdec->pctx->dts = in_info->idx;
+- dec_info = in_info;
+- }
+-
+- GST_LOG_OBJECT (ffmpegdec, "consuming %d bytes. id %d", size,
+- dec_info->idx);
+-
+- if (res) {
+- /* there is output, set pointers for next round. */
+- bsize -= res;
+- bdata += res;
+- } else {
+- /* Parser did not consume any data, make sure we don't clear the
+- * timestamp for the next round */
+- ffmpegdec->clear_ts = FALSE;
+- }
+-
+- /* if there is no output, we must break and wait for more data. also the
+- * timestamp in the context is not updated. */
+- if (size == 0) {
+- if (bsize > 0)
+- continue;
+- else
+- break;
+- }
+- } else {
+- data = bdata;
+- size = bsize;
+-
+- dec_info = in_info;
+- }
+-
+- /* decode a frame of audio now */
+- len =
+- gst_ffmpegauddec_frame (ffmpegdec, data, size, &have_data, dec_info,
+- &ret);
+-
+- if (ret != GST_FLOW_OK) {
+- GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
+- gst_flow_get_name (ret));
+- /* bad flow return, make sure we discard all data and exit */
+- bsize = 0;
+- break;
+- }
+- if (!ffmpegdec->pctx) {
+- if (len == 0 && !have_data) {
+- /* nothing was decoded, this could be because no data was available or
+- * because we were skipping frames.
+- * If we have no context we must exit and wait for more data, we keep the
+- * data we tried. */
+- GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
+- break;
+- } else if (len < 0) {
+- /* a decoding error happened, we must break and try again with next data. */
+- GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
+- bsize = 0;
+- break;
+- }
+- /* prepare for the next round, for codecs with a context we did this
+- * already when using the parser. */
+- bsize -= len;
+- bdata += len;
+- } else {
+- if (len == 0) {
+- /* nothing was decoded, this could be because no data was available or
+- * because we were skipping frames. Since we have a parser we can
+- * continue with the next frame */
+- GST_LOG_OBJECT (ffmpegdec,
+- "Decoding didn't return any data, trying next");
+- } else if (len < 0) {
+- /* we have a context that will bring us to the next frame */
+- GST_LOG_OBJECT (ffmpegdec, "Decoding error, trying next");
+- }
+- }
+-
+- /* make sure we don't use the same old timestamp for the next frame and let
+- * the interpollation take care of it. */
+- if (ffmpegdec->clear_ts) {
+- in_dts = GST_CLOCK_TIME_NONE;
+- in_pts = GST_CLOCK_TIME_NONE;
+- in_duration = GST_CLOCK_TIME_NONE;
+- in_offset = GST_BUFFER_OFFSET_NONE;
+- in_info = GST_TS_INFO_NONE;
+- } else {
+- ffmpegdec->clear_ts = TRUE;
+- }
+-
+- GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
+- bsize, bdata);
+- } while (bsize > 0);
+-
+- gst_buffer_unmap (inbuf, &map);
+-
+- /* keep left-over */
+- if (ffmpegdec->pctx && bsize > 0) {
+- in_pts = GST_BUFFER_PTS (inbuf);
+- in_dts = GST_BUFFER_DTS (inbuf);
+- in_offset = GST_BUFFER_OFFSET (inbuf);
+-
+- GST_LOG_OBJECT (ffmpegdec,
+- "Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", pts %"
+- GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_pts));
+-
+- ffmpegdec->pcache = gst_buffer_copy_region (inbuf, GST_BUFFER_COPY_ALL,
+- gst_buffer_get_size (inbuf) - bsize, bsize);
+- /* we keep timestamp, even though all we really know is that the correct
+- * timestamp is not below the one from inbuf */
+- GST_BUFFER_PTS (ffmpegdec->pcache) = in_pts;
+- GST_BUFFER_DTS (ffmpegdec->pcache) = in_dts;
+- GST_BUFFER_OFFSET (ffmpegdec->pcache) = in_offset;
+- } else if (bsize > 0) {
+- GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
+- }
+- gst_buffer_unref (inbuf);
+-
+- return ret;
+-
+- /* ERRORS */
+-not_negotiated:
+- {
+- oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
+- ("avdec_%s: input format was not set before data start",
+- oclass->in_plugin->name));
+- gst_buffer_unref (inbuf);
+- return GST_FLOW_NOT_NEGOTIATED;
+- }
+-}
+-
+-static GstStateChangeReturn
+-gst_ffmpegauddec_change_state (GstElement * element, GstStateChange transition)
+-{
+- GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) element;
+- GstStateChangeReturn ret;
+-
+- ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+-
+- switch (transition) {
+- case GST_STATE_CHANGE_PAUSED_TO_READY:
+- GST_OBJECT_LOCK (ffmpegdec);
+- gst_ffmpegauddec_close (ffmpegdec);
+- GST_OBJECT_UNLOCK (ffmpegdec);
+- clear_queued (ffmpegdec);
+- break;
+- default:
+- break;
+- }
+-
+- return ret;
+-}
+-
+-gboolean
+-gst_ffmpegauddec_register (GstPlugin * plugin)
+-{
+- GTypeInfo typeinfo = {
+- sizeof (GstFFMpegAudDecClass),
+- (GBaseInitFunc) gst_ffmpegauddec_base_init,
+- NULL,
+- (GClassInitFunc) gst_ffmpegauddec_class_init,
+- NULL,
+- NULL,
+- sizeof (GstFFMpegAudDec),
+- 0,
+- (GInstanceInitFunc) gst_ffmpegauddec_init,
+- };
+- GType type;
+- AVCodec *in_plugin;
+- gint rank;
+-
+- in_plugin = av_codec_next (NULL);
+-
+- GST_LOG ("Registering decoders");
+-
+- while (in_plugin) {
+- gchar *type_name;
+- gchar *plugin_name;
+-
+- /* only decoders */
+- if (!in_plugin->decode || in_plugin->type != AVMEDIA_TYPE_AUDIO) {
+- goto next;
+- }
+-
+- /* no quasi-codecs, please */
+- if (in_plugin->id >= CODEC_ID_PCM_S16LE &&
+- in_plugin->id <= CODEC_ID_PCM_BLURAY) {
+- goto next;
+- }
+-
+- /* No decoders depending on external libraries (we don't build them, but
+- * people who build against an external ffmpeg might have them.
+- * We have native gstreamer plugins for all of those libraries anyway. */
+- if (!strncmp (in_plugin->name, "lib", 3)) {
+- GST_DEBUG
+- ("Not using external library decoder %s. Use the gstreamer-native ones instead.",
+- in_plugin->name);
+- goto next;
+- }
+-
+- GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
+-
+- /* no codecs for which we're GUARANTEED to have better alternatives */
+- /* MP1 : Use MP3 for decoding */
+- /* MP2 : Use MP3 for decoding */
+- /* Theora: Use libtheora based theoradec */
+- if (!strcmp (in_plugin->name, "vorbis") ||
+- !strcmp (in_plugin->name, "wavpack") ||
+- !strcmp (in_plugin->name, "mp1") ||
+- !strcmp (in_plugin->name, "mp2") ||
+- !strcmp (in_plugin->name, "libfaad") ||
+- !strcmp (in_plugin->name, "mpeg4aac") ||
+- !strcmp (in_plugin->name, "ass") ||
+- !strcmp (in_plugin->name, "srt") ||
+- !strcmp (in_plugin->name, "pgssub") ||
+- !strcmp (in_plugin->name, "dvdsub") ||
+- !strcmp (in_plugin->name, "dvbsub")) {
+- GST_LOG ("Ignoring decoder %s", in_plugin->name);
+- goto next;
+- }
+-
+- /* construct the type */
+- plugin_name = g_strdup ((gchar *) in_plugin->name);
+- g_strdelimit (plugin_name, NULL, '_');
+- type_name = g_strdup_printf ("avdec_%s", plugin_name);
+- g_free (plugin_name);
+-
+- type = g_type_from_name (type_name);
+-
+- if (!type) {
+- /* create the gtype now */
+- type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
+- g_type_set_qdata (type, GST_FFDEC_PARAMS_QDATA, (gpointer) in_plugin);
+- }
+-
+- /* (Ronald) MPEG-4 gets a higher priority because it has been well-
+- * tested and by far outperforms divxdec/xviddec - so we prefer it.
+- * msmpeg4v3 same, as it outperforms divxdec for divx3 playback.
+- * VC1/WMV3 are not working and thus unpreferred for now. */
+- switch (in_plugin->id) {
+- case CODEC_ID_RA_144:
+- case CODEC_ID_RA_288:
+- case CODEC_ID_COOK:
+- rank = GST_RANK_PRIMARY;
+- break;
+- /* SIPR: decoder should have a higher rank than realaudiodec.
+- */
+- case CODEC_ID_SIPR:
+- rank = GST_RANK_SECONDARY;
+- break;
+- case CODEC_ID_MP3:
+- rank = GST_RANK_NONE;
+- break;
+- default:
+- rank = GST_RANK_MARGINAL;
+- break;
+- }
+- if (!gst_element_register (plugin, type_name, rank, type)) {
+- g_warning ("Failed to register %s", type_name);
+- g_free (type_name);
+- return FALSE;
+- }
+-
+- g_free (type_name);
+-
+- next:
+- in_plugin = av_codec_next (in_plugin);
+- }
+-
+- GST_LOG ("Finished Registering decoders");
+-
+- return TRUE;
+-}
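
The registration loop removed above turns every libav audio decoder into an avdec_<name> GStreamer element, creating the GType lazily and picking a rank per codec. Purely as an illustration of that naming/registration pattern (the helper name and the trimmed error handling below are invented, not part of the patch):

    #include <gst/gst.h>
    #include <libavcodec/avcodec.h>

    /* Illustrative sketch: register one libav decoder as an "avdec_<name>"
     * element type.  Qdata bookkeeping and codec blacklisting are omitted. */
    static gboolean
    register_one_decoder (GstPlugin * plugin, AVCodec * codec,
        GType parent, const GTypeInfo * typeinfo, guint rank)
    {
      gchar *name = g_strdup (codec->name);
      gchar *type_name;
      GType type;
      gboolean res;

      g_strdelimit (name, NULL, '_');   /* make the codec name GType-safe */
      type_name = g_strdup_printf ("avdec_%s", name);
      g_free (name);

      type = g_type_from_name (type_name);
      if (!type)
        type = g_type_register_static (parent, type_name, typeinfo, 0);

      res = gst_element_register (plugin, type_name, rank, type);
      g_free (type_name);

      return res;
    }
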
+diff --git a/ext/libav/gstavdeinterlace.c b/ext/libav/gstavdeinterlace.c
+index 65961f6..faa81bb 100644
+--- a/ext/libav/gstavdeinterlace.c
++++ b/ext/libav/gstavdeinterlace.c
+@@ -16,8 +16,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -203,7 +203,7 @@ gst_ffmpegdeinterlace_sink_setcaps (GstPad * pad, GstCaps * caps)
+ }
+ gst_ffmpegdeinterlace_update_passthrough (deinterlace);
+
+- ctx = avcodec_alloc_context ();
++ ctx = avcodec_alloc_context3 (NULL);
+ ctx->width = deinterlace->width;
+ ctx->height = deinterlace->height;
+ ctx->pix_fmt = PIX_FMT_NB;
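
The deinterlace element now gets its codec context from avcodec_alloc_context3(), whose argument selects the codec whose private defaults should be applied (NULL means generic defaults only). A minimal sketch of that allocation step, with an assumed helper name:

    #include <libavcodec/avcodec.h>

    /* Sketch only: allocate a context with libav 9 defaults and fill in the
     * few fields the deinterlacer needs. */
    static AVCodecContext *
    alloc_deinterlace_context (int width, int height)
    {
      AVCodecContext *ctx = avcodec_alloc_context3 (NULL);

      if (ctx == NULL)
        return NULL;

      ctx->width = width;
      ctx->height = height;
      ctx->pix_fmt = PIX_FMT_NB;        /* placeholder, as in the element */

      return ctx;
    }
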
+diff --git a/ext/libav/gstavdemux.c b/ext/libav/gstavdemux.c
+index bee67a5..de0341f 100644
+--- a/ext/libav/gstavdemux.c
++++ b/ext/libav/gstavdemux.c
+@@ -15,8 +15,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -32,7 +32,7 @@
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+ #include "gstavutils.h"
+-#include "gstavpipe.h"
++#include "gstavprotocol.h"
+
+ #define MAX_STREAMS 20
+
+@@ -332,7 +332,14 @@ gst_ffmpegdemux_close (GstFFMpegDemux * demux)
+ demux->audiopads = 0;
+
+ /* close demuxer context from ffmpeg */
+- av_close_input_file (demux->context);
++ if (demux->seekable)
++ gst_ffmpegdata_close (demux->context->pb);
++ else
++ gst_ffmpeg_pipe_close (demux->context->pb);
++ demux->context->pb = NULL;
++ avformat_close_input (&demux->context);
++ if (demux->context)
++ avformat_free_context (demux->context);
+ demux->context = NULL;
+
+ GST_OBJECT_LOCK (demux);
+@@ -995,7 +1002,7 @@ gst_ffmpegdemux_get_stream (GstFFMpegDemux * demux, AVStream * avstream)
+
+
+ stream_id =
+- gst_pad_create_stream_id_printf (pad, GST_ELEMENT_CAST (demux), "%u",
++ gst_pad_create_stream_id_printf (pad, GST_ELEMENT_CAST (demux), "%03u",
+ avstream->index);
+ gst_pad_push_event (pad, gst_event_new_stream_start (stream_id));
+ g_free (stream_id);
+@@ -1115,9 +1122,9 @@ gst_ffmpegdemux_read_tags (GstFFMpegDemux * demux)
+ static gboolean
+ gst_ffmpegdemux_open (GstFFMpegDemux * demux)
+ {
++ AVIOContext *iocontext = NULL;
+ GstFFMpegDemuxClass *oclass =
+ (GstFFMpegDemuxClass *) G_OBJECT_GET_CLASS (demux);
+- gchar *location;
+ gint res, n_streams, i;
+ #if 0
+ /* Re-enable once converted to new AVMetaData API
+@@ -1133,15 +1140,14 @@ gst_ffmpegdemux_open (GstFFMpegDemux * demux)
+
+ /* open via our input protocol hack */
+ if (demux->seekable)
+- location = g_strdup_printf ("gstreamer://%p", demux->sinkpad);
++ res = gst_ffmpegdata_open (demux->sinkpad, AVIO_FLAG_READ, &iocontext);
+ else
+- location = g_strdup_printf ("gstpipe://%p", &demux->ffpipe);
+- GST_DEBUG_OBJECT (demux, "about to call av_open_input_file %s", location);
++ res = gst_ffmpeg_pipe_open (&demux->ffpipe, AVIO_FLAG_READ, &iocontext);
+
+- res = av_open_input_file (&demux->context, location,
+- oclass->in_plugin, 0, NULL);
++ demux->context = avformat_alloc_context ();
++ demux->context->pb = iocontext;
++ res = avformat_open_input (&demux->context, NULL, oclass->in_plugin, NULL);
+
+- g_free (location);
+ GST_DEBUG_OBJECT (demux, "av_open_input returned %d", res);
+ if (res < 0)
+ goto open_failed;
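
The open path above replaces the old gstreamer://%p / gstpipe://%p URL hack: the element builds an AVIOContext itself, hangs it on context->pb, and only then calls avformat_open_input() with a NULL filename. A condensed sketch of that sequence (helper name and error handling are illustrative):

    #include <libavformat/avformat.h>

    /* Sketch: open a demuxer on a caller-provided AVIOContext. */
    static int
    open_demuxer (AVInputFormat * in_fmt, AVIOContext * pb,
        AVFormatContext ** out_ctx)
    {
      AVFormatContext *ctx = avformat_alloc_context ();
      int res;

      if (ctx == NULL)
        return AVERROR (ENOMEM);

      ctx->pb = pb;                     /* all I/O goes through our callbacks */
      res = avformat_open_input (&ctx, NULL, in_fmt, NULL);
      if (res < 0)
        return res;                     /* libav freed ctx on failure */

      *out_ctx = ctx;
      return 0;
    }
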
+diff --git a/ext/libav/gstavenc.c b/ext/libav/gstavenc.c
+deleted file mode 100644
+index 312787e..0000000
+--- a/ext/libav/gstavenc.c
++++ /dev/null
+@@ -1,825 +0,0 @@
+-/* GStreamer
+- * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Library General Public
+- * License as published by the Free Software Foundation; either
+- * version 2 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Library General Public License for more details.
+- *
+- * You should have received a copy of the GNU Library General Public
+- * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
+- */
+-
+-#ifdef HAVE_CONFIG_H
+-#include "config.h"
+-#endif
+-
+-#include <assert.h>
+-#include <string.h>
+-/* for stats file handling */
+-#include <stdio.h>
+-#include <glib/gstdio.h>
+-#include <errno.h>
+-
+-#include <libavcodec/avcodec.h>
+-
+-#include <gst/gst.h>
+-
+-#include "gstav.h"
+-#include "gstavcodecmap.h"
+-#include "gstavutils.h"
+-#include "gstavenc.h"
+-
+-#define DEFAULT_AUDIO_BITRATE 128000
+-
+-enum
+-{
+- /* FILL ME */
+- LAST_SIGNAL
+-};
+-
+-enum
+-{
+- ARG_0,
+- ARG_BIT_RATE,
+- ARG_BUFSIZE,
+- ARG_RTP_PAYLOAD_SIZE,
+-};
+-
+-/* A number of function prototypes are given so we can refer to them later. */
+-static void gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass);
+-static void gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass);
+-static void gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc);
+-static void gst_ffmpegaudenc_finalize (GObject * object);
+-
+-static gboolean gst_ffmpegaudenc_setcaps (GstFFMpegAudEnc * ffmpegenc,
+- GstCaps * caps);
+-static GstCaps *gst_ffmpegaudenc_getcaps (GstFFMpegAudEnc * ffmpegenc,
+- GstCaps * filter);
+-static GstFlowReturn gst_ffmpegaudenc_chain_audio (GstPad * pad,
+- GstObject * parent, GstBuffer * buffer);
+-static gboolean gst_ffmpegaudenc_query_sink (GstPad * pad, GstObject * parent,
+- GstQuery * query);
+-static gboolean gst_ffmpegaudenc_event_sink (GstPad * pad, GstObject * parent,
+- GstEvent * event);
+-
+-static void gst_ffmpegaudenc_set_property (GObject * object,
+- guint prop_id, const GValue * value, GParamSpec * pspec);
+-static void gst_ffmpegaudenc_get_property (GObject * object,
+- guint prop_id, GValue * value, GParamSpec * pspec);
+-
+-static GstStateChangeReturn gst_ffmpegaudenc_change_state (GstElement * element,
+- GstStateChange transition);
+-
+-#define GST_FFENC_PARAMS_QDATA g_quark_from_static_string("avenc-params")
+-
+-static GstElementClass *parent_class = NULL;
+-
+-/*static guint gst_ffmpegaudenc_signals[LAST_SIGNAL] = { 0 }; */
+-
+-static void
+-gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass)
+-{
+- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
+- AVCodec *in_plugin;
+- GstPadTemplate *srctempl = NULL, *sinktempl = NULL;
+- GstCaps *srccaps = NULL, *sinkcaps = NULL;
+- gchar *longname, *description;
+-
+- in_plugin =
+- (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
+- GST_FFENC_PARAMS_QDATA);
+- g_assert (in_plugin != NULL);
+-
+- /* construct the element details struct */
+- longname = g_strdup_printf ("libav %s encoder", in_plugin->long_name);
+- description = g_strdup_printf ("libav %s encoder", in_plugin->name);
+- gst_element_class_set_metadata (element_class, longname,
+- "Codec/Encoder/Audio", description,
+- "Wim Taymans <wim.taymans@gmail.com>, "
+- "Ronald Bultje <rbultje@ronald.bitfreak.net>");
+- g_free (longname);
+- g_free (description);
+-
+- if (!(srccaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, TRUE))) {
+- GST_DEBUG ("Couldn't get source caps for encoder '%s'", in_plugin->name);
+- srccaps = gst_caps_new_empty_simple ("unknown/unknown");
+- }
+-
+- sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
+- in_plugin->id, TRUE, in_plugin);
+- if (!sinkcaps) {
+- GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
+- sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
+- }
+-
+- /* pad templates */
+- sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
+- GST_PAD_ALWAYS, sinkcaps);
+- srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
+-
+- gst_element_class_add_pad_template (element_class, srctempl);
+- gst_element_class_add_pad_template (element_class, sinktempl);
+-
+- klass->in_plugin = in_plugin;
+- klass->srctempl = srctempl;
+- klass->sinktempl = sinktempl;
+- klass->sinkcaps = NULL;
+-
+- return;
+-}
+-
+-static void
+-gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass)
+-{
+- GObjectClass *gobject_class;
+- GstElementClass *gstelement_class;
+-
+- gobject_class = (GObjectClass *) klass;
+- gstelement_class = (GstElementClass *) klass;
+-
+- parent_class = g_type_class_peek_parent (klass);
+-
+- gobject_class->set_property = gst_ffmpegaudenc_set_property;
+- gobject_class->get_property = gst_ffmpegaudenc_get_property;
+-
+- /* FIXME: could use -1 for a sensible per-codec defaults */
+- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
+- g_param_spec_int ("bitrate", "Bit Rate",
+- "Target Audio Bitrate", 0, G_MAXINT, DEFAULT_AUDIO_BITRATE,
+- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+-
+- gstelement_class->change_state = gst_ffmpegaudenc_change_state;
+-
+- gobject_class->finalize = gst_ffmpegaudenc_finalize;
+-}
+-
+-static void
+-gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc)
+-{
+- GstFFMpegAudEncClass *oclass =
+- (GstFFMpegAudEncClass *) (G_OBJECT_GET_CLASS (ffmpegaudenc));
+-
+- /* setup pads */
+- ffmpegaudenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
+- gst_pad_set_event_function (ffmpegaudenc->sinkpad,
+- gst_ffmpegaudenc_event_sink);
+- gst_pad_set_query_function (ffmpegaudenc->sinkpad,
+- gst_ffmpegaudenc_query_sink);
+- gst_pad_set_chain_function (ffmpegaudenc->sinkpad,
+- gst_ffmpegaudenc_chain_audio);
+-
+- ffmpegaudenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
+- gst_pad_use_fixed_caps (ffmpegaudenc->srcpad);
+-
+- /* ffmpeg objects */
+- ffmpegaudenc->context = avcodec_alloc_context ();
+- ffmpegaudenc->opened = FALSE;
+-
+- gst_element_add_pad (GST_ELEMENT (ffmpegaudenc), ffmpegaudenc->sinkpad);
+- gst_element_add_pad (GST_ELEMENT (ffmpegaudenc), ffmpegaudenc->srcpad);
+-
+- ffmpegaudenc->adapter = gst_adapter_new ();
+-}
+-
+-static void
+-gst_ffmpegaudenc_finalize (GObject * object)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) object;
+-
+-
+- /* close old session */
+- if (ffmpegaudenc->opened) {
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- ffmpegaudenc->opened = FALSE;
+- }
+-
+- /* clean up remaining allocated data */
+- av_free (ffmpegaudenc->context);
+-
+- g_object_unref (ffmpegaudenc->adapter);
+-
+- G_OBJECT_CLASS (parent_class)->finalize (object);
+-}
+-
+-static GstCaps *
+-gst_ffmpegaudenc_getcaps (GstFFMpegAudEnc * ffmpegaudenc, GstCaps * filter)
+-{
+- GstCaps *caps = NULL;
+-
+- GST_DEBUG_OBJECT (ffmpegaudenc, "getting caps");
+-
+- /* audio needs no special care */
+- caps = gst_pad_get_pad_template_caps (ffmpegaudenc->sinkpad);
+-
+- if (filter) {
+- GstCaps *tmp;
+- tmp = gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+- gst_caps_unref (caps);
+- caps = tmp;
+- }
+-
+- GST_DEBUG_OBJECT (ffmpegaudenc,
+- "audio caps, return template %" GST_PTR_FORMAT, caps);
+-
+- return caps;
+-}
+-
+-static gboolean
+-gst_ffmpegaudenc_setcaps (GstFFMpegAudEnc * ffmpegaudenc, GstCaps * caps)
+-{
+- GstCaps *other_caps;
+- GstCaps *allowed_caps;
+- GstCaps *icaps;
+- GstFFMpegAudEncClass *oclass =
+- (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
+-
+- /* close old session */
+- if (ffmpegaudenc->opened) {
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- ffmpegaudenc->opened = FALSE;
+- }
+-
+- /* set defaults */
+- avcodec_get_context_defaults (ffmpegaudenc->context);
+-
+- /* if we set it in _getcaps we should set it also in _link */
+- ffmpegaudenc->context->strict_std_compliance = -1;
+-
+- /* user defined properties */
+- if (ffmpegaudenc->bitrate > 0) {
+- GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
+- ffmpegaudenc->bitrate);
+- ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
+- ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
+- } else {
+- GST_INFO_OBJECT (ffmpegaudenc, "Using avcontext default bitrate %d",
+- ffmpegaudenc->context->bit_rate);
+- }
+-
+- /* RTP payload used for GOB production (for Asterisk) */
+- if (ffmpegaudenc->rtp_payload_size) {
+- ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
+- }
+-
+- /* some other defaults */
+- ffmpegaudenc->context->rc_strategy = 2;
+- ffmpegaudenc->context->b_frame_strategy = 0;
+- ffmpegaudenc->context->coder_type = 0;
+- ffmpegaudenc->context->context_model = 0;
+- ffmpegaudenc->context->scenechange_threshold = 0;
+- ffmpegaudenc->context->inter_threshold = 0;
+-
+-
+- /* fetch pix_fmt and so on */
+- gst_ffmpeg_caps_with_codectype (oclass->in_plugin->type,
+- caps, ffmpegaudenc->context);
+- if (!ffmpegaudenc->context->time_base.den) {
+- ffmpegaudenc->context->time_base.den = 25;
+- ffmpegaudenc->context->time_base.num = 1;
+- ffmpegaudenc->context->ticks_per_frame = 1;
+- }
+-
+- /* open codec */
+- if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
+- if (ffmpegaudenc->context->priv_data)
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- if (ffmpegaudenc->context->stats_in)
+- g_free (ffmpegaudenc->context->stats_in);
+- GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
+- oclass->in_plugin->name);
+- return FALSE;
+- }
+-
+- /* second pass stats buffer no longer needed */
+- if (ffmpegaudenc->context->stats_in)
+- g_free (ffmpegaudenc->context->stats_in);
+-
+- /* some codecs support more than one format, first auto-choose one */
+- GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
+- allowed_caps = gst_pad_get_allowed_caps (ffmpegaudenc->srcpad);
+- if (!allowed_caps) {
+- GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
+- /* we need to copy because get_allowed_caps returns a ref, and
+- * get_pad_template_caps doesn't */
+- allowed_caps = gst_pad_get_pad_template_caps (ffmpegaudenc->srcpad);
+- }
+- GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
+- gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
+- oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);
+-
+- /* try to set this caps on the other side */
+- other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
+- ffmpegaudenc->context, TRUE);
+-
+- if (!other_caps) {
+- gst_caps_unref (allowed_caps);
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- GST_DEBUG ("Unsupported codec - no caps found");
+- return FALSE;
+- }
+-
+- icaps = gst_caps_intersect (allowed_caps, other_caps);
+- gst_caps_unref (allowed_caps);
+- gst_caps_unref (other_caps);
+- if (gst_caps_is_empty (icaps)) {
+- gst_caps_unref (icaps);
+- return FALSE;
+- }
+-
+- if (gst_caps_get_size (icaps) > 1) {
+- GstCaps *newcaps;
+-
+- newcaps =
+- gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (icaps,
+- 0)), NULL);
+- gst_caps_unref (icaps);
+- icaps = newcaps;
+- }
+-
+- if (!gst_pad_set_caps (ffmpegaudenc->srcpad, icaps)) {
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- gst_caps_unref (icaps);
+- return FALSE;
+- }
+- gst_caps_unref (icaps);
+-
+- /* success! */
+- ffmpegaudenc->opened = TRUE;
+-
+- return TRUE;
+-}
+-
+-
+-static GstFlowReturn
+-gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
+- guint8 * audio_in, guint in_size, guint max_size, GstClockTime timestamp,
+- GstClockTime duration, gboolean discont)
+-{
+- GstBuffer *outbuf;
+- AVCodecContext *ctx;
+- GstMapInfo map;
+- gint res;
+- GstFlowReturn ret;
+-
+- ctx = ffmpegaudenc->context;
+-
+- /* We need to provide at least ffmpegs minimal buffer size */
+- outbuf = gst_buffer_new_and_alloc (max_size + FF_MIN_BUFFER_SIZE);
+- gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
+-
+- GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer of max size %d", max_size);
+- if (ffmpegaudenc->buffer_size != max_size)
+- ffmpegaudenc->buffer_size = max_size;
+-
+- res = avcodec_encode_audio (ctx, map.data, max_size, (short *) audio_in);
+-
+- if (res < 0) {
+- gst_buffer_unmap (outbuf, &map);
+- GST_ERROR_OBJECT (ffmpegaudenc, "Failed to encode buffer: %d", res);
+- gst_buffer_unref (outbuf);
+- return GST_FLOW_OK;
+- }
+- GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);
+- gst_buffer_unmap (outbuf, &map);
+- gst_buffer_resize (outbuf, 0, res);
+-
+- GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
+- GST_BUFFER_DURATION (outbuf) = duration;
+- if (discont)
+- GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
+-
+- GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d, timestamp %" GST_TIME_FORMAT,
+- res, GST_TIME_ARGS (timestamp));
+-
+- ret = gst_pad_push (ffmpegaudenc->srcpad, outbuf);
+-
+- return ret;
+-}
+-
+-static GstFlowReturn
+-gst_ffmpegaudenc_chain_audio (GstPad * pad, GstObject * parent,
+- GstBuffer * inbuf)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc;
+- GstFFMpegAudEncClass *oclass;
+- AVCodecContext *ctx;
+- GstClockTime timestamp, duration;
+- gsize size, frame_size;
+- gint osize;
+- GstFlowReturn ret;
+- gint out_size;
+- gboolean discont;
+- guint8 *in_data;
+-
+- ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+- oclass = (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
+-
+- if (G_UNLIKELY (!ffmpegaudenc->opened))
+- goto not_negotiated;
+-
+- ctx = ffmpegaudenc->context;
+-
+- size = gst_buffer_get_size (inbuf);
+- timestamp = GST_BUFFER_TIMESTAMP (inbuf);
+- duration = GST_BUFFER_DURATION (inbuf);
+- discont = GST_BUFFER_IS_DISCONT (inbuf);
+-
+- GST_DEBUG_OBJECT (ffmpegaudenc,
+- "Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
+- ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (timestamp),
+- GST_TIME_ARGS (duration), size);
+-
+- frame_size = ctx->frame_size;
+- osize = av_get_bits_per_sample_format (ctx->sample_fmt) / 8;
+-
+- if (frame_size > 1) {
+- /* we have a frame_size, feed the encoder multiples of this frame size */
+- guint avail, frame_bytes;
+-
+- if (discont) {
+- GST_LOG_OBJECT (ffmpegaudenc, "DISCONT, clear adapter");
+- gst_adapter_clear (ffmpegaudenc->adapter);
+- ffmpegaudenc->discont = TRUE;
+- }
+-
+- if (gst_adapter_available (ffmpegaudenc->adapter) == 0) {
+- /* lock on to new timestamp */
+- GST_LOG_OBJECT (ffmpegaudenc, "taking buffer timestamp %" GST_TIME_FORMAT,
+- GST_TIME_ARGS (timestamp));
+- ffmpegaudenc->adapter_ts = timestamp;
+- ffmpegaudenc->adapter_consumed = 0;
+- } else {
+- GstClockTime upstream_time;
+- GstClockTime consumed_time;
+- guint64 bytes;
+-
+- /* use timestamp at head of the adapter */
+- consumed_time =
+- gst_util_uint64_scale (ffmpegaudenc->adapter_consumed, GST_SECOND,
+- ctx->sample_rate);
+- timestamp = ffmpegaudenc->adapter_ts + consumed_time;
+- GST_LOG_OBJECT (ffmpegaudenc, "taking adapter timestamp %" GST_TIME_FORMAT
+- " and adding consumed time %" GST_TIME_FORMAT,
+- GST_TIME_ARGS (ffmpegaudenc->adapter_ts),
+- GST_TIME_ARGS (consumed_time));
+-
+- /* check with upstream timestamps, if too much deviation,
+- * forego some timestamp perfection in favour of upstream syncing
+- * (particularly in case these do not happen to come in multiple
+- * of frame size) */
+- upstream_time =
+- gst_adapter_prev_timestamp (ffmpegaudenc->adapter, &bytes);
+- if (GST_CLOCK_TIME_IS_VALID (upstream_time)) {
+- GstClockTimeDiff diff;
+-
+- upstream_time +=
+- gst_util_uint64_scale (bytes, GST_SECOND,
+- ctx->sample_rate * osize * ctx->channels);
+- diff = upstream_time - timestamp;
+- /* relaxed difference, rather than half a sample or so ... */
+- if (diff > GST_SECOND / 10 || diff < -GST_SECOND / 10) {
+- GST_DEBUG_OBJECT (ffmpegaudenc, "adapter timestamp drifting, "
+- "taking upstream timestamp %" GST_TIME_FORMAT,
+- GST_TIME_ARGS (upstream_time));
+- timestamp = upstream_time;
+- /* samples corresponding to bytes */
+- ffmpegaudenc->adapter_consumed = bytes / (osize * ctx->channels);
+- ffmpegaudenc->adapter_ts = upstream_time -
+- gst_util_uint64_scale (ffmpegaudenc->adapter_consumed, GST_SECOND,
+- ctx->sample_rate);
+- ffmpegaudenc->discont = TRUE;
+- }
+- }
+- }
+-
+- GST_LOG_OBJECT (ffmpegaudenc, "pushing buffer in adapter");
+- gst_adapter_push (ffmpegaudenc->adapter, inbuf);
+-
+- /* first see how many bytes we need to feed to the decoder. */
+- frame_bytes = frame_size * osize * ctx->channels;
+- avail = gst_adapter_available (ffmpegaudenc->adapter);
+-
+- GST_LOG_OBJECT (ffmpegaudenc, "frame_bytes %u, avail %u", frame_bytes,
+- avail);
+-
+- /* while there is more than a frame size in the adapter, consume it */
+- while (avail >= frame_bytes) {
+- GST_LOG_OBJECT (ffmpegaudenc, "taking %u bytes from the adapter",
+- frame_bytes);
+-
+- /* Note that we take frame_bytes and add frame_size.
+- * Makes sense when resyncing because you don't have to count channels
+- * or samplesize to divide by the samplerate */
+-
+- /* take an audio buffer out of the adapter */
+- in_data = (guint8 *) gst_adapter_map (ffmpegaudenc->adapter, frame_bytes);
+- ffmpegaudenc->adapter_consumed += frame_size;
+-
+- /* calculate timestamp and duration relative to start of adapter and to
+- * the amount of samples we consumed */
+- duration =
+- gst_util_uint64_scale (ffmpegaudenc->adapter_consumed, GST_SECOND,
+- ctx->sample_rate);
+- duration -= (timestamp - ffmpegaudenc->adapter_ts);
+-
+- /* 4 times the input size should be big enough... */
+- out_size = frame_bytes * 4;
+-
+- ret =
+- gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, frame_bytes,
+- out_size, timestamp, duration, ffmpegaudenc->discont);
+-
+- gst_adapter_unmap (ffmpegaudenc->adapter);
+- gst_adapter_flush (ffmpegaudenc->adapter, frame_bytes);
+-
+- if (ret != GST_FLOW_OK)
+- goto push_failed;
+-
+- /* advance the adapter timestamp with the duration */
+- timestamp += duration;
+-
+- ffmpegaudenc->discont = FALSE;
+- avail = gst_adapter_available (ffmpegaudenc->adapter);
+- }
+- GST_LOG_OBJECT (ffmpegaudenc, "%u bytes left in the adapter", avail);
+- } else {
+- GstMapInfo map;
+- /* we have no frame_size, feed the encoder all the data and expect a fixed
+- * output size */
+- int coded_bps = av_get_bits_per_sample (oclass->in_plugin->id);
+-
+- GST_LOG_OBJECT (ffmpegaudenc, "coded bps %d, osize %d", coded_bps, osize);
+-
+- out_size = size / osize;
+- if (coded_bps)
+- out_size = (out_size * coded_bps) / 8;
+-
+- gst_buffer_map (inbuf, &map, GST_MAP_READ);
+- in_data = map.data;
+- size = map.size;
+- ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, size, out_size,
+- timestamp, duration, discont);
+- gst_buffer_unmap (inbuf, &map);
+- gst_buffer_unref (inbuf);
+-
+- if (ret != GST_FLOW_OK)
+- goto push_failed;
+- }
+-
+- return GST_FLOW_OK;
+-
+- /* ERRORS */
+-not_negotiated:
+- {
+- GST_ELEMENT_ERROR (ffmpegaudenc, CORE, NEGOTIATION, (NULL),
+- ("not configured to input format before data start"));
+- gst_buffer_unref (inbuf);
+- return GST_FLOW_NOT_NEGOTIATED;
+- }
+-push_failed:
+- {
+- GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to push buffer %d (%s)", ret,
+- gst_flow_get_name (ret));
+- return ret;
+- }
+-}
+-
+-static gboolean
+-gst_ffmpegaudenc_event_sink (GstPad * pad, GstObject * parent, GstEvent * event)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+-
+- switch (GST_EVENT_TYPE (event)) {
+- case GST_EVENT_CAPS:
+- {
+- GstCaps *caps;
+- gboolean ret;
+-
+- gst_event_parse_caps (event, &caps);
+- ret = gst_ffmpegaudenc_setcaps (ffmpegaudenc, caps);
+- gst_event_unref (event);
+- return ret;
+- }
+- default:
+- break;
+- }
+-
+- return gst_pad_event_default (pad, parent, event);
+-}
+-
+-static gboolean
+-gst_ffmpegaudenc_query_sink (GstPad * pad, GstObject * parent, GstQuery * query)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+- gboolean res = FALSE;
+-
+- switch (GST_QUERY_TYPE (query)) {
+- case GST_QUERY_CAPS:
+- {
+- GstCaps *filter, *caps;
+-
+- gst_query_parse_caps (query, &filter);
+- caps = gst_ffmpegaudenc_getcaps (ffmpegaudenc, filter);
+- gst_query_set_caps_result (query, caps);
+- gst_caps_unref (caps);
+- res = TRUE;
+- break;
+- }
+- default:
+- res = gst_pad_query_default (pad, parent, query);
+- break;
+- }
+-
+- return res;
+-}
+-
+-static void
+-gst_ffmpegaudenc_set_property (GObject * object,
+- guint prop_id, const GValue * value, GParamSpec * pspec)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc;
+-
+- /* Get a pointer of the right type. */
+- ffmpegaudenc = (GstFFMpegAudEnc *) (object);
+-
+- if (ffmpegaudenc->opened) {
+- GST_WARNING_OBJECT (ffmpegaudenc,
+- "Can't change properties once decoder is setup !");
+- return;
+- }
+-
+- /* Check the argument id to see which argument we're setting. */
+- switch (prop_id) {
+- case ARG_BIT_RATE:
+- ffmpegaudenc->bitrate = g_value_get_int (value);
+- break;
+- case ARG_BUFSIZE:
+- break;
+- case ARG_RTP_PAYLOAD_SIZE:
+- ffmpegaudenc->rtp_payload_size = g_value_get_int (value);
+- break;
+- default:
+- G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+- break;
+- }
+-}
+-
+-/* The set function is simply the inverse of the get function. */
+-static void
+-gst_ffmpegaudenc_get_property (GObject * object,
+- guint prop_id, GValue * value, GParamSpec * pspec)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc;
+-
+- /* It's not null if we got it, but it might not be ours */
+- ffmpegaudenc = (GstFFMpegAudEnc *) (object);
+-
+- switch (prop_id) {
+- case ARG_BIT_RATE:
+- g_value_set_int (value, ffmpegaudenc->bitrate);
+- break;
+- break;
+- case ARG_BUFSIZE:
+- g_value_set_int (value, ffmpegaudenc->buffer_size);
+- break;
+- case ARG_RTP_PAYLOAD_SIZE:
+- g_value_set_int (value, ffmpegaudenc->rtp_payload_size);
+- break;
+- default:
+- G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+- break;
+- }
+-}
+-
+-static GstStateChangeReturn
+-gst_ffmpegaudenc_change_state (GstElement * element, GstStateChange transition)
+-{
+- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) element;
+- GstStateChangeReturn result;
+-
+- switch (transition) {
+- default:
+- break;
+- }
+-
+- result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+-
+- switch (transition) {
+- case GST_STATE_CHANGE_PAUSED_TO_READY:
+- if (ffmpegaudenc->opened) {
+- gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+- ffmpegaudenc->opened = FALSE;
+- }
+- gst_adapter_clear (ffmpegaudenc->adapter);
+- break;
+- default:
+- break;
+- }
+- return result;
+-}
+-
+-gboolean
+-gst_ffmpegaudenc_register (GstPlugin * plugin)
+-{
+- GTypeInfo typeinfo = {
+- sizeof (GstFFMpegAudEncClass),
+- (GBaseInitFunc) gst_ffmpegaudenc_base_init,
+- NULL,
+- (GClassInitFunc) gst_ffmpegaudenc_class_init,
+- NULL,
+- NULL,
+- sizeof (GstFFMpegAudEnc),
+- 0,
+- (GInstanceInitFunc) gst_ffmpegaudenc_init,
+- };
+- GType type;
+- AVCodec *in_plugin;
+-
+-
+- GST_LOG ("Registering encoders");
+-
+- in_plugin = av_codec_next (NULL);
+- while (in_plugin) {
+- gchar *type_name;
+-
+- /* Skip non-AV codecs */
+- if (in_plugin->type != AVMEDIA_TYPE_AUDIO)
+- goto next;
+-
+- /* no quasi codecs, please */
+- if ((in_plugin->id >= CODEC_ID_PCM_S16LE &&
+- in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
+- goto next;
+- }
+-
+- /* No encoders depending on external libraries (we don't build them, but
+-     * people who build against an external ffmpeg might have them).
+- * We have native gstreamer plugins for all of those libraries anyway. */
+- if (!strncmp (in_plugin->name, "lib", 3)) {
+- GST_DEBUG
+- ("Not using external library encoder %s. Use the gstreamer-native ones instead.",
+- in_plugin->name);
+- goto next;
+- }
+-
+- /* only encoders */
+- if (!in_plugin->encode) {
+- goto next;
+- }
+-
+- /* FIXME : We should have a method to know cheaply whether we have a mapping
+- * for the given plugin or not */
+-
+- GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
+-
+- /* no codecs for which we're GUARANTEED to have better alternatives */
+- if (!strcmp (in_plugin->name, "vorbis")
+- || !strcmp (in_plugin->name, "flac")) {
+- GST_LOG ("Ignoring encoder %s", in_plugin->name);
+- goto next;
+- }
+-
+- /* construct the type */
+- type_name = g_strdup_printf ("avenc_%s", in_plugin->name);
+-
+- type = g_type_from_name (type_name);
+-
+- if (!type) {
+-
+- /* create the glib type now */
+- type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
+- g_type_set_qdata (type, GST_FFENC_PARAMS_QDATA, (gpointer) in_plugin);
+-
+- {
+- static const GInterfaceInfo preset_info = {
+- NULL,
+- NULL,
+- NULL
+- };
+- g_type_add_interface_static (type, GST_TYPE_PRESET, &preset_info);
+- }
+- }
+-
+- if (!gst_element_register (plugin, type_name, GST_RANK_SECONDARY, type)) {
+- g_free (type_name);
+- return FALSE;
+- }
+-
+- g_free (type_name);
+-
+- next:
+- in_plugin = av_codec_next (in_plugin);
+- }
+-
+- GST_LOG ("Finished registering encoders");
+-
+- return TRUE;
+-}
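
The encoder deleted above still drove avcodec_encode_audio(), which libav 9 deprecates in favour of the AVFrame/AVPacket based avcodec_encode_audio2(). As a hedged sketch of the newer call only (this is not the code that actually replaces the file, just the shape of the API):

    #include <libavcodec/avcodec.h>

    /* Sketch: encode one already-filled AVFrame; returns <0 on error,
     * 0 if the encoder buffered the data, 1 if pkt now holds output. */
    static int
    encode_one_frame (AVCodecContext * ctx, AVFrame * frame, AVPacket * pkt)
    {
      int got_packet = 0;
      int res;

      av_init_packet (pkt);
      pkt->data = NULL;                 /* let the encoder allocate the payload */
      pkt->size = 0;

      res = avcodec_encode_audio2 (ctx, pkt, frame, &got_packet);
      if (res < 0)
        return res;

      return got_packet;
    }
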
+diff --git a/ext/libav/gstavenc.h b/ext/libav/gstavenc.h
+deleted file mode 100644
+index 019b168..0000000
+--- a/ext/libav/gstavenc.h
++++ /dev/null
+@@ -1,82 +0,0 @@
+-/* GStreamer
+- * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Library General Public
+- * License as published by the Free Software Foundation; either
+- * version 2 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Library General Public License for more details.
+- *
+- * You should have received a copy of the GNU Library General Public
+- * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
+- */
+-
+-/* First, include the header file for the plugin, to bring in the
+- * object definition and other useful things.
+- */
+-
+-#ifndef __GST_FFMPEGAUDENC_H__
+-#define __GST_FFMPEGAUDENC_H__
+-
+-G_BEGIN_DECLS
+-
+-#include <gst/base/gstadapter.h>
+-
+-typedef struct _GstFFMpegAudEnc GstFFMpegAudEnc;
+-
+-struct _GstFFMpegAudEnc
+-{
+- GstElement element;
+-
+- /* We need to keep track of our pads, so we do so here. */
+- GstPad *srcpad;
+- GstPad *sinkpad;
+-
+- AVCodecContext *context;
+- gboolean opened;
+- GstClockTime adapter_ts;
+- guint64 adapter_consumed;
+- GstAdapter *adapter;
+- gboolean discont;
+-
+- /* cache */
+- gint bitrate;
+- gint buffer_size;
+- gint rtp_payload_size;
+-
+- /* other settings are copied over straight,
+- * include a context here, rather than copy-and-past it from avcodec.h */
+- AVCodecContext config;
+-};
+-
+-typedef struct _GstFFMpegAudEncClass GstFFMpegAudEncClass;
+-
+-struct _GstFFMpegAudEncClass
+-{
+- GstElementClass parent_class;
+-
+- AVCodec *in_plugin;
+- GstPadTemplate *srctempl, *sinktempl;
+- GstCaps *sinkcaps;
+-};
+-
+-#define GST_TYPE_FFMPEGAUDENC \
+- (gst_ffmpegaudenc_get_type())
+-#define GST_FFMPEGAUDENC(obj) \
+- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGAUDENC,GstFFMpegAudEnc))
+-#define GST_FFMPEGAUDENC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGAUDENC,GstFFMpegAudEncClass))
+-#define GST_IS_FFMPEGAUDENC(obj) \
+- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGAUDENC))
+-#define GST_IS_FFMPEGAUDENC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGAUDENC))
+-
+-G_END_DECLS
+-
+-#endif /* __GST_FFMPEGAUDENC_H__ */
+diff --git a/ext/libav/gstavmux.c b/ext/libav/gstavmux.c
+index d9e8969..b302270 100644
+--- a/ext/libav/gstavmux.c
++++ b/ext/libav/gstavmux.c
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -24,12 +24,14 @@
+ #include <string.h>
+
+ #include <libavformat/avformat.h>
++#include <libavutil/opt.h>
+ #include <gst/gst.h>
+ #include <gst/base/gstcollectpads.h>
+
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+ #include "gstavutils.h"
++#include "gstavprotocol.h"
+
+ typedef struct _GstFFMpegMux GstFFMpegMux;
+ typedef struct _GstFFMpegMuxPad GstFFMpegMuxPad;
+@@ -57,8 +59,8 @@ struct _GstFFMpegMux
+ /*< private > */
+ /* event_function is the collectpads default eventfunction */
+ GstPadEventFunction event_function;
+- int preload;
+ int max_delay;
++ int preload;
+ };
+
+ typedef struct _GstFFMpegMuxClass GstFFMpegMuxClass;
+@@ -89,12 +91,6 @@ enum
+
+ enum
+ {
+- ARG_0,
+- /* FILL ME */
+-};
+-
+-enum
+-{
+ PROP_0,
+ PROP_PRELOAD,
+ PROP_MAXDELAY
+@@ -301,8 +297,8 @@ gst_ffmpegmux_class_init (GstFFMpegMuxClass * klass)
+
+ g_object_class_install_property (gobject_class, PROP_PRELOAD,
+ g_param_spec_int ("preload", "preload",
+- "Set the initial demux-decode delay (in microseconds)", 0, G_MAXINT,
+- 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
++ "Set the initial demux-decode delay (in microseconds)",
++ 0, G_MAXINT, 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_MAXDELAY,
+ g_param_spec_int ("maxdelay", "maxdelay",
+@@ -329,17 +325,13 @@ gst_ffmpegmux_init (GstFFMpegMux * ffmpegmux, GstFFMpegMuxClass * g_class)
+ gst_collect_pads_set_function (ffmpegmux->collect,
+ (GstCollectPadsFunction) gst_ffmpegmux_collected, ffmpegmux);
+
+- ffmpegmux->context = g_new0 (AVFormatContext, 1);
++ ffmpegmux->context = avformat_alloc_context ();
+ ffmpegmux->context->oformat = oclass->in_plugin;
+ ffmpegmux->context->nb_streams = 0;
+- g_snprintf (ffmpegmux->context->filename,
+- sizeof (ffmpegmux->context->filename),
+- "gstreamer://%p", ffmpegmux->srcpad);
+ ffmpegmux->opened = FALSE;
+
+ ffmpegmux->videopads = 0;
+ ffmpegmux->audiopads = 0;
+- ffmpegmux->preload = 0;
+ ffmpegmux->max_delay = 0;
+ }
+
+@@ -391,7 +383,9 @@ gst_ffmpegmux_finalize (GObject * object)
+ {
+ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) object;
+
+- g_free (ffmpegmux->context);
++ avformat_free_context (ffmpegmux->context);
++ ffmpegmux->context = NULL;
++
+ gst_object_unref (ffmpegmux->collect);
+
+ if (G_OBJECT_CLASS (parent_class)->finalize)
+@@ -445,10 +439,10 @@ gst_ffmpegmux_request_new_pad (GstElement * element,
+ gst_element_add_pad (element, pad);
+
+ /* AVStream needs to be created */
+- st = av_new_stream (ffmpegmux->context, collect_pad->padnum);
++ st = avformat_new_stream (ffmpegmux->context, NULL);
++ st->id = collect_pad->padnum;
+ st->codec->codec_type = type;
+ st->codec->codec_id = CODEC_ID_NONE; /* this is a check afterwards */
+- st->stream_copy = 1; /* we're not the actual encoder */
+ st->codec->bit_rate = bitrate;
+ st->codec->frame_size = framesize;
+ /* we fill in codec during capsnego */
+@@ -480,7 +474,7 @@ gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps)
+ collect_pad = (GstFFMpegMuxPad *) gst_pad_get_element_private (pad);
+
+ st = ffmpegmux->context->streams[collect_pad->padnum];
+- ffmpegmux->context->preload = ffmpegmux->preload;
++  av_opt_set_int (ffmpegmux->context, "preload", ffmpegmux->preload, 0);
+ ffmpegmux->context->max_delay = ffmpegmux->max_delay;
+
+ /* for the format-specific guesses, we'll go to
+@@ -554,7 +548,7 @@ gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
+
+ /* open "file" (gstreamer protocol to next element) */
+ if (!ffmpegmux->opened) {
+- int open_flags = URL_WRONLY;
++ int open_flags = AVIO_FLAG_WRITE;
+
+ /* we do need all streams to have started capsnego,
+ * or things will go horribly wrong */
+@@ -648,21 +642,15 @@ gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
+ open_flags |= GST_FFMPEG_URL_STREAMHEADER;
+ }
+
+- if (url_fopen (&ffmpegmux->context->pb,
+- ffmpegmux->context->filename, open_flags) < 0) {
++ if (gst_ffmpegdata_open (ffmpegmux->srcpad, open_flags,
++ &ffmpegmux->context->pb) < 0) {
+ GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, TOO_LAZY, (NULL),
+ ("Failed to open stream context in avmux"));
+ return GST_FLOW_ERROR;
+ }
+
+- if (av_set_parameters (ffmpegmux->context, NULL) < 0) {
+- GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, INIT, (NULL),
+- ("Failed to initialize muxer"));
+- return GST_FLOW_ERROR;
+- }
+-
+ /* now open the mux format */
+- if (av_write_header (ffmpegmux->context) < 0) {
++ if (avformat_write_header (ffmpegmux->context, NULL) < 0) {
+ GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
+ ("Failed to write file header - check codec settings"));
+ return GST_FLOW_ERROR;
+@@ -672,7 +660,7 @@ gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
+ ffmpegmux->opened = TRUE;
+
+ /* flush the header so it will be used as streamheader */
+- put_flush_packet (ffmpegmux->context->pb);
++ avio_flush (ffmpegmux->context->pb);
+ }
+
+ /* take the one with earliest timestamp,
+@@ -779,8 +767,8 @@ gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
+ /* close down */
+ av_write_trailer (ffmpegmux->context);
+ ffmpegmux->opened = FALSE;
+- put_flush_packet (ffmpegmux->context->pb);
+- url_fclose (ffmpegmux->context->pb);
++ avio_flush (ffmpegmux->context->pb);
++ gst_ffmpegdata_close (ffmpegmux->context->pb);
+ gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_eos ());
+ return GST_FLOW_EOS;
+ }
+@@ -818,7 +806,7 @@ gst_ffmpegmux_change_state (GstElement * element, GstStateChange transition)
+ gst_tag_setter_reset_tags (GST_TAG_SETTER (ffmpegmux));
+ if (ffmpegmux->opened) {
+ ffmpegmux->opened = FALSE;
+- url_fclose (ffmpegmux->context->pb);
++ gst_ffmpegdata_close (ffmpegmux->context->pb);
+ }
+ break;
+ case GST_STATE_CHANGE_READY_TO_NULL:
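
The muxer follows the same migration: the AVIOContext from gst_ffmpegdata_open() is attached to context->pb, av_set_parameters() plus av_write_header() collapse into a single avformat_write_header(), and put_flush_packet()/url_fclose() become avio_flush()/gst_ffmpegdata_close(). A compressed outline of the resulting write path (names illustrative, per-buffer writing elided):

    #include <libavformat/avformat.h>

    /* Sketch of the libav 9 muxing sequence; "pb" is the AVIOContext wrapping
     * the element's source pad. */
    static int
    run_muxer (AVFormatContext * ctx, AVIOContext * pb)
    {
      ctx->pb = pb;

      if (avformat_write_header (ctx, NULL) < 0)
        return -1;
      avio_flush (ctx->pb);             /* header doubles as streamheader */

      /* ... av_interleaved_write_frame() for each collected buffer ... */

      av_write_trailer (ctx);
      avio_flush (ctx->pb);
      return 0;
    }
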
+diff --git a/ext/libav/gstavpipe.h b/ext/libav/gstavpipe.h
+deleted file mode 100644
+index 5ded77f..0000000
+--- a/ext/libav/gstavpipe.h
++++ /dev/null
+@@ -1,72 +0,0 @@
+-/* GStreamer
+- * Copyright (C) <2006> Mark Nauwelaerts <manauw@skynet.be>
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Library General Public
+- * License as published by the Free Software Foundation; either
+- * version 2 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Library General Public License for more details.
+- *
+- * You should have received a copy of the GNU Library General Public
+- * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
+- */
+-
+-
+-#ifndef __GST_FFMPEGPIPE_H__
+-#define __GST_FFMPEGPIPE_H__
+-
+-#include <gst/base/gstadapter.h>
+-#include "gstav.h"
+-
+-G_BEGIN_DECLS
+-
+-/* pipe protocol helpers */
+-#define GST_FFMPEG_PIPE_MUTEX_LOCK(m) G_STMT_START { \
+- GST_LOG_OBJECT (m, "locking tlock from thread %p", g_thread_self ()); \
+- g_mutex_lock (&m->tlock); \
+- GST_LOG_OBJECT (m, "locked tlock from thread %p", g_thread_self ()); \
+-} G_STMT_END
+-
+-#define GST_FFMPEG_PIPE_MUTEX_UNLOCK(m) G_STMT_START { \
+- GST_LOG_OBJECT (m, "unlocking tlock from thread %p", g_thread_self ()); \
+- g_mutex_unlock (&m->tlock); \
+-} G_STMT_END
+-
+-#define GST_FFMPEG_PIPE_WAIT(m) G_STMT_START { \
+- GST_LOG_OBJECT (m, "thread %p waiting", g_thread_self ()); \
+- g_cond_wait (&m->cond, &m->tlock); \
+-} G_STMT_END
+-
+-#define GST_FFMPEG_PIPE_SIGNAL(m) G_STMT_START { \
+- GST_LOG_OBJECT (m, "signalling from thread %p", g_thread_self ()); \
+- g_cond_signal (&m->cond); \
+-} G_STMT_END
+-
+-typedef struct _GstFFMpegPipe GstFFMpegPipe;
+-
+-struct _GstFFMpegPipe
+-{
+- /* lock for syncing */
+- GMutex tlock;
+- /* with TLOCK */
+- /* signals counterpart thread to have a look */
+- GCond cond;
+- /* seen eos */
+- gboolean eos;
+- /* flowreturn obtained by src task */
+- GstFlowReturn srcresult;
+-  /* adapter collecting data */
+- GstAdapter *adapter;
+- /* amount needed in adapter by src task */
+- guint needed;
+-};
+-
+-G_END_DECLS
+-
+-#endif /* __GST_FFMPEGPIPE_H__ */
+diff --git a/ext/libav/gstavprotocol.c b/ext/libav/gstavprotocol.c
+index 9c4b052..5d01eaa 100644
+--- a/ext/libav/gstavprotocol.c
++++ b/ext/libav/gstavprotocol.c
+@@ -14,8 +14,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -29,7 +29,7 @@
+ #include <gst/gst.h>
+
+ #include "gstav.h"
+-#include "gstavpipe.h"
++#include "gstavprotocol.h"
+
+ typedef struct _GstProtocolInfo GstProtocolInfo;
+
+@@ -43,63 +43,14 @@ struct _GstProtocolInfo
+ };
+
+ static int
+-gst_ffmpegdata_open (URLContext * h, const char *filename, int flags)
+-{
+- GstProtocolInfo *info;
+- GstPad *pad;
+-
+- GST_LOG ("Opening %s", filename);
+-
+- info = g_new0 (GstProtocolInfo, 1);
+-
+- info->set_streamheader = flags & GST_FFMPEG_URL_STREAMHEADER;
+- flags &= ~GST_FFMPEG_URL_STREAMHEADER;
+- h->flags &= ~GST_FFMPEG_URL_STREAMHEADER;
+-
+- /* we don't support R/W together */
+- if (flags != URL_RDONLY && flags != URL_WRONLY) {
+- GST_WARNING ("Only read-only or write-only are supported");
+- return -EINVAL;
+- }
+-
+- if (sscanf (&filename[12], "%p", &pad) != 1) {
+- GST_WARNING ("could not decode pad from %s", filename);
+- return -EIO;
+- }
+-
+- /* make sure we're a pad and that we're of the right type */
+- g_return_val_if_fail (GST_IS_PAD (pad), -EINVAL);
+-
+- switch (flags) {
+- case URL_RDONLY:
+- g_return_val_if_fail (GST_PAD_IS_SINK (pad), -EINVAL);
+- break;
+- case URL_WRONLY:
+- g_return_val_if_fail (GST_PAD_IS_SRC (pad), -EINVAL);
+- break;
+- }
+-
+- info->eos = FALSE;
+- info->pad = pad;
+- info->offset = 0;
+-
+- h->priv_data = (void *) info;
+- h->is_streamed = FALSE;
+- h->max_packet_size = 0;
+-
+- return 0;
+-}
+-
+-static int
+-gst_ffmpegdata_peek (URLContext * h, unsigned char *buf, int size)
++gst_ffmpegdata_peek (void *priv_data, unsigned char *buf, int size)
+ {
+ GstProtocolInfo *info;
+ GstBuffer *inbuf = NULL;
+ GstFlowReturn ret;
+ int total = 0;
+
+- g_return_val_if_fail (h->flags == URL_RDONLY, AVERROR (EIO));
+- info = (GstProtocolInfo *) h->priv_data;
++ info = (GstProtocolInfo *) priv_data;
+
+ GST_DEBUG ("Pulling %d bytes at position %" G_GUINT64_FORMAT, size,
+ info->offset);
+@@ -131,17 +82,17 @@ gst_ffmpegdata_peek (URLContext * h, unsigned char *buf, int size)
+ }
+
+ static int
+-gst_ffmpegdata_read (URLContext * h, unsigned char *buf, int size)
++gst_ffmpegdata_read (void *priv_data, unsigned char *buf, int size)
+ {
+ gint res;
+ GstProtocolInfo *info;
+
+- info = (GstProtocolInfo *) h->priv_data;
++ info = (GstProtocolInfo *) priv_data;
+
+ GST_DEBUG ("Reading %d bytes of data at position %" G_GUINT64_FORMAT, size,
+ info->offset);
+
+- res = gst_ffmpegdata_peek (h, buf, size);
++ res = gst_ffmpegdata_peek (priv_data, buf, size);
+ if (res >= 0)
+ info->offset += res;
+
+@@ -151,15 +102,13 @@ gst_ffmpegdata_read (URLContext * h, unsigned char *buf, int size)
+ }
+
+ static int
+-gst_ffmpegdata_write (URLContext * h, const unsigned char *buf, int size)
++gst_ffmpegdata_write (void *priv_data, uint8_t * buf, int size)
+ {
+ GstProtocolInfo *info;
+ GstBuffer *outbuf;
+
+ GST_DEBUG ("Writing %d bytes", size);
+- info = (GstProtocolInfo *) h->priv_data;
+-
+- g_return_val_if_fail (h->flags != URL_RDONLY, -EIO);
++ info = (GstProtocolInfo *) priv_data;
+
+ /* create buffer and push data further */
+ outbuf = gst_buffer_new_and_alloc (size);
+@@ -174,7 +123,7 @@ gst_ffmpegdata_write (URLContext * h, const unsigned char *buf, int size)
+ }
+
+ static int64_t
+-gst_ffmpegdata_seek (URLContext * h, int64_t pos, int whence)
++gst_ffmpegdata_seek (void *priv_data, int64_t pos, int whence)
+ {
+ GstProtocolInfo *info;
+ guint64 newpos = 0, oldpos;
+@@ -182,76 +131,68 @@ gst_ffmpegdata_seek (URLContext * h, int64_t pos, int whence)
+ GST_DEBUG ("Seeking to %" G_GINT64_FORMAT ", whence=%d",
+ (gint64) pos, whence);
+
+- info = (GstProtocolInfo *) h->priv_data;
++ info = (GstProtocolInfo *) priv_data;
+
+ /* TODO : if we are push-based, we need to return sensible info */
+
+- switch (h->flags) {
+- case URL_RDONLY:
+- {
+- /* sinkpad */
+- switch (whence) {
+- case SEEK_SET:
+- newpos = (guint64) pos;
+- break;
+- case SEEK_CUR:
+- newpos = info->offset + pos;
+- break;
+- case SEEK_END:
+- case AVSEEK_SIZE:
+- /* ffmpeg wants to know the current end position in bytes ! */
+- {
+- gint64 duration;
+-
+- GST_DEBUG ("Seek end");
+-
+- if (gst_pad_is_linked (info->pad))
+- if (gst_pad_query_duration (GST_PAD_PEER (info->pad),
+- GST_FORMAT_BYTES, &duration))
+- newpos = ((guint64) duration) + pos;
+- }
+- break;
+- default:
+- g_assert (0);
+- break;
++ if (GST_PAD_IS_SINK (info->pad)) {
++ /* sinkpad */
++ switch (whence) {
++ case SEEK_SET:
++ newpos = (guint64) pos;
++ break;
++ case SEEK_CUR:
++ newpos = info->offset + pos;
++ break;
++ case SEEK_END:
++ case AVSEEK_SIZE:
++ /* ffmpeg wants to know the current end position in bytes ! */
++ {
++ gint64 duration;
++
++ GST_DEBUG ("Seek end");
++
++ if (gst_pad_is_linked (info->pad))
++ if (gst_pad_query_duration (GST_PAD_PEER (info->pad),
++ GST_FORMAT_BYTES, &duration))
++ newpos = ((guint64) duration) + pos;
+ }
+- /* FIXME : implement case for push-based behaviour */
+- if (whence != AVSEEK_SIZE)
+- info->offset = newpos;
++ break;
++ default:
++ g_assert (0);
++ break;
+ }
+- break;
+- case URL_WRONLY:
+- {
+- GstSegment segment;
+-
+- oldpos = info->offset;
+-
+- /* srcpad */
+- switch (whence) {
+- case SEEK_SET:
+- {
+- info->offset = (guint64) pos;
+- break;
+- }
+- case SEEK_CUR:
+- info->offset += pos;
+- break;
+- default:
+- break;
++ /* FIXME : implement case for push-based behaviour */
++ if (whence != AVSEEK_SIZE)
++ info->offset = newpos;
++ } else if (GST_PAD_IS_SRC (info->pad)) {
++ GstSegment segment;
++
++ oldpos = info->offset;
++
++ /* srcpad */
++ switch (whence) {
++ case SEEK_SET:
++ {
++ info->offset = (guint64) pos;
++ break;
+ }
+- newpos = info->offset;
++ case SEEK_CUR:
++ info->offset += pos;
++ break;
++ default:
++ break;
++ }
++ newpos = info->offset;
+
+- if (newpos != oldpos) {
+- gst_segment_init (&segment, GST_FORMAT_BYTES);
+- segment.start = newpos;
+- segment.time = newpos;
+- gst_pad_push_event (info->pad, gst_event_new_segment (&segment));
+- }
+- break;
++ if (newpos != oldpos) {
++ gst_segment_init (&segment, GST_FORMAT_BYTES);
++ segment.start = newpos;
++ segment.time = newpos;
++ gst_pad_push_event (info->pad, gst_event_new_segment (&segment));
+ }
+- default:
+- g_assert (0);
+- break;
++ } else {
++ g_assert_not_reached ();
+ }
+
+ GST_DEBUG ("Now at offset %" G_GUINT64_FORMAT " (returning %" G_GUINT64_FORMAT
+@@ -259,84 +200,90 @@ gst_ffmpegdata_seek (URLContext * h, int64_t pos, int whence)
+ return newpos;
+ }
+
+-static int
+-gst_ffmpegdata_close (URLContext * h)
++int
++gst_ffmpegdata_close (AVIOContext * h)
+ {
+ GstProtocolInfo *info;
+
+- info = (GstProtocolInfo *) h->priv_data;
++ info = (GstProtocolInfo *) h->opaque;
+ if (info == NULL)
+ return 0;
+
+ GST_LOG ("Closing file");
+
+- switch (h->flags) {
+- case URL_WRONLY:
+- {
+- /* send EOS - that closes down the stream */
+- gst_pad_push_event (info->pad, gst_event_new_eos ());
+- break;
+- }
+- default:
+- break;
++ if (GST_PAD_IS_SRC (info->pad)) {
++ /* send EOS - that closes down the stream */
++ gst_pad_push_event (info->pad, gst_event_new_eos ());
+ }
+
+ /* clean up data */
+ g_free (info);
+- h->priv_data = NULL;
++ h->opaque = NULL;
++
++ av_freep (&h->buffer);
++ av_free (h);
+
+ return 0;
+ }
+
++int
++gst_ffmpegdata_open (GstPad * pad, int flags, AVIOContext ** context)
++{
++ GstProtocolInfo *info;
++ static const int buffer_size = 4096;
++ unsigned char *buffer = NULL;
+
+-URLProtocol gstreamer_protocol = {
+- /*.name = */ "gstreamer",
+- /*.url_open = */ gst_ffmpegdata_open,
+- /*.url_read = */ gst_ffmpegdata_read,
+- /*.url_write = */ gst_ffmpegdata_write,
+- /*.url_seek = */ gst_ffmpegdata_seek,
+- /*.url_close = */ gst_ffmpegdata_close,
+-};
++ info = g_new0 (GstProtocolInfo, 1);
+
++ info->set_streamheader = flags & GST_FFMPEG_URL_STREAMHEADER;
++ flags &= ~GST_FFMPEG_URL_STREAMHEADER;
+
+-/* specialized protocol for cross-thread pushing,
+- * based on ffmpeg's pipe protocol */
++ /* we don't support R/W together */
++ if ((flags & AVIO_FLAG_WRITE) && (flags & AVIO_FLAG_READ)) {
++ GST_WARNING ("Only read-only or write-only are supported");
++ return -EINVAL;
++ }
+
+-static int
+-gst_ffmpeg_pipe_open (URLContext * h, const char *filename, int flags)
+-{
+- GstFFMpegPipe *ffpipe;
++ /* make sure we're a pad and that we're of the right type */
++ g_return_val_if_fail (GST_IS_PAD (pad), -EINVAL);
+
+- GST_LOG ("Opening %s", filename);
++ if ((flags & AVIO_FLAG_READ))
++ g_return_val_if_fail (GST_PAD_IS_SINK (pad), -EINVAL);
++ if ((flags & AVIO_FLAG_WRITE))
++ g_return_val_if_fail (GST_PAD_IS_SRC (pad), -EINVAL);
+
+- /* we don't support W together */
+- if (flags != URL_RDONLY) {
+- GST_WARNING ("Only read-only is supported");
+- return -EINVAL;
+- }
++ info->eos = FALSE;
++ info->pad = pad;
++ info->offset = 0;
+
+- if (sscanf (&filename[10], "%p", &ffpipe) != 1) {
+- GST_WARNING ("could not decode pipe info from %s", filename);
+- return -EIO;
++ buffer = av_malloc (buffer_size);
++ if (buffer == NULL) {
++ GST_WARNING ("Failed to allocate buffer");
++ return -ENOMEM;
+ }
+
+- /* sanity check */
+- g_return_val_if_fail (GST_IS_ADAPTER (ffpipe->adapter), -EINVAL);
+-
+- h->priv_data = (void *) ffpipe;
+- h->is_streamed = TRUE;
+- h->max_packet_size = 0;
++ *context =
++ avio_alloc_context (buffer, buffer_size, flags, (void *) info,
++ gst_ffmpegdata_read, gst_ffmpegdata_write, gst_ffmpegdata_seek);
++ (*context)->seekable = AVIO_SEEKABLE_NORMAL;
++ if (!(flags & AVIO_FLAG_WRITE)) {
++ (*context)->buf_ptr = (*context)->buf_end;
++ (*context)->write_flag = 0;
++ }
+
+ return 0;
+ }
+
++/* specialized protocol for cross-thread pushing,
++ * based on ffmpeg's pipe protocol */
++
+ static int
+-gst_ffmpeg_pipe_read (URLContext * h, unsigned char *buf, int size)
++gst_ffmpeg_pipe_read (void *priv_data, uint8_t * buf, int size)
+ {
+ GstFFMpegPipe *ffpipe;
+ guint available;
+
+- ffpipe = (GstFFMpegPipe *) h->priv_data;
++ ffpipe = (GstFFMpegPipe *) priv_data;
+
+ GST_LOG ("requested size %d", size);
+
+@@ -366,21 +313,38 @@ gst_ffmpeg_pipe_read (URLContext * h, unsigned char *buf, int size)
+ return size;
+ }
+
+-static int
+-gst_ffmpeg_pipe_close (URLContext * h)
++int
++gst_ffmpeg_pipe_close (AVIOContext * h)
+ {
+ GST_LOG ("Closing pipe");
+
+- h->priv_data = NULL;
++ h->opaque = NULL;
++ av_freep (&h->buffer);
++ av_free (h);
+
+ return 0;
+ }
+
+-URLProtocol gstpipe_protocol = {
+- "gstpipe",
+- gst_ffmpeg_pipe_open,
+- gst_ffmpeg_pipe_read,
+- NULL,
+- NULL,
+- gst_ffmpeg_pipe_close,
+-};
++int
++gst_ffmpeg_pipe_open (GstFFMpegPipe * ffpipe, int flags, AVIOContext ** context)
++{
++ static const int buffer_size = 4096;
++ unsigned char *buffer = NULL;
++
++ /* sanity check */
++ g_return_val_if_fail (GST_IS_ADAPTER (ffpipe->adapter), -EINVAL);
++
++ buffer = av_malloc (buffer_size);
++ if (buffer == NULL) {
++ GST_WARNING ("Failed to allocate buffer");
++ return -ENOMEM;
++ }
++
++ *context =
++ avio_alloc_context (buffer, buffer_size, 0, (void *) ffpipe,
++ gst_ffmpeg_pipe_read, NULL, NULL);
++ (*context)->seekable = 0;
++ (*context)->buf_ptr = (*context)->buf_end;
++
++ return 0;
++}
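
The core of this rewrite is avio_alloc_context(): instead of registering global URLProtocol tables keyed by a fake URL, the element hands libav a buffer plus read/write/seek callbacks that receive its opaque pointer directly. A self-contained sketch of the read-only case (the callback and helper names here are invented for illustration):

    #include <stdint.h>
    #include <libavformat/avio.h>
    #include <libavutil/mem.h>

    static int
    my_read (void *opaque, uint8_t * buf, int size)
    {
      /* pull up to "size" bytes from the object behind "opaque" */
      return size;
    }

    static int64_t
    my_seek (void *opaque, int64_t pos, int whence)
    {
      return pos;                       /* report the new absolute position */
    }

    static AVIOContext *
    make_read_context (void *opaque)
    {
      const int buffer_size = 4096;
      unsigned char *buffer = av_malloc (buffer_size);

      if (buffer == NULL)
        return NULL;

      /* write_flag = 0: read-only context, no write callback needed */
      return avio_alloc_context (buffer, buffer_size, 0, opaque,
          my_read, NULL, my_seek);
    }
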
+diff --git a/ext/libav/gstavprotocol.h b/ext/libav/gstavprotocol.h
+new file mode 100644
+index 0000000..f4b2ba8
+--- /dev/null
++++ b/ext/libav/gstavprotocol.h
+@@ -0,0 +1,78 @@
++/* GStreamer
++ * Copyright (C) <2006> Mark Nauwelaerts <manauw@skynet.be>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++
++
++#ifndef __GST_FFMPEGPROTOCOL_H__
++#define __GST_FFMPEGPROTOCOL_H__
++
++#include <gst/base/gstadapter.h>
++#include "gstav.h"
++
++G_BEGIN_DECLS
++
++/* pipe protocol helpers */
++#define GST_FFMPEG_PIPE_MUTEX_LOCK(m) G_STMT_START { \
++ GST_LOG_OBJECT (m, "locking tlock from thread %p", g_thread_self ()); \
++ g_mutex_lock (&m->tlock); \
++ GST_LOG_OBJECT (m, "locked tlock from thread %p", g_thread_self ()); \
++} G_STMT_END
++
++#define GST_FFMPEG_PIPE_MUTEX_UNLOCK(m) G_STMT_START { \
++ GST_LOG_OBJECT (m, "unlocking tlock from thread %p", g_thread_self ()); \
++ g_mutex_unlock (&m->tlock); \
++} G_STMT_END
++
++#define GST_FFMPEG_PIPE_WAIT(m) G_STMT_START { \
++ GST_LOG_OBJECT (m, "thread %p waiting", g_thread_self ()); \
++ g_cond_wait (&m->cond, &m->tlock); \
++} G_STMT_END
++
++#define GST_FFMPEG_PIPE_SIGNAL(m) G_STMT_START { \
++ GST_LOG_OBJECT (m, "signalling from thread %p", g_thread_self ()); \
++ g_cond_signal (&m->cond); \
++} G_STMT_END
++
++typedef struct _GstFFMpegPipe GstFFMpegPipe;
++
++struct _GstFFMpegPipe
++{
++ /* lock for syncing */
++ GMutex tlock;
++ /* with TLOCK */
++ /* signals counterpart thread to have a look */
++ GCond cond;
++ /* seen eos */
++ gboolean eos;
++ /* flowreturn obtained by src task */
++ GstFlowReturn srcresult;
++  /* adapter collecting data */
++ GstAdapter *adapter;
++ /* amount needed in adapter by src task */
++ guint needed;
++};
++
++int gst_ffmpeg_pipe_open (GstFFMpegPipe *ffpipe, int flags, AVIOContext ** context);
++int gst_ffmpeg_pipe_close (AVIOContext * h);
++
++int gst_ffmpegdata_open (GstPad * pad, int flags, AVIOContext ** context);
++int gst_ffmpegdata_close (AVIOContext * h);
++
++G_END_DECLS
++
++#endif /* __GST_FFMPEGPROTOCOL_H__ */
+diff --git a/ext/libav/gstavutils.c b/ext/libav/gstavutils.c
+index f7a80f6..403a6b6 100644
+--- a/ext/libav/gstavutils.c
++++ b/ext/libav/gstavutils.c
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -25,6 +25,11 @@
+ #ifdef __APPLE__
+ #include <sys/sysctl.h>
+ #endif
++#ifdef __MINGW32__
++#include <stdlib.h>
++#endif
++
++#include <libavutil/mem.h>
+
+ G_CONST_RETURN gchar *
+ gst_ffmpeg_get_codecid_longname (enum CodecID codec_id)
+@@ -44,16 +49,21 @@ av_smp_format_depth (enum AVSampleFormat smp_fmt)
+ gint depth = -1;
+ switch (smp_fmt) {
+ case AV_SAMPLE_FMT_U8:
++ case AV_SAMPLE_FMT_U8P:
+ depth = 1;
+ break;
+ case AV_SAMPLE_FMT_S16:
++ case AV_SAMPLE_FMT_S16P:
+ depth = 2;
+ break;
+ case AV_SAMPLE_FMT_S32:
++ case AV_SAMPLE_FMT_S32P:
+ case AV_SAMPLE_FMT_FLT:
++ case AV_SAMPLE_FMT_FLTP:
+ depth = 4;
+ break;
+ case AV_SAMPLE_FMT_DBL:
++ case AV_SAMPLE_FMT_DBLP:
+ depth = 8;
+ break;
+ default:
+@@ -476,7 +486,6 @@ gst_ffmpeg_auto_max_threads (void)
+ if (n < 1)
+ n = 1;
+
+- GST_INFO ("threads: %d", n);
+ g_once_init_leave (&n_threads, n);
+ }
+
+diff --git a/ext/libav/gstavutils.h b/ext/libav/gstavutils.h
+index ebe49fb..97415eb 100644
+--- a/ext/libav/gstavutils.h
++++ b/ext/libav/gstavutils.h
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifndef __GST_FFMPEG_UTILS_H__
+diff --git a/ext/libav/gstavviddec.c b/ext/libav/gstavviddec.c
+index 7675a71..f2b5b38 100644
+--- a/ext/libav/gstavviddec.c
++++ b/ext/libav/gstavviddec.c
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -35,78 +35,17 @@
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+ #include "gstavutils.h"
++#include "gstavviddec.h"
+
+ GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
+
+-typedef struct _GstFFMpegVidDec GstFFMpegVidDec;
+-
+ #define MAX_TS_MASK 0xff
+
+-struct _GstFFMpegVidDec
+-{
+- GstVideoDecoder parent;
+-
+- GstVideoCodecState *input_state;
+- GstVideoCodecState *output_state;
+-
+- /* decoding */
+- AVCodecContext *context;
+- AVFrame *picture;
+- gboolean opened;
+-
+- /* current context */
+- enum PixelFormat ctx_pix_fmt;
+- gint ctx_width;
+- gint ctx_height;
+- gint ctx_par_n;
+- gint ctx_par_d;
+- gint ctx_ticks;
+- gint ctx_time_d;
+- gint ctx_time_n;
+- gint ctx_interlaced;
+-
+- guint8 *padded;
+- guint padded_size;
+-
+- gboolean current_dr; /* if direct rendering is enabled */
+-
+- /* some properties */
+- enum AVDiscard skip_frame;
+- gint lowres;
+- gboolean direct_rendering;
+- gboolean debug_mv;
+- int max_threads;
+-
+- gboolean is_realvideo;
+-
+- GstCaps *last_caps;
+-};
+-
+-typedef struct _GstFFMpegVidDecClass GstFFMpegVidDecClass;
+-
+-struct _GstFFMpegVidDecClass
+-{
+- GstVideoDecoderClass parent_class;
+-
+- AVCodec *in_plugin;
+-};
+-
+-#define GST_TYPE_FFMPEGDEC \
+- (gst_ffmpegviddec_get_type())
+-#define GST_FFMPEGDEC(obj) \
+- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegVidDec))
+-#define GST_FFMPEGVIDDEC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegVidDecClass))
+-#define GST_IS_FFMPEGDEC(obj) \
+- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
+-#define GST_IS_FFMPEGVIDDEC_CLASS(klass) \
+- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
+-
+ #define DEFAULT_LOWRES 0
+ #define DEFAULT_SKIPFRAME 0
+ #define DEFAULT_DIRECT_RENDERING TRUE
+ #define DEFAULT_DEBUG_MV FALSE
+-#define DEFAULT_MAX_THREADS 1
++#define DEFAULT_MAX_THREADS 0
+
+ enum
+ {
+@@ -234,7 +173,12 @@ gst_ffmpegviddec_base_init (GstFFMpegVidDecClass * klass)
+ GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
+ sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
+ }
+- srccaps = gst_caps_new_empty_simple ("video/x-raw");
++ srccaps = gst_ffmpeg_codectype_to_video_caps (NULL,
++ in_plugin->id, FALSE, in_plugin);
++ if (!srccaps) {
++ GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
++ srccaps = gst_caps_from_string ("video/x-raw");
++ }
+
+ /* pad templates */
+ sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
+@@ -301,8 +245,11 @@ gst_ffmpegviddec_class_init (GstFFMpegVidDecClass * klass)
+ static void
+ gst_ffmpegviddec_init (GstFFMpegVidDec * ffmpegdec)
+ {
++ GstFFMpegVidDecClass *klass =
++ (GstFFMpegVidDecClass *) G_OBJECT_GET_CLASS (ffmpegdec);
++
+ /* some ffmpeg data */
+- ffmpegdec->context = avcodec_alloc_context ();
++ ffmpegdec->context = avcodec_alloc_context3 (klass->in_plugin);
+ ffmpegdec->picture = avcodec_alloc_frame ();
+ ffmpegdec->opened = FALSE;
+ ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
+@@ -321,10 +268,7 @@ gst_ffmpegviddec_finalize (GObject * object)
+ ffmpegdec->context = NULL;
+ }
+
+- if (ffmpegdec->picture != NULL) {
+- av_free (ffmpegdec->picture);
+- ffmpegdec->picture = NULL;
+- }
++ avcodec_free_frame (&ffmpegdec->picture);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+ }
+@@ -334,9 +278,6 @@ gst_ffmpegviddec_finalize (GObject * object)
+ static void
+ gst_ffmpegviddec_close (GstFFMpegVidDec * ffmpegdec)
+ {
+- if (!ffmpegdec->opened)
+- return;
+-
+ GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
+
+ gst_caps_replace (&ffmpegdec->last_caps, NULL);
+@@ -345,10 +286,7 @@ gst_ffmpegviddec_close (GstFFMpegVidDec * ffmpegdec)
+ gst_ffmpeg_avcodec_close (ffmpegdec->context);
+ ffmpegdec->opened = FALSE;
+
+- if (ffmpegdec->context->palctrl) {
+- av_free (ffmpegdec->context->palctrl);
+- ffmpegdec->context->palctrl = NULL;
+- }
++ gst_buffer_replace (&ffmpegdec->palette, NULL);
+
+ if (ffmpegdec->context->extradata) {
+ av_free (ffmpegdec->context->extradata);
+@@ -397,6 +335,25 @@ could_not_open:
+ }
+ }
+
++static void
++gst_ffmpegviddec_get_palette (GstFFMpegVidDec * ffmpegdec,
++ GstVideoCodecState * state)
++{
++ GstStructure *str = gst_caps_get_structure (state->caps, 0);
++ const GValue *palette_v;
++ GstBuffer *palette;
++
++ /* do we have a palette? */
++ if ((palette_v = gst_structure_get_value (str, "palette_data"))) {
++ palette = gst_value_get_buffer (palette_v);
++ GST_DEBUG ("got palette data %p", palette);
++ if (gst_buffer_get_size (palette) >= AVPALETTE_SIZE) {
++ gst_buffer_replace (&ffmpegdec->palette, palette);
++ }
++ }
++}
++
++
+ static gboolean
+ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+ GstVideoCodecState * state)
+@@ -428,9 +385,6 @@ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+ gst_ffmpegviddec_drain (ffmpegdec);
+ GST_OBJECT_LOCK (ffmpegdec);
+ gst_ffmpegviddec_close (ffmpegdec);
+-
+- /* and reset the defaults that were set when a context is created */
+- avcodec_get_context_defaults (ffmpegdec->context);
+ }
+
+ gst_caps_replace (&ffmpegdec->last_caps, state->caps);
+@@ -452,6 +406,8 @@ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+ GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
+ ffmpegdec->context->height);
+
++ gst_ffmpegviddec_get_palette (ffmpegdec, state);
++
+ if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
+ GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
+ ffmpegdec->context->time_base.num = 1;
+@@ -460,7 +416,7 @@ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+
+ /* workaround encoder bugs */
+ ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
+- ffmpegdec->context->error_recognition = 1;
++ ffmpegdec->context->err_recognition = 1;
+
+ /* for slow cpus */
+ ffmpegdec->context->lowres = ffmpegdec->lowres;
+@@ -471,30 +427,16 @@ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+ ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;
+
+ {
+- const gchar *env = g_getenv ("GST_AVVIDDEC_MAX_THREADS");
+- int max_threads = ffmpegdec->max_threads;
+ GstQuery *query;
+ gboolean is_live;
+
+- if (env != NULL) {
+- if (g_str_equal (env, "auto"))
+- max_threads = 0;
+- else
+- max_threads = MAX (atoi (env), 0);
+-
+- if (max_threads != 1) {
+- GST_WARNING_OBJECT (ffmpegdec, "max threads forced to %d, this might "
+- "lead to decoding errors or artefacts", max_threads);
+- }
+- }
+-
+- if (max_threads == 0) {
++ if (ffmpegdec->max_threads == 0) {
+ if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
+ ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
+ else
+ ffmpegdec->context->thread_count = 0;
+ } else
+- ffmpegdec->context->thread_count = max_threads;
++ ffmpegdec->context->thread_count = ffmpegdec->max_threads;
+
+ query = gst_query_new_latency ();
+ is_live = FALSE;
+@@ -505,11 +447,10 @@ gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
+ }
+ gst_query_unref (query);
+
+- /* Slice based threading is broken in libav 0.8 */
+ if (is_live)
+- ffmpegdec->context->thread_type = 0; /* FF_THREAD_SLICE */
++ ffmpegdec->context->thread_type = FF_THREAD_SLICE;
+ else
+- ffmpegdec->context->thread_type = /* FF_THREAD_SLICE | */ FF_THREAD_FRAME;
++ ffmpegdec->context->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
+ }
+
+ /* open codec - we don't select an output pix_fmt yet,
+@@ -683,6 +624,7 @@ fallback:
+ duplicate_frame:
+ {
+ GST_WARNING_OBJECT (ffmpegdec, "already alloc'ed output buffer for frame");
++ gst_video_codec_frame_unref (frame);
+ return -1;
+ }
+ no_frame:
+@@ -1155,12 +1097,12 @@ gst_ffmpegviddec_video_frame (GstFFMpegVidDec * ffmpegdec,
+ /* now decode the frame */
+ gst_avpacket_init (&packet, data, size);
+
+- if (ffmpegdec->context->palctrl) {
++ if (ffmpegdec->palette) {
+ guint8 *pal;
+
+ pal = av_packet_new_side_data (&packet, AV_PKT_DATA_PALETTE,
+ AVPALETTE_SIZE);
+- memcpy (pal, ffmpegdec->context->palctrl->palette, AVPALETTE_SIZE);
++ gst_buffer_extract (ffmpegdec->palette, 0, pal, AVPALETTE_SIZE);
+ GST_DEBUG_OBJECT (ffmpegdec, "copy pal %p %p", &packet, pal);
+ }
+
+@@ -1358,10 +1300,9 @@ gst_ffmpegviddec_handle_frame (GstVideoDecoder * decoder,
+ gboolean do_padding;
+
+ GST_LOG_OBJECT (ffmpegdec,
+- "Received new data of size %u, dts %" GST_TIME_FORMAT ", pts:%"
+- GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
+- gst_buffer_get_size (frame->input_buffer),
+- GST_TIME_ARGS (frame->dts),
++ "Received new data of size %" G_GSIZE_FORMAT ", dts %" GST_TIME_FORMAT
++ ", pts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
++ gst_buffer_get_size (frame->input_buffer), GST_TIME_ARGS (frame->dts),
+ GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->duration));
+
+ if (!gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ)) {
+@@ -1723,7 +1664,8 @@ gst_ffmpegviddec_register (GstPlugin * plugin)
+ gchar *plugin_name;
+
+ /* only video decoders */
+- if (!in_plugin->decode || in_plugin->type != AVMEDIA_TYPE_VIDEO)
++ if (!av_codec_is_decoder (in_plugin)
++ || in_plugin->type != AVMEDIA_TYPE_VIDEO)
+ goto next;
+
+ /* no quasi-codecs, please */
+diff --git a/ext/libav/gstavviddec.h b/ext/libav/gstavviddec.h
+new file mode 100644
+index 0000000..c8649c4
+--- /dev/null
++++ b/ext/libav/gstavviddec.h
+@@ -0,0 +1,93 @@
++/* GStreamer
++ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Library General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Library General Public License for more details.
++ *
++ * You should have received a copy of the GNU Library General Public
++ * License along with this library; if not, write to the
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
++ */
++#ifndef __GST_FFMPEGVIDDEC_H__
++#define __GST_FFMPEGVIDDEC_H__
++
++G_BEGIN_DECLS
++
++#include <gst/gst.h>
++#include <gst/video/video.h>
++#include <gst/video/gstvideodecoder.h>
++#include <libavcodec/avcodec.h>
++
++typedef struct _GstFFMpegVidDec GstFFMpegVidDec;
++struct _GstFFMpegVidDec
++{
++ GstVideoDecoder parent;
++
++ GstVideoCodecState *input_state;
++ GstVideoCodecState *output_state;
++
++ /* decoding */
++ AVCodecContext *context;
++ AVFrame *picture;
++ gboolean opened;
++
++ /* current context */
++ enum PixelFormat ctx_pix_fmt;
++ gint ctx_width;
++ gint ctx_height;
++ gint ctx_par_n;
++ gint ctx_par_d;
++ gint ctx_ticks;
++ gint ctx_time_d;
++ gint ctx_time_n;
++ gint ctx_interlaced;
++ GstBuffer *palette;
++
++ guint8 *padded;
++ guint padded_size;
++
++ gboolean current_dr; /* if direct rendering is enabled */
++
++ /* some properties */
++ enum AVDiscard skip_frame;
++ gint lowres;
++ gboolean direct_rendering;
++ gboolean debug_mv;
++ int max_threads;
++
++ gboolean is_realvideo;
++
++ GstCaps *last_caps;
++};
++
++typedef struct _GstFFMpegVidDecClass GstFFMpegVidDecClass;
++
++struct _GstFFMpegVidDecClass
++{
++ GstVideoDecoderClass parent_class;
++
++ AVCodec *in_plugin;
++};
++
++#define GST_TYPE_FFMPEGDEC \
++ (gst_ffmpegviddec_get_type())
++#define GST_FFMPEGDEC(obj) \
++ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegVidDec))
++#define GST_FFMPEGVIDDEC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegVidDecClass))
++#define GST_IS_FFMPEGDEC(obj) \
++ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
++#define GST_IS_FFMPEGVIDDEC_CLASS(klass) \
++ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
++
++G_END_DECLS
++
++#endif
+diff --git a/ext/libav/gstavvidenc.c b/ext/libav/gstavvidenc.c
+index b747613..7b24c45 100644
+--- a/ext/libav/gstavvidenc.c
++++ b/ext/libav/gstavvidenc.c
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -56,13 +56,13 @@ enum
+
+ enum
+ {
+- ARG_0,
+- ARG_BIT_RATE,
+- ARG_GOP_SIZE,
+- ARG_ME_METHOD,
+- ARG_BUFSIZE,
+- ARG_RTP_PAYLOAD_SIZE,
+- ARG_CFG_BASE
++ PROP_0,
++ PROP_BIT_RATE,
++ PROP_GOP_SIZE,
++ PROP_ME_METHOD,
++ PROP_BUFSIZE,
++ PROP_RTP_PAYLOAD_SIZE,
++ PROP_CFG_BASE
+ };
+
+ #define GST_TYPE_ME_METHOD (gst_ffmpegvidenc_me_method_get_type())
+@@ -98,6 +98,8 @@ static gboolean gst_ffmpegvidenc_set_format (GstVideoEncoder * encoder,
+ GstVideoCodecState * state);
+ static gboolean gst_ffmpegvidenc_propose_allocation (GstVideoEncoder * encoder,
+ GstQuery * query);
++static gboolean gst_ffmpegvidenc_reset (GstVideoEncoder * encoder,
++ gboolean hard);
+
+ static GstCaps *gst_ffmpegvidenc_getcaps (GstVideoEncoder * encoder,
+ GstCaps * filter);
+@@ -144,7 +146,12 @@ gst_ffmpegvidenc_base_init (GstFFMpegVidEncClass * klass)
+ srccaps = gst_caps_new_empty_simple ("unknown/unknown");
+ }
+
+- sinkcaps = gst_caps_new_empty_simple ("video/x-raw");
++ sinkcaps = gst_ffmpeg_codectype_to_video_caps (NULL,
++ in_plugin->id, TRUE, in_plugin);
++ if (!sinkcaps) {
++ GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
++ sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
++ }
+
+ /* pad templates */
+ sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
+@@ -157,7 +164,6 @@ gst_ffmpegvidenc_base_init (GstFFMpegVidEncClass * klass)
+ klass->in_plugin = in_plugin;
+ klass->srctempl = srctempl;
+ klass->sinktempl = sinktempl;
+- klass->sinkcaps = NULL;
+
+ return;
+ }
+@@ -178,30 +184,30 @@ gst_ffmpegvidenc_class_init (GstFFMpegVidEncClass * klass)
+
+ /* FIXME: could use -1 for a sensible per-codec default based on
+ * e.g. input resolution and framerate */
+- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BIT_RATE,
+ g_param_spec_int ("bitrate", "Bit Rate",
+ "Target Video Bitrate", 0, G_MAXINT, DEFAULT_VIDEO_BITRATE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_GOP_SIZE,
++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_GOP_SIZE,
+ g_param_spec_int ("gop-size", "GOP Size",
+ "Number of frames within one GOP", 0, G_MAXINT,
+ DEFAULT_VIDEO_GOP_SIZE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_ME_METHOD,
++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ME_METHOD,
+ g_param_spec_enum ("me-method", "ME Method", "Motion Estimation Method",
+ GST_TYPE_ME_METHOD, ME_EPZS,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BUFSIZE,
++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BUFSIZE,
+ g_param_spec_int ("buffer-size", "Buffer Size",
+ "Size of the video buffers", 0, G_MAXINT, 0,
+ G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (G_OBJECT_CLASS (klass),
+- ARG_RTP_PAYLOAD_SIZE, g_param_spec_int ("rtp-payload-size",
++ PROP_RTP_PAYLOAD_SIZE, g_param_spec_int ("rtp-payload-size",
+ "RTP Payload Size", "Target GOB length", 0, G_MAXINT, 0,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ /* register additional properties, possibly dependent on the exact CODEC */
+- gst_ffmpeg_cfg_install_property (klass, ARG_CFG_BASE);
++ gst_ffmpeg_cfg_install_property (klass, PROP_CFG_BASE);
+
+ venc_class->stop = gst_ffmpegvidenc_stop;
+ venc_class->finish = gst_ffmpegvidenc_finish;
+@@ -209,6 +215,7 @@ gst_ffmpegvidenc_class_init (GstFFMpegVidEncClass * klass)
+ venc_class->getcaps = gst_ffmpegvidenc_getcaps;
+ venc_class->set_format = gst_ffmpegvidenc_set_format;
+ venc_class->propose_allocation = gst_ffmpegvidenc_propose_allocation;
++ venc_class->reset = gst_ffmpegvidenc_reset;
+
+ gobject_class->finalize = gst_ffmpegvidenc_finalize;
+ }
+@@ -216,8 +223,11 @@ gst_ffmpegvidenc_class_init (GstFFMpegVidEncClass * klass)
+ static void
+ gst_ffmpegvidenc_init (GstFFMpegVidEnc * ffmpegenc)
+ {
++ GstFFMpegVidEncClass *klass =
++ (GstFFMpegVidEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
++
+ /* ffmpeg objects */
+- ffmpegenc->context = avcodec_alloc_context ();
++ ffmpegenc->context = avcodec_alloc_context3 (klass->in_plugin);
+ ffmpegenc->picture = avcodec_alloc_frame ();
+ ffmpegenc->opened = FALSE;
+
+@@ -243,15 +253,9 @@ gst_ffmpegvidenc_finalize (GObject * object)
+
+ gst_ffmpeg_cfg_finalize (ffmpegenc);
+
+- /* close old session */
+- if (ffmpegenc->opened) {
+- gst_ffmpeg_avcodec_close (ffmpegenc->context);
+- ffmpegenc->opened = FALSE;
+- }
+-
+ /* clean up remaining allocated data */
+ av_free (ffmpegenc->context);
+- av_free (ffmpegenc->picture);
++ avcodec_free_frame (&ffmpegenc->picture);
+
+ g_free (ffmpegenc->filename);
+
+@@ -262,117 +266,13 @@ static GstCaps *
+ gst_ffmpegvidenc_getcaps (GstVideoEncoder * encoder, GstCaps * filter)
+ {
+ GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
+- GstFFMpegVidEncClass *oclass =
+- (GstFFMpegVidEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
+- AVCodecContext *ctx = NULL;
+- enum PixelFormat pixfmt;
+ GstCaps *caps = NULL;
+- gint i;
+
+ GST_DEBUG_OBJECT (ffmpegenc, "getting caps");
+
+- /* cached */
+- if (oclass->sinkcaps) {
+- caps = gst_video_encoder_proxy_getcaps (encoder, oclass->sinkcaps, filter);
+- GST_DEBUG_OBJECT (ffmpegenc, "return cached caps %" GST_PTR_FORMAT, caps);
+- return caps;
+- }
+-
+- /* create cache etc. */
+-
+- /* shut up the logging while we autoprobe; we don't want warnings and
+- * errors about unsupported formats */
+- /* FIXME: if someone cares about this disabling the logging for other
+- * instances/threads/..., one could investigate if there is a way to
+- * set this as a struct member on the av context, and check it from the
+- * log handler */
+-#ifndef GST_DISABLE_GST_DEBUG
+- _shut_up_I_am_probing = TRUE;
+-#endif
+- GST_DEBUG_OBJECT (ffmpegenc, "probing caps");
+- i = pixfmt = 0;
+- /* check pixfmt until deemed finished */
+- for (pixfmt = 0;; pixfmt++) {
+- GstCaps *tmpcaps;
+-
+- /* override looping all pixfmt if codec declares pixfmts;
+- * these may not properly check and report supported pixfmt during _init */
+- if (oclass->in_plugin->pix_fmts) {
+- if ((pixfmt = oclass->in_plugin->pix_fmts[i++]) == PIX_FMT_NONE) {
+- GST_DEBUG_OBJECT (ffmpegenc,
+- "At the end of official pixfmt for this codec, breaking out");
+- break;
+- }
+- GST_DEBUG_OBJECT (ffmpegenc,
+- "Got an official pixfmt [%d], attempting to get caps", pixfmt);
+- tmpcaps = gst_ffmpeg_pixfmt_to_caps (pixfmt, NULL, oclass->in_plugin->id);
+- if (tmpcaps) {
+- GST_DEBUG_OBJECT (ffmpegenc, "Got caps, breaking out");
+- if (!caps)
+- caps = gst_caps_new_empty ();
+- gst_caps_append (caps, tmpcaps);
+- continue;
+- }
+- GST_DEBUG_OBJECT (ffmpegenc,
+- "Couldn't figure out caps without context, trying again with a context");
+- }
+-
+- GST_DEBUG_OBJECT (ffmpegenc, "pixfmt :%d", pixfmt);
+- if (pixfmt >= PIX_FMT_NB) {
+- GST_WARNING ("Invalid pixfmt, breaking out");
+- break;
+- }
+-
+- /* need to start with a fresh codec_context each time around, since
+- * codec_close may have released stuff causing the next pass to segfault */
+- ctx = avcodec_alloc_context ();
+- if (!ctx) {
+- GST_DEBUG_OBJECT (ffmpegenc, "no context");
+- break;
+- }
+-
+- /* set some default properties */
+- ctx->width = DEFAULT_WIDTH;
+- ctx->height = DEFAULT_HEIGHT;
+- ctx->time_base.num = 1;
+- ctx->time_base.den = 25;
+- ctx->ticks_per_frame = 1;
+- ctx->bit_rate = DEFAULT_VIDEO_BITRATE;
+- /* makes it silent */
+- ctx->strict_std_compliance = -1;
+-
+- ctx->pix_fmt = pixfmt;
+-
+- GST_DEBUG ("Attempting to open codec");
+- if (gst_ffmpeg_avcodec_open (ctx, oclass->in_plugin) >= 0 &&
+- ctx->pix_fmt == pixfmt) {
+- ctx->width = -1;
+- if (!caps)
+- caps = gst_caps_new_empty ();
+- tmpcaps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type, ctx,
+- oclass->in_plugin->id, TRUE);
+- if (tmpcaps)
+- gst_caps_append (caps, tmpcaps);
+- else
+- GST_LOG_OBJECT (ffmpegenc,
+- "Couldn't get caps for oclass->in_plugin->name:%s",
+- oclass->in_plugin->name);
+- gst_ffmpeg_avcodec_close (ctx);
+- } else {
+- GST_DEBUG_OBJECT (ffmpegenc, "Opening codec failed with pixfmt : %d",
+- pixfmt);
+- }
+- if (ctx->priv_data)
+- gst_ffmpeg_avcodec_close (ctx);
+- av_free (ctx);
+- }
+-#ifndef GST_DISABLE_GST_DEBUG
+- _shut_up_I_am_probing = FALSE;
+-#endif
+-
+- oclass->sinkcaps = caps;
+-
+- return gst_video_encoder_proxy_getcaps (encoder, caps, filter);
++ caps = gst_video_encoder_proxy_getcaps (encoder, NULL, filter);
++ GST_DEBUG_OBJECT (ffmpegenc, "return caps %" GST_PTR_FORMAT, caps);
++ return caps;
+ }
+
+ static gboolean
+@@ -394,9 +294,6 @@ gst_ffmpegvidenc_set_format (GstVideoEncoder * encoder,
+ ffmpegenc->opened = FALSE;
+ }
+
+- /* set defaults */
+- avcodec_get_context_defaults (ffmpegenc->context);
+-
+ /* if we set it in _getcaps we should set it also in _link */
+ ffmpegenc->context->strict_std_compliance = -1;
+
+@@ -546,16 +443,7 @@ gst_ffmpegvidenc_set_format (GstVideoEncoder * encoder,
+ gst_caps_unref (icaps);
+ return FALSE;
+ }
+-
+- if (gst_caps_get_size (icaps) > 1) {
+- GstCaps *newcaps;
+-
+- newcaps =
+- gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (icaps,
+- 0)), NULL);
+- gst_caps_unref (icaps);
+- icaps = newcaps;
+- }
++ icaps = gst_caps_truncate (icaps);
+
+ /* Store input state and set output state */
+ if (ffmpegenc->input_state)
+@@ -796,24 +684,28 @@ gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
+ (("Could not write to file \"%s\"."), ffmpegenc->filename),
+ GST_ERROR_SYSTEM);
+
+- if (gst_video_encoder_allocate_output_frame (GST_VIDEO_ENCODER (ffmpegenc),
+- frame, ret_size) != GST_FLOW_OK) {
++ if (send) {
++ if (gst_video_encoder_allocate_output_frame (GST_VIDEO_ENCODER
++ (ffmpegenc), frame, ret_size) != GST_FLOW_OK) {
+ #ifndef GST_DISABLE_GST_DEBUG
+- GstFFMpegVidEncClass *oclass =
+- (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
+- GST_WARNING_OBJECT (ffmpegenc,
+- "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
++ GstFFMpegVidEncClass *oclass =
++ (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
++ GST_WARNING_OBJECT (ffmpegenc,
++ "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
+ #endif /* GST_DISABLE_GST_DEBUG */
+- gst_video_codec_frame_unref (frame);
+- break;
+- }
+- outbuf = frame->output_buffer;
+- gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);
++ gst_video_codec_frame_unref (frame);
++ break;
++ }
++ outbuf = frame->output_buffer;
++ gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);
+
+- if (ffmpegenc->context->coded_frame->key_frame)
+- GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
++ if (ffmpegenc->context->coded_frame->key_frame)
++ GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
+
+- gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
++ gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
++ } else {
++ gst_video_codec_frame_unref (frame);
++ }
+ }
+ }
+
+@@ -835,18 +727,18 @@ gst_ffmpegvidenc_set_property (GObject * object,
+
+ /* Check the argument id to see which argument we're setting. */
+ switch (prop_id) {
+- case ARG_BIT_RATE:
++ case PROP_BIT_RATE:
+ ffmpegenc->bitrate = g_value_get_int (value);
+ break;
+- case ARG_GOP_SIZE:
++ case PROP_GOP_SIZE:
+ ffmpegenc->gop_size = g_value_get_int (value);
+ break;
+- case ARG_ME_METHOD:
++ case PROP_ME_METHOD:
+ ffmpegenc->me_method = g_value_get_enum (value);
+ break;
+- case ARG_BUFSIZE:
++ case PROP_BUFSIZE:
+ break;
+- case ARG_RTP_PAYLOAD_SIZE:
++ case PROP_RTP_PAYLOAD_SIZE:
+ ffmpegenc->rtp_payload_size = g_value_get_int (value);
+ break;
+ default:
+@@ -867,19 +759,19 @@ gst_ffmpegvidenc_get_property (GObject * object,
+ ffmpegenc = (GstFFMpegVidEnc *) (object);
+
+ switch (prop_id) {
+- case ARG_BIT_RATE:
++ case PROP_BIT_RATE:
+ g_value_set_int (value, ffmpegenc->bitrate);
+ break;
+- case ARG_GOP_SIZE:
++ case PROP_GOP_SIZE:
+ g_value_set_int (value, ffmpegenc->gop_size);
+ break;
+- case ARG_ME_METHOD:
++ case PROP_ME_METHOD:
+ g_value_set_enum (value, ffmpegenc->me_method);
+ break;
+- case ARG_BUFSIZE:
++ case PROP_BUFSIZE:
+ g_value_set_int (value, ffmpegenc->buffer_size);
+ break;
+- case ARG_RTP_PAYLOAD_SIZE:
++ case PROP_RTP_PAYLOAD_SIZE:
+ g_value_set_int (value, ffmpegenc->rtp_payload_size);
+ break;
+ default:
+@@ -890,15 +782,26 @@ gst_ffmpegvidenc_get_property (GObject * object,
+ }
+
+ static gboolean
+-gst_ffmpegvidenc_stop (GstVideoEncoder * encoder)
++gst_ffmpegvidenc_reset (GstVideoEncoder * encoder, gboolean hard)
+ {
+ GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
+
+- gst_ffmpegvidenc_flush_buffers (ffmpegenc, FALSE);
+ if (ffmpegenc->opened) {
+- gst_ffmpeg_avcodec_close (ffmpegenc->context);
+- ffmpegenc->opened = FALSE;
++ avcodec_flush_buffers (ffmpegenc->context);
+ }
++
++ return TRUE;
++}
++
++static gboolean
++gst_ffmpegvidenc_stop (GstVideoEncoder * encoder)
++{
++ GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
++
++ gst_ffmpegvidenc_flush_buffers (ffmpegenc, FALSE);
++ gst_ffmpeg_avcodec_close (ffmpegenc->context);
++ ffmpegenc->opened = FALSE;
++
+ if (ffmpegenc->file) {
+ fclose (ffmpegenc->file);
+ ffmpegenc->file = NULL;
+@@ -971,7 +874,8 @@ gst_ffmpegvidenc_register (GstPlugin * plugin)
+ }
+
+ /* only video encoders */
+- if (!in_plugin->encode || in_plugin->type != AVMEDIA_TYPE_VIDEO)
++ if (!av_codec_is_encoder (in_plugin)
++ || in_plugin->type != AVMEDIA_TYPE_VIDEO)
+ goto next;
+
+ /* FIXME : We should have a method to know cheaply whether we have a mapping
+diff --git a/ext/libav/gstavvidenc.h b/ext/libav/gstavvidenc.h
+index eb201b3..d19dc39 100644
+--- a/ext/libav/gstavvidenc.h
++++ b/ext/libav/gstavvidenc.h
+@@ -13,8 +13,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ /* First, include the header file for the plugin, to bring in the
+@@ -26,7 +26,9 @@
+
+ G_BEGIN_DECLS
+
++#include <gst/gst.h>
+ #include <gst/video/gstvideoencoder.h>
++#include <libavcodec/avcodec.h>
+
+ typedef struct _GstFFMpegVidEnc GstFFMpegVidEnc;
+
+@@ -76,7 +78,6 @@ struct _GstFFMpegVidEncClass
+
+ AVCodec *in_plugin;
+ GstPadTemplate *srctempl, *sinktempl;
+- GstCaps *sinkcaps;
+ };
+
+ #define GST_TYPE_FFMPEGVIDENC \
+diff --git a/ext/libswscale/gstffmpegscale.c b/ext/libswscale/gstffmpegscale.c
+index ce980a8..a2b0248 100644
+--- a/ext/libswscale/gstffmpegscale.c
++++ b/ext/libswscale/gstffmpegscale.c
+@@ -15,8 +15,8 @@
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+- * Boston, MA 02111-1307, USA.
++ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
++ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+@@ -246,44 +246,15 @@ gst_ffmpegscale_caps_remove_format_info (GstCaps * caps)
+ {
+ int i;
+ GstStructure *structure;
+- GstCaps *rgbcaps;
+- GstCaps *graycaps;
+
+ caps = gst_caps_copy (caps);
+
+ for (i = 0; i < gst_caps_get_size (caps); i++) {
+ structure = gst_caps_get_structure (caps, i);
+
+- gst_structure_set_name (structure, "video/x-raw-yuv");
+ gst_structure_remove_field (structure, "format");
+- gst_structure_remove_field (structure, "endianness");
+- gst_structure_remove_field (structure, "depth");
+- gst_structure_remove_field (structure, "bpp");
+- gst_structure_remove_field (structure, "red_mask");
+- gst_structure_remove_field (structure, "green_mask");
+- gst_structure_remove_field (structure, "blue_mask");
+- gst_structure_remove_field (structure, "alpha_mask");
+- gst_structure_remove_field (structure, "palette_data");
+ }
+
+- rgbcaps = gst_caps_copy (caps);
+-
+- for (i = 0; i < gst_caps_get_size (rgbcaps); i++) {
+- structure = gst_caps_get_structure (rgbcaps, i);
+-
+- gst_structure_set_name (structure, "video/x-raw-rgb");
+- }
+- graycaps = gst_caps_copy (caps);
+-
+- for (i = 0; i < gst_caps_get_size (graycaps); i++) {
+- structure = gst_caps_get_structure (graycaps, i);
+-
+- gst_structure_set_name (structure, "video/x-raw-gray");
+- }
+-
+- gst_caps_append (caps, graycaps);
+- gst_caps_append (caps, rgbcaps);
+-
+ return caps;
+ }
+
+diff --git a/gst-libs/ext/Makefile.am b/gst-libs/ext/Makefile.am
+index b1054e1..12f35c8 100644
+--- a/gst-libs/ext/Makefile.am
++++ b/gst-libs/ext/Makefile.am
+@@ -23,7 +23,7 @@ clean-local:
+
+ dist-clean:
+ cd libav && $(MAKE) distclean
+- rm -rf $(TMP_DIST_DIR)
++ rm -rf libav/$(TMP_DIST_DIR)
+ rm -f Makefile
+ rm -f libav/.version
+ rm -f libav/.config
+@@ -36,19 +36,19 @@ maintainer-clean: distclean
+ maintainerclean: maintainer-clean
+
+ dist-local:
+- GIT_DIR=libav/.git git checkout-index --prefix=../$(TMP_DIST_DIR)/libav/ -a
+- touch $(TMP_DIST_DIR)/libav/config.mak
++ GIT_DIR=libav/.git git checkout-index --prefix=$(TMP_DIST_DIR)/libav/ -a
++ touch libav/$(TMP_DIST_DIR)/libav/config.mak
+ echo "Patching libav ./configure"
+- sed -e '/Unknown option/ {N;N;s/exit 1//; }' $(TMP_DIST_DIR)/libav/configure > $(TMP_DIST_DIR)/libav/configure.tmp
+- mv $(TMP_DIST_DIR)/libav/configure.tmp $(TMP_DIST_DIR)/libav/configure
+- chmod +x $(TMP_DIST_DIR)/libav/configure
++ sed -e '/Unknown option/ {N;N;s/exit 1//; }' libav/$(TMP_DIST_DIR)/libav/configure > libav/$(TMP_DIST_DIR)/libav/configure.tmp
++ mv libav/$(TMP_DIST_DIR)/libav/configure.tmp libav/$(TMP_DIST_DIR)/libav/configure
++ chmod +x libav/$(TMP_DIST_DIR)/libav/configure
+
+ distdir: dist-local
+- cp -r $(TMP_DIST_DIR)/libav ${distdir}
++ cp -r libav/$(TMP_DIST_DIR)/libav ${distdir}
+ cp -f $(top_srcdir)/gst-libs/ext/Makefile.am $(top_srcdir)/gst-libs/ext/Makefile.in ${distdir}
+- rm -rf $(TMP_DIST_DIR)
++ rm -rf libav/$(TMP_DIST_DIR)
+
+ dist: dist-local
+- cd $(TMP_DIST_DIR) && tar -czf libav.tar.gz libav
+- mv $(TMP_DIST_DIR)/libav.tar.gz ./
+- rm -rf $(TMP_DIST_DIR)
++ cd libav/$(TMP_DIST_DIR) && tar -czf libav.tar.gz libav
++ mv libav/$(TMP_DIST_DIR)/libav.tar.gz ./
++ rm -rf libav/$(TMP_DIST_DIR)
diff --git a/debian/patches/series b/debian/patches/series
index cea5087..ee866e6 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1,3 @@
02_plugin-dependencies.patch
+03_git-2013-04-26.patch
99_ltmain_as-needed.patch