author     Thiago Santos <ts.santos@sisa.samsung.com>   2014-02-20 17:25:35 -0300
committer  Thiago Santos <ts.santos@sisa.samsung.com>   2014-02-20 17:32:12 -0300
commit     845b874575480800e0bdb3209ef7dcc0337a0a16 (patch)
tree       ea60e38127e9dc06a40f6c61dd261664d2b78afd
parent     7f5132b34fef8f0879d8cad480261f0136531aac (diff)
avaudenc: fix audio encoder flushing according to libav docs
From the libav documentation of avcodec_encode_audio2():

 * @param[in] frame AVFrame containing the raw audio data to be encoded.
 *                  May be NULL when flushing an encoder that has the
 *                  CODEC_CAP_DELAY capability set.

The AVFrame pointer itself should be NULL when flushing, not the frame.data pointer.

https://bugzilla.gnome.org/show_bug.cgi?id=724536
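For context, the calling pattern the fix moves to looks roughly like the sketch below: once input is exhausted, an encoder advertising CODEC_CAP_DELAY is drained by calling avcodec_encode_audio2() with a NULL AVFrame until no further packets are produced. This is a minimal illustration of the documented contract, not the gst-libav code; drain_audio_encoder() and write_packet() are made-up names for the example.

/* Minimal sketch (assumed helper, not gst-libav code): drain an audio
 * encoder at EOS with the libav API used in this commit.  Per the docs
 * quoted above, the flush is signalled by passing a NULL AVFrame pointer,
 * not an AVFrame whose data pointers are NULL. */
#include <libavcodec/avcodec.h>

static void
drain_audio_encoder (AVCodecContext * ctx)
{
  AVPacket pkt;
  int got_packet;

  /* Encoders without CODEC_CAP_DELAY buffer nothing, so there is
   * nothing to flush. */
  if (!(ctx->codec->capabilities & CODEC_CAP_DELAY))
    return;

  do {
    av_init_packet (&pkt);
    pkt.data = NULL;            /* let the encoder allocate the packet */
    pkt.size = 0;

    /* NULL frame == flush request */
    if (avcodec_encode_audio2 (ctx, &pkt, NULL, &got_packet) < 0)
      break;

    if (got_packet) {
      write_packet (&pkt);      /* hypothetical consumer for the packet */
      av_free_packet (&pkt);
    }
  } while (got_packet);         /* repeat until the encoder runs dry */
}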
-rw-r--r--  ext/libav/gstavaudenc.c | 148
1 file changed, 79 insertions(+), 69 deletions(-)
diff --git a/ext/libav/gstavaudenc.c b/ext/libav/gstavaudenc.c
index 39d9f38..2ad8164 100644
--- a/ext/libav/gstavaudenc.c
+++ b/ext/libav/gstavaudenc.c
@@ -425,98 +425,108 @@ gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
ctx = ffmpegaudenc->context;
- GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer ");
+ GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in,
+ in_size);
memset (&pkt, 0, sizeof (pkt));
- memset (&frame, 0, sizeof (frame));
- avcodec_get_frame_defaults (&frame);
- info = gst_audio_encoder_get_audio_info (enc);
- planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
+ if (audio_in != NULL) {
+ memset (&frame, 0, sizeof (frame));
+ avcodec_get_frame_defaults (&frame);
- if (planar && info->channels > 1) {
- gint channels, nsamples;
- gint i, j;
+ info = gst_audio_encoder_get_audio_info (enc);
+ planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
- nsamples = frame.nb_samples = in_size / info->bpf;
- channels = info->channels;
+ if (planar && info->channels > 1) {
+ gint channels, nsamples;
+ gint i, j;
- if (info->channels > AV_NUM_DATA_POINTERS) {
- frame.extended_data = g_new (uint8_t *, info->channels);
- } else {
- frame.extended_data = frame.data;
- }
+ nsamples = frame.nb_samples = in_size / info->bpf;
+ channels = info->channels;
- frame.extended_data[0] = g_malloc (in_size);
- frame.linesize[0] = in_size / channels;
- for (i = 1; i < channels; i++)
- frame.extended_data[i] = frame.extended_data[i - 1] + frame.linesize[0];
+ if (info->channels > AV_NUM_DATA_POINTERS) {
+ frame.extended_data = g_new (uint8_t *, info->channels);
+ } else {
+ frame.extended_data = frame.data;
+ }
- switch (info->finfo->width) {
- case 8:{
- const guint8 *idata = (const guint8 *) audio_in;
+ frame.extended_data[0] = g_malloc (in_size);
+ frame.linesize[0] = in_size / channels;
+ for (i = 1; i < channels; i++)
+ frame.extended_data[i] = frame.extended_data[i - 1] + frame.linesize[0];
- for (i = 0; i < nsamples; i++) {
- for (j = 0; j < channels; j++) {
- ((guint8 *) frame.extended_data[j])[i] = idata[j];
- }
- idata += channels;
- }
- break;
- }
- case 16:{
- const guint16 *idata = (const guint16 *) audio_in;
+ switch (info->finfo->width) {
+ case 8:{
+ const guint8 *idata = (const guint8 *) audio_in;
- for (i = 0; i < nsamples; i++) {
- for (j = 0; j < channels; j++) {
- ((guint16 *) frame.extended_data[j])[i] = idata[j];
+ for (i = 0; i < nsamples; i++) {
+ for (j = 0; j < channels; j++) {
+ ((guint8 *) frame.extended_data[j])[i] = idata[j];
+ }
+ idata += channels;
}
- idata += channels;
+ break;
}
- break;
- }
- case 32:{
- const guint32 *idata = (const guint32 *) audio_in;
-
- for (i = 0; i < nsamples; i++) {
- for (j = 0; j < channels; j++) {
- ((guint32 *) frame.extended_data[j])[i] = idata[j];
+ case 16:{
+ const guint16 *idata = (const guint16 *) audio_in;
+
+ for (i = 0; i < nsamples; i++) {
+ for (j = 0; j < channels; j++) {
+ ((guint16 *) frame.extended_data[j])[i] = idata[j];
+ }
+ idata += channels;
}
- idata += channels;
+ break;
}
-
- break;
- }
- case 64:{
- const guint64 *idata = (const guint64 *) audio_in;
-
- for (i = 0; i < nsamples; i++) {
- for (j = 0; j < channels; j++) {
- ((guint64 *) frame.extended_data[j])[i] = idata[j];
+ case 32:{
+ const guint32 *idata = (const guint32 *) audio_in;
+
+ for (i = 0; i < nsamples; i++) {
+ for (j = 0; j < channels; j++) {
+ ((guint32 *) frame.extended_data[j])[i] = idata[j];
+ }
+ idata += channels;
}
- idata += channels;
+
+ break;
}
+ case 64:{
+ const guint64 *idata = (const guint64 *) audio_in;
+
+ for (i = 0; i < nsamples; i++) {
+ for (j = 0; j < channels; j++) {
+ ((guint64 *) frame.extended_data[j])[i] = idata[j];
+ }
+ idata += channels;
+ }
- break;
+ break;
+ }
+ default:
+ g_assert_not_reached ();
+ break;
}
- default:
- g_assert_not_reached ();
- break;
+
+ } else {
+ frame.data[0] = audio_in;
+ frame.extended_data = frame.data;
+ frame.linesize[0] = in_size;
+ frame.nb_samples = in_size / info->bpf;
}
+ /* we have a frame to feed the encoder */
+ res = avcodec_encode_audio2 (ctx, &pkt, &frame, have_data);
+
+ if (planar && info->channels > 1)
+ g_free (frame.data[0]);
+ if (frame.extended_data != frame.data)
+ g_free (frame.extended_data);
+
} else {
- frame.data[0] = audio_in;
- frame.extended_data = frame.data;
- frame.linesize[0] = in_size;
- frame.nb_samples = in_size / info->bpf;
+ /* flushing the encoder */
+ res = avcodec_encode_audio2 (ctx, &pkt, NULL, have_data);
}
- res = avcodec_encode_audio2 (ctx, &pkt, &frame, have_data);
- if (planar && info->channels > 1)
- g_free (frame.data[0]);
- if (frame.extended_data != frame.data)
- g_free (frame.extended_data);
-
if (res < 0) {
char error_str[128] = { 0, };