author | Christian König <deathsimple@vodafone.de> | 2011-08-09 18:45:13 +0200 |
---|---|---|
committer | Christian König <deathsimple@vodafone.de> | 2011-08-26 12:10:34 +0200 |
commit | 1d1d038c85ebb37f1da4540f092563e8ecab7dfb (patch) |
tree | 6e3a55e43ddad1bfd70dad96756ecd166b646373 /src |
parent | 6fb12bf031fdceadebc8a3d7b7756bc822fbf6e4 (diff) |
g3dvl: Rework the decoder interface part 1/5
First of all, get rid of the decode_buffer structure, while still giving
the decoder the ability to organize its buffers depending on the needs
of the state tracker.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Younes Manton <younes.m@gmail.com>
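For context beyond the message above: with this patch a decode buffer becomes an opaque `void *` handle that is created, selected and destroyed through `pipe_video_decoder` itself, instead of a `pipe_video_decode_buffer` object carrying its own function pointers. Below is a minimal sketch of that lifecycle, assuming a decoder obtained through the usual Gallium paths; the helper names and error handling are illustrative, only the three decoder callbacks come from the patch:

```c
#include "pipe/p_video_decoder.h"

/* Illustrative helpers, not part of the patch. */
static void *acquire_decode_buffer(struct pipe_video_decoder *dec)
{
   void *buf = dec->create_buffer(dec);   /* previously returned a pipe_video_decode_buffer */
   if (buf)
      dec->set_decode_buffer(dec, buf);   /* make it the decoder's current buffer */
   return buf;
}

static void release_decode_buffer(struct pipe_video_decoder *dec, void *buf)
{
   if (buf)
      dec->destroy_buffer(dec, buf);      /* previously buf->destroy(buf) */
}
```

The VDPAU and XvMC state trackers below store exactly such handles; see the `void *buffer[VL_NUM_DECODE_BUFFERS]` array in vdpau_private.h and the `void *decode_buffer` member in xvmc_private.h.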
Diffstat (limited to 'src')
-rw-r--r-- | src/gallium/auxiliary/vl/vl_mpeg12_decoder.c | 465 |
-rw-r--r-- | src/gallium/auxiliary/vl/vl_mpeg12_decoder.h | 9 |
-rw-r--r-- | src/gallium/include/pipe/p_video_decoder.h | 70 |
-rw-r--r-- | src/gallium/state_trackers/vdpau/decode.c | 53 |
-rw-r--r-- | src/gallium/state_trackers/vdpau/vdpau_private.h | 2 |
-rw-r--r-- | src/gallium/state_trackers/xorg/xvmc/surface.c | 115 |
-rw-r--r-- | src/gallium/state_trackers/xorg/xvmc/xvmc_private.h | 6 |
7 files changed, 404 insertions, 316 deletions
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c index 61d947ca4c..228a386ce4 100644 --- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c +++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c @@ -76,20 +76,16 @@ static const unsigned num_mc_format_configs = sizeof(mc_format_config) / sizeof(struct format_config); static bool -init_zscan_buffer(struct vl_mpeg12_buffer *buffer) +init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer) { enum pipe_format formats[3]; struct pipe_sampler_view **source; struct pipe_surface **destination; - struct vl_mpeg12_decoder *dec; - unsigned i; - assert(buffer); - - dec = (struct vl_mpeg12_decoder*)buffer->base.decoder; + assert(dec && buffer); formats[0] = formats[1] = formats[2] = dec->zscan_source_format; buffer->zscan_source = vl_video_buffer_create_ex @@ -147,17 +143,13 @@ cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer) } static bool -init_idct_buffer(struct vl_mpeg12_buffer *buffer) +init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer) { struct pipe_sampler_view **idct_source_sv, **mc_source_sv; - struct vl_mpeg12_decoder *dec; - unsigned i; - assert(buffer); - - dec = (struct vl_mpeg12_decoder*)buffer->base.decoder; + assert(dec && buffer); idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source); if (!idct_source_sv) @@ -187,27 +179,18 @@ error_source_sv: static void cleanup_idct_buffer(struct vl_mpeg12_buffer *buf) { - struct vl_mpeg12_decoder *dec; unsigned i; assert(buf); - dec = (struct vl_mpeg12_decoder*)buf->base.decoder; - assert(dec); - for (i = 0; i < 3; ++i) vl_idct_cleanup_buffer(&buf->idct[0]); } static bool -init_mc_buffer(struct vl_mpeg12_buffer *buf) +init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf) { - struct vl_mpeg12_decoder *dec; - - assert(buf); - - dec = (struct vl_mpeg12_decoder*)buf->base.decoder; - assert(dec); + assert(dec && buf); if(!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0])) goto error_mc_y; @@ -242,16 +225,103 @@ cleanup_mc_buffer(struct vl_mpeg12_buffer *buf) } static void -vl_mpeg12_buffer_destroy(struct pipe_video_decode_buffer *buffer) +vl_mpeg12_destroy(struct pipe_video_decoder *decoder) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; - struct vl_mpeg12_decoder *dec; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; - assert(buf); + assert(decoder); + + /* Asserted in softpipe_delete_fs_state() for some reason */ + dec->base.context->bind_vs_state(dec->base.context, NULL); + dec->base.context->bind_fs_state(dec->base.context, NULL); + + dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa); + dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr); + + vl_mc_cleanup(&dec->mc_y); + vl_mc_cleanup(&dec->mc_c); + dec->mc_source->destroy(dec->mc_source); + + if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { + vl_idct_cleanup(&dec->idct_y); + vl_idct_cleanup(&dec->idct_c); + dec->idct_source->destroy(dec->idct_source); + } + + vl_zscan_cleanup(&dec->zscan_y); + vl_zscan_cleanup(&dec->zscan_c); + + dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr); + dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv); + + pipe_resource_reference(&dec->quads.buffer, NULL); + pipe_resource_reference(&dec->pos.buffer, NULL); + pipe_resource_reference(&dec->block_num.buffer, NULL); + + 
pipe_sampler_view_reference(&dec->zscan_linear, NULL); + pipe_sampler_view_reference(&dec->zscan_normal, NULL); + pipe_sampler_view_reference(&dec->zscan_alternate, NULL); + + FREE(dec); +} + +static void * +vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; + struct vl_mpeg12_buffer *buffer; - dec = (struct vl_mpeg12_decoder*)buf->base.decoder; assert(dec); + buffer = CALLOC_STRUCT(vl_mpeg12_buffer); + if (buffer == NULL) + return NULL; + + if (!vl_vb_init(&buffer->vertex_stream, dec->base.context, + dec->base.width / MACROBLOCK_WIDTH, + dec->base.height / MACROBLOCK_HEIGHT)) + goto error_vertex_buffer; + + if (!init_mc_buffer(dec, buffer)) + goto error_mc; + + if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) + if (!init_idct_buffer(dec, buffer)) + goto error_idct; + + if (!init_zscan_buffer(dec, buffer)) + goto error_zscan; + + if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) + vl_mpg12_bs_init(&buffer->bs, + dec->base.width / MACROBLOCK_WIDTH, + dec->base.height / MACROBLOCK_HEIGHT); + + return buffer; + +error_zscan: + if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) + cleanup_idct_buffer(buffer); + +error_idct: + cleanup_mc_buffer(buffer); + +error_mc: + vl_vb_cleanup(&buffer->vertex_stream); + +error_vertex_buffer: + FREE(buffer); + return NULL; +} + +static void +vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; + struct vl_mpeg12_buffer *buf = buffer; + + assert(dec && buf); + cleanup_zscan_buffer(buf); if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) @@ -265,18 +335,96 @@ vl_mpeg12_buffer_destroy(struct pipe_video_decode_buffer *buffer) } static void -vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer) +vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; - struct vl_mpeg12_decoder *dec; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + + assert(dec && buffer); + + dec->current_buffer = buffer; +} + +static void +vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder, + struct pipe_picture_desc *picture) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture; + + assert(dec && pic); + + dec->picture_desc = *pic; +} + +static void +vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder, + const uint8_t intra_matrix[64], + const uint8_t non_intra_matrix[64]) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + + assert(dec); + + memcpy(dec->intra_matrix, intra_matrix, 64); + memcpy(dec->non_intra_matrix, non_intra_matrix, 64); +} + +static void +vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder, + struct pipe_video_buffer *target) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct pipe_surface **surfaces; + unsigned i; + + assert(dec); + + surfaces = target->get_surfaces(target); + for (i = 0; i < VL_MAX_PLANES; ++i) + pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]); +} + +static void +vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder, + struct pipe_video_buffer **ref_frames, + unsigned num_ref_frames) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct pipe_sampler_view **sv; + unsigned i,j; + 
+ assert(dec); + assert(num_ref_frames <= VL_MAX_REF_FRAMES); + + for (i = 0; i < num_ref_frames; ++i) { + sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]); + for (j = 0; j < VL_MAX_PLANES; ++j) + pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]); + } + + for (; i < VL_MAX_REF_FRAMES; ++i) + for (j = 0; j < VL_MAX_PLANES; ++j) + pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL); +} + +static void +vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder) +{ + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct vl_mpeg12_buffer *buf; struct pipe_sampler_view **sampler_views; unsigned i; + assert(dec); + + buf = dec->current_buffer; assert(buf); - dec = (struct vl_mpeg12_decoder *)buf->base.decoder; - assert(dec); + for (i = 0; i < VL_MAX_PLANES; ++i) { + vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true); + vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false); + } vl_vb_map(&buf->vertex_stream, dec->base.context); @@ -322,95 +470,84 @@ vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer) } } -static void -vl_mpeg12_buffer_set_quant_matrix(struct pipe_video_decode_buffer *buffer, - const uint8_t intra_matrix[64], - const uint8_t non_intra_matrix[64]) -{ - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; - unsigned i; - - for (i = 0; i < VL_MAX_PLANES; ++i) { - vl_zscan_upload_quant(&buf->zscan[i], intra_matrix, true); - vl_zscan_upload_quant(&buf->zscan[i], non_intra_matrix, false); - } -} - static struct pipe_ycbcr_block * -vl_mpeg12_buffer_get_ycbcr_stream(struct pipe_video_decode_buffer *buffer, int component) +vl_mpeg12_get_ycbcr_stream(struct pipe_video_decoder *decoder, int component) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; - assert(buf); + assert(dec && dec->current_buffer); + assert(component < VL_MAX_PLANES); - return vl_vb_get_ycbcr_stream(&buf->vertex_stream, component); + return vl_vb_get_ycbcr_stream(&dec->current_buffer->vertex_stream, component); } static short * -vl_mpeg12_buffer_get_ycbcr_buffer(struct pipe_video_decode_buffer *buffer, int component) +vl_mpeg12_get_ycbcr_buffer(struct pipe_video_decoder *decoder, int component) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; - assert(buf); + assert(dec && dec->current_buffer); assert(component < VL_MAX_PLANES); - return buf->texels[component]; + return dec->current_buffer->texels[component]; } static unsigned -vl_mpeg12_buffer_get_mv_stream_stride(struct pipe_video_decode_buffer *buffer) +vl_mpeg12_get_mv_stream_stride(struct pipe_video_decoder *decoder) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; - assert(buf); + assert(dec && dec->current_buffer); - return vl_vb_get_mv_stream_stride(&buf->vertex_stream); + return vl_vb_get_mv_stream_stride(&dec->current_buffer->vertex_stream); } static struct pipe_motionvector * -vl_mpeg12_buffer_get_mv_stream(struct pipe_video_decode_buffer *buffer, int ref_frame) +vl_mpeg12_get_mv_stream(struct pipe_video_decoder *decoder, int ref_frame) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; - assert(buf); + assert(dec && dec->current_buffer); - return vl_vb_get_mv_stream(&buf->vertex_stream, ref_frame); + 
return vl_vb_get_mv_stream(&dec->current_buffer->vertex_stream, ref_frame); } static void -vl_mpeg12_buffer_decode_bitstream(struct pipe_video_decode_buffer *buffer, - unsigned num_bytes, const void *data, - struct pipe_picture_desc *picture, - unsigned num_ycbcr_blocks[3]) +vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder, + unsigned num_bytes, const void *data, + unsigned num_ycbcr_blocks[3]) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; - struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct vl_mpeg12_buffer *buf; - struct vl_mpeg12_decoder *dec; unsigned i; - assert(buf); + assert(dec && dec->current_buffer); - dec = (struct vl_mpeg12_decoder *)buf->base.decoder; - assert(dec); + buf = dec->current_buffer; + assert(buf); for (i = 0; i < VL_MAX_PLANES; ++i) - vl_zscan_set_layout(&buf->zscan[i], pic->alternate_scan ? dec->zscan_alternate : dec->zscan_normal); + vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ? + dec->zscan_alternate : dec->zscan_normal); - vl_mpg12_bs_decode(&buf->bs, num_bytes, data, pic, num_ycbcr_blocks); + vl_mpg12_bs_decode(&buf->bs, num_bytes, data, &dec->picture_desc, num_ycbcr_blocks); } static void -vl_mpeg12_buffer_end_frame(struct pipe_video_decode_buffer *buffer) +vl_mpeg12_end_frame(struct pipe_video_decoder *decoder, unsigned num_ycbcr_blocks[3]) { - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer; - struct vl_mpeg12_decoder *dec; - unsigned i; + struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; + struct pipe_sampler_view **mc_source_sv; + struct pipe_vertex_buffer vb[3]; + struct vl_mpeg12_buffer *buf; - assert(buf); + unsigned i, j, component; + unsigned nr_components; - dec = (struct vl_mpeg12_decoder *)buf->base.decoder; - assert(dec); + assert(dec && dec->current_buffer); + + buf = dec->current_buffer; vl_vb_unmap(&buf->vertex_stream, dec->base.context); @@ -418,152 +555,23 @@ vl_mpeg12_buffer_end_frame(struct pipe_video_decode_buffer *buffer) dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer[i]); dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer[i]); } -} - -static void -vl_mpeg12_destroy(struct pipe_video_decoder *decoder) -{ - struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; - - assert(decoder); - - /* Asserted in softpipe_delete_fs_state() for some reason */ - dec->base.context->bind_vs_state(dec->base.context, NULL); - dec->base.context->bind_fs_state(dec->base.context, NULL); - - dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa); - dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr); - - vl_mc_cleanup(&dec->mc_y); - vl_mc_cleanup(&dec->mc_c); - dec->mc_source->destroy(dec->mc_source); - - if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { - vl_idct_cleanup(&dec->idct_y); - vl_idct_cleanup(&dec->idct_c); - dec->idct_source->destroy(dec->idct_source); - } - - vl_zscan_cleanup(&dec->zscan_y); - vl_zscan_cleanup(&dec->zscan_c); - - dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr); - dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv); - - pipe_resource_reference(&dec->quads.buffer, NULL); - pipe_resource_reference(&dec->pos.buffer, NULL); - pipe_resource_reference(&dec->block_num.buffer, NULL); - - pipe_sampler_view_reference(&dec->zscan_linear, NULL); - 
pipe_sampler_view_reference(&dec->zscan_normal, NULL); - pipe_sampler_view_reference(&dec->zscan_alternate, NULL); - - FREE(dec); -} - -static struct pipe_video_decode_buffer * -vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder) -{ - struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; - struct vl_mpeg12_buffer *buffer; - - assert(dec); - - buffer = CALLOC_STRUCT(vl_mpeg12_buffer); - if (buffer == NULL) - return NULL; - - buffer->base.decoder = decoder; - buffer->base.destroy = vl_mpeg12_buffer_destroy; - buffer->base.begin_frame = vl_mpeg12_buffer_begin_frame; - buffer->base.set_quant_matrix = vl_mpeg12_buffer_set_quant_matrix; - buffer->base.get_ycbcr_stream = vl_mpeg12_buffer_get_ycbcr_stream; - buffer->base.get_ycbcr_buffer = vl_mpeg12_buffer_get_ycbcr_buffer; - buffer->base.get_mv_stream_stride = vl_mpeg12_buffer_get_mv_stream_stride; - buffer->base.get_mv_stream = vl_mpeg12_buffer_get_mv_stream; - buffer->base.decode_bitstream = vl_mpeg12_buffer_decode_bitstream; - buffer->base.end_frame = vl_mpeg12_buffer_end_frame; - - if (!vl_vb_init(&buffer->vertex_stream, dec->base.context, - dec->base.width / MACROBLOCK_WIDTH, - dec->base.height / MACROBLOCK_HEIGHT)) - goto error_vertex_buffer; - - if (!init_mc_buffer(buffer)) - goto error_mc; - - if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) - if (!init_idct_buffer(buffer)) - goto error_idct; - - if (!init_zscan_buffer(buffer)) - goto error_zscan; - - if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) - vl_mpg12_bs_init(&buffer->bs, - dec->base.width / MACROBLOCK_WIDTH, - dec->base.height / MACROBLOCK_HEIGHT); - - return &buffer->base; - -error_zscan: - if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) - cleanup_idct_buffer(buffer); - -error_idct: - cleanup_mc_buffer(buffer); - -error_mc: - vl_vb_cleanup(&buffer->vertex_stream); - -error_vertex_buffer: - FREE(buffer); - return NULL; -} - -static void -vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer, - unsigned num_ycbcr_blocks[3], - struct pipe_video_buffer *refs[2], - struct pipe_video_buffer *dst) -{ - struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer; - struct vl_mpeg12_decoder *dec; - - struct pipe_sampler_view **sv[VL_MAX_REF_FRAMES], **mc_source_sv; - struct pipe_surface **surfaces; - - struct pipe_vertex_buffer vb[3]; - - unsigned i, j, component; - unsigned nr_components; - - assert(buf); - - dec = (struct vl_mpeg12_decoder *)buf->base.decoder; - assert(dec); - - for (i = 0; i < 2; ++i) - sv[i] = refs[i] ? 
refs[i]->get_sampler_view_planes(refs[i]) : NULL; vb[0] = dec->quads; vb[1] = dec->pos; - surfaces = dst->get_surfaces(dst); - dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv); for (i = 0; i < VL_MAX_PLANES; ++i) { - if (!surfaces[i]) continue; + if (!dec->target_surfaces[i]) continue; - vl_mc_set_surface(&buf->mc[i], surfaces[i]); + vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]); for (j = 0; j < VL_MAX_REF_FRAMES; ++j) { - if (!sv[j]) continue; + if (!dec->ref_frames[j][i]) continue; vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);; dec->base.context->set_vertex_buffers(dec->base.context, 3, vb); - vl_mc_render_ref(&buf->mc[i], sv[j][i]); + vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]); } } @@ -584,9 +592,9 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer, mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source); for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) { - if (!surfaces[i]) continue; + if (!dec->target_surfaces[i]) continue; - nr_components = util_format_get_nr_components(surfaces[i]->texture->format); + nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format); for (j = 0; j < nr_components; ++j, ++component) { if (!num_ycbcr_blocks[i]) continue; @@ -604,6 +612,14 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer, } } +static void +vl_mpeg12_flush(struct pipe_video_decoder *decoder) +{ + assert(decoder); + + //Noop, for shaders it is much faster to flush everything in end_frame +} + static bool init_pipe_state(struct vl_mpeg12_decoder *dec) { @@ -870,7 +886,20 @@ vl_create_mpeg12_decoder(struct pipe_context *context, dec->base.destroy = vl_mpeg12_destroy; dec->base.create_buffer = vl_mpeg12_create_buffer; - dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer; + dec->base.destroy_buffer = vl_mpeg12_destroy_buffer; + dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer; + dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters; + dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix; + dec->base.set_decode_target = vl_mpeg12_set_decode_target; + dec->base.set_reference_frames = vl_mpeg12_set_reference_frames; + dec->base.begin_frame = vl_mpeg12_begin_frame; + dec->base.get_ycbcr_stream = vl_mpeg12_get_ycbcr_stream; + dec->base.get_ycbcr_buffer = vl_mpeg12_get_ycbcr_buffer; + dec->base.get_mv_stream_stride = vl_mpeg12_get_mv_stream_stride; + dec->base.get_mv_stream = vl_mpeg12_get_mv_stream; + dec->base.decode_bitstream = vl_mpeg12_decode_bitstream; + dec->base.end_frame = vl_mpeg12_end_frame; + dec->base.flush = vl_mpeg12_flush; dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4); dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels; diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h index 01265e368a..85c84fc1c4 100644 --- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h +++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h @@ -73,12 +73,17 @@ struct vl_mpeg12_decoder struct vl_mc mc_y, mc_c; void *dsa; + + struct vl_mpeg12_buffer *current_buffer; + struct pipe_mpeg12_picture_desc picture_desc; + uint8_t intra_matrix[64]; + uint8_t non_intra_matrix[64]; + struct pipe_sampler_view *ref_frames[VL_MAX_REF_FRAMES][VL_MAX_PLANES]; + struct pipe_surface *target_surfaces[VL_MAX_PLANES]; }; struct vl_mpeg12_buffer { - struct pipe_video_decode_buffer base; - struct vl_vertex_buffer vertex_stream; struct pipe_video_buffer 
*zscan_source; diff --git a/src/gallium/include/pipe/p_video_decoder.h b/src/gallium/include/pipe/p_video_decoder.h index f063d8f3a1..ae071136ba 100644 --- a/src/gallium/include/pipe/p_video_decoder.h +++ b/src/gallium/include/pipe/p_video_decoder.h @@ -59,75 +59,89 @@ struct pipe_video_decoder void (*destroy)(struct pipe_video_decoder *decoder); /** - * Creates a buffer as decoding input + * Creates a decoder buffer */ - struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder); + void *(*create_buffer)(struct pipe_video_decoder *decoder); /** - * flush decoder buffer to video hardware + * Destroys a decoder buffer */ - void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf, - unsigned num_ycbcr_blocks[3], - struct pipe_video_buffer *ref_frames[2], - struct pipe_video_buffer *dst); -}; - -/** - * input buffer for a decoder - */ -struct pipe_video_decode_buffer -{ - struct pipe_video_decoder *decoder; + void (*destroy_buffer)(struct pipe_video_decoder *decoder, void *buffer); /** - * destroy this decode buffer + * set the current decoder buffer */ - void (*destroy)(struct pipe_video_decode_buffer *decbuf); + void (*set_decode_buffer)(struct pipe_video_decoder *decoder, void *buffer); /** - * map the input buffer into memory before starting decoding + * set the picture parameters for the next frame + * only used for bitstream decoding */ - void (*begin_frame)(struct pipe_video_decode_buffer *decbuf); + void (*set_picture_parameters)(struct pipe_video_decoder *decoder, + struct pipe_picture_desc *picture); /** * set the quantification matrixes */ - void (*set_quant_matrix)(struct pipe_video_decode_buffer *decbuf, + void (*set_quant_matrix)(struct pipe_video_decoder *decoder, const uint8_t intra_matrix[64], const uint8_t non_intra_matrix[64]); /** + * set target where video data is decoded to + */ + void (*set_decode_target)(struct pipe_video_decoder *decoder, + struct pipe_video_buffer *target); + + /** + * set reference frames for motion compensation + */ + void (*set_reference_frames)(struct pipe_video_decoder *decoder, + struct pipe_video_buffer **ref_frames, + unsigned num_ref_frames); + + /** + * start decoding of a new frame + */ + void (*begin_frame)(struct pipe_video_decoder *decoder); + + /** * get the pointer where to put the ycbcr blocks of a component */ - struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decode_buffer *, int component); + struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decoder *decoder, int component); /** * get the pointer where to put the ycbcr dct block data of a component */ - short *(*get_ycbcr_buffer)(struct pipe_video_decode_buffer *, int component); + short *(*get_ycbcr_buffer)(struct pipe_video_decoder *decoder, int component); /** * get the stride of the mv buffer */ - unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf); + unsigned (*get_mv_stream_stride)(struct pipe_video_decoder *decoder); /** * get the pointer where to put the motion vectors of a ref frame */ - struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame); + struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decoder *decoder, int ref_frame); /** * decode a bitstream */ - void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf, + void (*decode_bitstream)(struct pipe_video_decoder *decoder, unsigned num_bytes, const void *data, - struct pipe_picture_desc *picture, unsigned num_ycbcr_blocks[3]); /** - * unmap decoder buffer before flushing + * end 
decoding of the current frame + */ + void (*end_frame)(struct pipe_video_decoder *decoder, unsigned num_ycbcr_blocks[3]); + + /** + * flush any outstanding command buffers to the hardware + * should be called before a video_buffer is acessed by the state tracker again */ - void (*end_frame)(struct pipe_video_decode_buffer *decbuf); + void (*flush)(struct pipe_video_decoder *decoder); }; /** diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c index 96542f874d..3bf05bea21 100644 --- a/src/gallium/state_trackers/vdpau/decode.c +++ b/src/gallium/state_trackers/vdpau/decode.c @@ -107,7 +107,7 @@ error_buffer: for (i = 0; i < VL_NUM_DECODE_BUFFERS; ++i) if (vldecoder->buffer[i]) - vldecoder->buffer[i]->destroy(vldecoder->buffer[i]); + vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffer[i]); vldecoder->decoder->destroy(vldecoder->decoder); @@ -130,7 +130,7 @@ vlVdpDecoderDestroy(VdpDecoder decoder) for (i = 0; i < VL_NUM_DECODE_BUFFERS; ++i) if (vldecoder->buffer[i]) - vldecoder->buffer[i]->destroy(vldecoder->buffer[i]); + vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffer[i]); vldecoder->decoder->destroy(vldecoder->decoder); @@ -162,8 +162,6 @@ vlVdpDecoderGetParameters(VdpDecoder decoder, static VdpStatus vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder, - struct pipe_video_decode_buffer *buffer, - struct pipe_video_buffer *target, VdpPictureInfoMPEG1Or2 *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) @@ -176,23 +174,25 @@ vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder, VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG2\n"); + i = 0; + /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */ - if (picture_info->forward_reference == VDP_INVALID_HANDLE) - ref_frames[0] = NULL; - else { - ref_frames[0] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer; - if (!ref_frames[0]) + if (picture_info->forward_reference != VDP_INVALID_HANDLE) { + ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer; + if (!ref_frames[i]) return VDP_STATUS_INVALID_HANDLE; + ++i; } - if (picture_info->backward_reference == VDP_INVALID_HANDLE) - ref_frames[1] = NULL; - else { - ref_frames[1] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer; - if (!ref_frames[1]) + if (picture_info->backward_reference != VDP_INVALID_HANDLE) { + ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer; + if (!ref_frames[i]) return VDP_STATUS_INVALID_HANDLE; + ++i; } + decoder->set_reference_frames(decoder, ref_frames, i); + memset(&picture, 0, sizeof(picture)); picture.base.profile = decoder->profile; picture.picture_coding_type = picture_info->picture_coding_type; @@ -207,19 +207,19 @@ vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder, picture.f_code[1][0] = picture_info->f_code[1][0] - 1; picture.f_code[1][1] = picture_info->f_code[1][1] - 1; - buffer->begin_frame(buffer); + decoder->set_picture_parameters(decoder, &picture.base); memcpy(intra_quantizer_matrix, picture_info->intra_quantizer_matrix, sizeof(intra_quantizer_matrix)); intra_quantizer_matrix[0] = 1 << (7 - picture_info->intra_dc_precision); - buffer->set_quant_matrix(buffer, intra_quantizer_matrix, picture_info->non_intra_quantizer_matrix); + decoder->set_quant_matrix(decoder, intra_quantizer_matrix, picture_info->non_intra_quantizer_matrix); - for (i = 0; i < 
bitstream_buffer_count; ++i) - buffer->decode_bitstream(buffer, bitstream_buffers[i].bitstream_bytes, - bitstream_buffers[i].bitstream, &picture.base, num_ycbcr_blocks); + decoder->begin_frame(decoder); - buffer->end_frame(buffer); + for (i = 0; i < bitstream_buffer_count; ++i) + decoder->decode_bitstream(decoder, bitstream_buffers[i].bitstream_bytes, + bitstream_buffers[i].bitstream, num_ycbcr_blocks); - decoder->flush_buffer(buffer, num_ycbcr_blocks, ref_frames, target); + decoder->end_frame(decoder, num_ycbcr_blocks); return VDP_STATUS_OK; } @@ -261,11 +261,12 @@ vlVdpDecoderRender(VdpDecoder decoder, case PIPE_VIDEO_PROFILE_MPEG2_MAIN: ++vldecoder->cur_buffer; vldecoder->cur_buffer %= VL_NUM_DECODE_BUFFERS; - return vlVdpDecoderRenderMpeg12(vldecoder->decoder, - vldecoder->buffer[vldecoder->cur_buffer], - vlsurf->video_buffer, - (VdpPictureInfoMPEG1Or2 *)picture_info, - bitstream_buffer_count,bitstream_buffers); + + vldecoder->decoder->set_decode_buffer(vldecoder->decoder, vldecoder->buffer[vldecoder->cur_buffer]); + vldecoder->decoder->set_decode_target(vldecoder->decoder, vlsurf->video_buffer); + + return vlVdpDecoderRenderMpeg12(vldecoder->decoder, (VdpPictureInfoMPEG1Or2 *)picture_info, + bitstream_buffer_count, bitstream_buffers); break; default: diff --git a/src/gallium/state_trackers/vdpau/vdpau_private.h b/src/gallium/state_trackers/vdpau/vdpau_private.h index e5d945629f..5c68cd7c47 100644 --- a/src/gallium/state_trackers/vdpau/vdpau_private.h +++ b/src/gallium/state_trackers/vdpau/vdpau_private.h @@ -256,7 +256,7 @@ typedef struct { vlVdpDevice *device; struct pipe_video_decoder *decoder; - struct pipe_video_decode_buffer *buffer[VL_NUM_DECODE_BUFFERS]; + void *buffer[VL_NUM_DECODE_BUFFERS]; unsigned cur_buffer; } vlVdpDecoder; diff --git a/src/gallium/state_trackers/xorg/xvmc/surface.c b/src/gallium/state_trackers/xorg/xvmc/surface.c index 0c53b73028..002c35ae44 100644 --- a/src/gallium/state_trackers/xorg/xvmc/surface.c +++ b/src/gallium/state_trackers/xorg/xvmc/surface.c @@ -252,9 +252,37 @@ MacroBlocksToPipe(XvMCSurfacePrivate *surface, } static void -unmap_and_flush_surface(XvMCSurfacePrivate *surface) +SetDecoderStatus(XvMCSurfacePrivate *surface) { + struct pipe_video_decoder *decoder; struct pipe_video_buffer *ref_frames[2]; + + XvMCContextPrivate *context_priv; + + unsigned i, num_refs = 0; + + assert(surface); + + context_priv = surface->context->privData; + decoder = context_priv->decoder; + + decoder->set_decode_buffer(decoder, surface->decode_buffer); + decoder->set_decode_target(decoder, surface->video_buffer); + + for (i = 0; i < 2; ++i) { + if (surface->ref[i].surface) { + XvMCSurfacePrivate *ref = surface->ref[i].surface->privData; + + if (ref) + ref_frames[num_refs++] = ref->video_buffer; + } + } + decoder->set_reference_frames(decoder, ref_frames, num_refs); +} + +static void +RecursiveEndFrame(XvMCSurfacePrivate *surface) +{ XvMCContextPrivate *context_priv; unsigned i, num_ycbcr_blocks[3]; @@ -264,27 +292,27 @@ unmap_and_flush_surface(XvMCSurfacePrivate *surface) for ( i = 0; i < 2; ++i ) { if (surface->ref[i].surface) { - XvMCSurfacePrivate *ref = surface->ref[i].surface->privData; + XvMCSurface *ref = surface->ref[i].surface; assert(ref); - unmap_and_flush_surface(ref); surface->ref[i].surface = NULL; - ref_frames[i] = ref->video_buffer; - } else { - ref_frames[i] = NULL; + RecursiveEndFrame(ref->privData); + surface->ref[i].surface = ref; } } - if (surface->mapped) { - surface->decode_buffer->end_frame(surface->decode_buffer); + if 
(surface->frame_started) { + surface->frame_started = 0; + SetDecoderStatus(surface); + for (i = 0; i < 3; ++i) num_ycbcr_blocks[i] = surface->ycbcr[i].num_blocks_added; - context_priv->decoder->flush_buffer(surface->decode_buffer, - num_ycbcr_blocks, - ref_frames, - surface->video_buffer); - surface->mapped = 0; + + for (i = 0; i < 2; ++i) + surface->ref[i].surface = NULL; + + context_priv->decoder->end_frame(context_priv->decoder, num_ycbcr_blocks); } } @@ -323,9 +351,7 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac return BadAlloc; surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder); - surface_priv->decode_buffer->set_quant_matrix(surface_priv->decode_buffer, dummy_quant, dummy_quant); - - surface_priv->mv_stride = surface_priv->decode_buffer->get_mv_stream_stride(surface_priv->decode_buffer); + context_priv->decoder->set_quant_matrix(context_priv->decoder, dummy_quant, dummy_quant); surface_priv->video_buffer = pipe->create_video_buffer ( pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format, @@ -355,8 +381,9 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks ) { - struct pipe_video_decode_buffer *t_buffer; + struct pipe_video_decoder *decoder; + XvMCContextPrivate *context_priv; XvMCSurfacePrivate *target_surface_priv; XvMCSurfacePrivate *past_surface_priv; XvMCSurfacePrivate *future_surface_priv; @@ -394,6 +421,9 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur assert(flags == 0 || flags == XVMC_SECOND_FIELD); + context_priv = context->privData; + decoder = context_priv->decoder; + target_surface_priv = target_surface->privData; past_surface_priv = past_surface ? past_surface->privData : NULL; future_surface_priv = future_surface ? future_surface->privData : NULL; @@ -402,47 +432,48 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur assert(!past_surface || past_surface_priv->context == context); assert(!future_surface || future_surface_priv->context == context); - t_buffer = target_surface_priv->decode_buffer; - - // enshure that all reference frames are flushed - // not really nessasary, but speeds ups rendering + // call end frame on all referenced frames if (past_surface) - unmap_and_flush_surface(past_surface->privData); + RecursiveEndFrame(past_surface->privData); if (future_surface) - unmap_and_flush_surface(future_surface->privData); + RecursiveEndFrame(future_surface->privData); xvmc_mb = macroblocks->macro_blocks + first_macroblock; /* If the surface we're rendering hasn't changed the ref frames shouldn't change. 
*/ - if (target_surface_priv->mapped && ( + if (target_surface_priv->frame_started && ( target_surface_priv->ref[0].surface != past_surface || target_surface_priv->ref[1].surface != future_surface || (xvmc_mb->x == 0 && xvmc_mb->y == 0))) { - // If they change anyway we need to clear our surface - unmap_and_flush_surface(target_surface_priv); + // If they change anyway we must assume that the current frame is ended + RecursiveEndFrame(target_surface_priv); } - if (!target_surface_priv->mapped) { - t_buffer->begin_frame(t_buffer); + target_surface_priv->ref[0].surface = past_surface; + target_surface_priv->ref[1].surface = future_surface; + + SetDecoderStatus(target_surface_priv); + + if (!target_surface_priv->frame_started) { + decoder->begin_frame(decoder); + target_surface_priv->mv_stride = decoder->get_mv_stream_stride(decoder); for (i = 0; i < 3; ++i) { target_surface_priv->ycbcr[i].num_blocks_added = 0; - target_surface_priv->ycbcr[i].stream = t_buffer->get_ycbcr_stream(t_buffer, i); - target_surface_priv->ycbcr[i].buffer = t_buffer->get_ycbcr_buffer(t_buffer, i); + target_surface_priv->ycbcr[i].stream = decoder->get_ycbcr_stream(decoder, i); + target_surface_priv->ycbcr[i].buffer = decoder->get_ycbcr_buffer(decoder, i); } for (i = 0; i < 2; ++i) { - target_surface_priv->ref[i].surface = i == 0 ? past_surface : future_surface; - if (target_surface_priv->ref[i].surface) - target_surface_priv->ref[i].mv = t_buffer->get_mv_stream(t_buffer, i); + target_surface_priv->ref[i].mv = decoder->get_mv_stream(decoder, i); else target_surface_priv->ref[i].mv = NULL; } - target_surface_priv->mapped = 1; + target_surface_priv->frame_started = 1; } MacroBlocksToPipe(target_surface_priv, picture_structure, xvmc_mb, blocks, num_macroblocks); @@ -543,7 +574,9 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable, assert(desty + desth - 1 < drawable_surface->height); */ - unmap_and_flush_surface(surface_priv); + RecursiveEndFrame(surface_priv); + + context_priv->decoder->flush(context_priv->decoder); vl_compositor_clear_layers(compositor); vl_compositor_set_buffer_layer(compositor, 0, surface_priv->video_buffer, &src_rect, NULL); @@ -630,6 +663,9 @@ PUBLIC Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface) { XvMCSurfacePrivate *surface_priv; + XvMCContextPrivate *context_priv; + + unsigned num_ycbcr_buffers[3] = { 0, 0, 0 }; XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface); @@ -639,10 +675,13 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface) return XvMCBadSurface; surface_priv = surface->privData; + context_priv = surface_priv->context->privData; - if (surface_priv->mapped) - surface_priv->decode_buffer->end_frame(surface_priv->decode_buffer); - surface_priv->decode_buffer->destroy(surface_priv->decode_buffer); + if (surface_priv->frame_started) { + SetDecoderStatus(surface_priv); + context_priv->decoder->end_frame(context_priv->decoder, num_ycbcr_buffers); + } + context_priv->decoder->destroy_buffer(context_priv->decoder, surface_priv->decode_buffer); surface_priv->video_buffer->destroy(surface_priv->video_buffer); FREE(surface_priv); surface->privData = NULL; diff --git a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h index 5f8d9d13cb..5b3debdb78 100644 --- a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h +++ b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h @@ -45,7 +45,6 @@ struct vl_context; struct pipe_video_decoder; -struct pipe_video_decode_buffer; struct 
pipe_video_buffer; struct pipe_sampler_view; @@ -70,10 +69,11 @@ typedef struct typedef struct { - struct pipe_video_decode_buffer *decode_buffer; + void *decode_buffer; struct pipe_video_buffer *video_buffer; - bool mapped; // are we still mapped to memory? + // have we allready told the decoder to start a frame + bool frame_started; struct { unsigned num_blocks_added; |
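Taken together, the new entry points replace the old per-buffer vtable and the `flush_buffer()` call. The sketch below, modeled on the VDPAU changes above (`vlVdpDecoderRender` / `vlVdpDecoderRenderMpeg12`), shows the per-frame sequence a state tracker now drives; the wrapper function and its parameters are purely illustrative, while the decoder callbacks and their signatures are the ones added to p_video_decoder.h in this patch:

```c
#include "pipe/p_video_decoder.h"

/* Illustrative wrapper, not part of the patch: decode one frame through the
 * reworked interface. */
static void decode_one_frame(struct pipe_video_decoder *dec, void *buffer,
                             struct pipe_video_buffer *target,
                             struct pipe_video_buffer **refs, unsigned num_refs,
                             struct pipe_picture_desc *picture,
                             const uint8_t intra_matrix[64],
                             const uint8_t non_intra_matrix[64],
                             unsigned num_bytes, const void *bitstream)
{
   unsigned num_ycbcr_blocks[3] = { 0, 0, 0 };

   /* All per-frame state now lives in the decoder, not in the buffer. */
   dec->set_decode_buffer(dec, buffer);
   dec->set_decode_target(dec, target);
   dec->set_reference_frames(dec, refs, num_refs);
   dec->set_picture_parameters(dec, picture);
   dec->set_quant_matrix(dec, intra_matrix, non_intra_matrix);

   dec->begin_frame(dec);
   dec->decode_bitstream(dec, num_bytes, bitstream, num_ycbcr_blocks);
   dec->end_frame(dec, num_ycbcr_blocks);

   /* Flush before the target buffer is accessed by the state tracker again;
    * for the shader-based MPEG-1/2 decoder this is currently a no-op, since
    * everything is already flushed in end_frame. */
   dec->flush(dec);
}
```

The XvMC path in surface.c follows the same pattern, with `SetDecoderStatus()` re-applying buffer, target and reference frames before each `begin_frame()`/`end_frame()` pair.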