author    Emeric Grange <emeric.grange@gmail.com>  2012-01-28 01:30:58 +0100
committer Emeric Grange <emeric.grange@gmail.com>  2012-06-24 16:57:33 +0200
commit    78274d7b571acc8c81d18a6b72c348c1e2bf3b88
tree      de5d9bf5a6c50e1f71a7409b262306bc78a4c711
parent    f8b01453b9ba62c99de7b7f37f7c93cdc3b28a14
g3dvl: vp8 minor cleanup

Rename the MACROBLOCKD parameter from "x"/"xd" to "mb" throughout blockd,
invtrans, reconinter, reconintra and reconintra4x4, convert plain comments
to Doxygen style, group the segment feature masks and VP8_* mode count
macros at the top of blockd.h, replace union b_mode_info with a B_MODE_INFO
typedef, drop the ref_frame_cost field from MACROBLOCKD, and rename local
variables in yv12utils.c (Border -> border, yplane_size -> y_size,
uvplane_size -> uv_size) for consistency.
Signed-off-by: Emeric Grange <emeric.grange@gmail.com>
-rw-r--r--  src/gallium/auxiliary/vl/vp8/alloccommon.c    |   2
-rw-r--r--  src/gallium/auxiliary/vl/vp8/blockd.c         |  62
-rw-r--r--  src/gallium/auxiliary/vl/vp8/blockd.h         |  70
-rw-r--r--  src/gallium/auxiliary/vl/vp8/entropy.h        |   2
-rw-r--r--  src/gallium/auxiliary/vl/vp8/invtrans.c       |  32
-rw-r--r--  src/gallium/auxiliary/vl/vp8/invtrans.h       |   6
-rw-r--r--  src/gallium/auxiliary/vl/vp8/modecounts.h     |  12
-rw-r--r--  src/gallium/auxiliary/vl/vp8/mv.h             |   3
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconinter.c     | 124
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconinter.h     |  14
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconintra.c     |  98
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconintra.h     |  11
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconintra4x4.c  |  10
-rw-r--r--  src/gallium/auxiliary/vl/vp8/reconintra4x4.h  |   3
-rw-r--r--  src/gallium/auxiliary/vl/vp8/vp8_mem.c        |   7
-rw-r--r--  src/gallium/auxiliary/vl/vp8/yv12utils.c      |  80
-rw-r--r--  src/gallium/auxiliary/vl/vp8/yv12utils.h      |   1
17 files changed, 272 insertions(+), 265 deletions(-)
diff --git a/src/gallium/auxiliary/vl/vp8/alloccommon.c b/src/gallium/auxiliary/vl/vp8/alloccommon.c
index 4a971adb3d..691b3a155c 100644
--- a/src/gallium/auxiliary/vl/vp8/alloccommon.c
+++ b/src/gallium/auxiliary/vl/vp8/alloccommon.c
@@ -27,7 +27,7 @@ static void update_mode_info_border(MODE_INFO *mi, int rows, int cols)
/* TODO(holmer): Bug? This updates the last element of each row
* rather than the border element!
*/
- memset(&mi[i*cols-1], 0, sizeof(MODE_INFO));
+ memset(&mi[i*cols - 1], 0, sizeof(MODE_INFO));
}
}
diff --git a/src/gallium/auxiliary/vl/vp8/blockd.c b/src/gallium/auxiliary/vl/vp8/blockd.c
index cd93ae3ebc..ae250d8e85 100644
--- a/src/gallium/auxiliary/vl/vp8/blockd.c
+++ b/src/gallium/auxiliary/vl/vp8/blockd.c
@@ -38,41 +38,41 @@ static void setup_block(BLOCKD *b,
}
}
-static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs)
+static void setup_macroblock(MACROBLOCKD *mb, BLOCKSET bs)
{
int block;
unsigned char **y, **u, **v;
if (bs == DEST)
{
- y = &x->dst.y_buffer;
- u = &x->dst.u_buffer;
- v = &x->dst.v_buffer;
+ y = &mb->dst.y_buffer;
+ u = &mb->dst.u_buffer;
+ v = &mb->dst.v_buffer;
}
else
{
- y = &x->pre.y_buffer;
- u = &x->pre.u_buffer;
- v = &x->pre.v_buffer;
+ y = &mb->pre.y_buffer;
+ u = &mb->pre.u_buffer;
+ v = &mb->pre.v_buffer;
}
for (block = 0; block < 16; block++) /* Y blocks */
{
- setup_block(&x->block[block], x->dst.y_stride, y, x->dst.y_stride,
- (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs);
+ setup_block(&mb->block[block], mb->dst.y_stride, y, mb->dst.y_stride,
+ (block >> 2) * 4 * mb->dst.y_stride + (block & 3) * 4, bs);
}
for (block = 16; block < 20; block++) /* U and V blocks */
{
- setup_block(&x->block[block], x->dst.uv_stride, u, x->dst.uv_stride,
- ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
+ setup_block(&mb->block[block], mb->dst.uv_stride, u, mb->dst.uv_stride,
+ ((block - 16) >> 1) * 4 * mb->dst.uv_stride + (block & 1) * 4, bs);
- setup_block(&x->block[block+4], x->dst.uv_stride, v, x->dst.uv_stride,
- ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
+ setup_block(&mb->block[block+4], mb->dst.uv_stride, v, mb->dst.uv_stride,
+ ((block - 16) >> 1) * 4 * mb->dst.uv_stride + (block & 1) * 4, bs);
}
}
-void vp8_setup_block_dptrs(MACROBLOCKD *x)
+void vp8_setup_block_dptrs(MACROBLOCKD *mb)
{
int r, c;
@@ -80,8 +80,8 @@ void vp8_setup_block_dptrs(MACROBLOCKD *x)
{
for (c = 0; c < 4; c++)
{
- x->block[r*4+c].diff = &x->diff[r * 4 * 16 + c * 4];
- x->block[r*4+c].predictor = x->predictor + r * 4 * 16 + c * 4;
+ mb->block[r*4+c].diff = &mb->diff[r * 4 * 16 + c * 4];
+ mb->block[r*4+c].predictor = mb->predictor + r * 4 * 16 + c * 4;
}
}
@@ -89,8 +89,8 @@ void vp8_setup_block_dptrs(MACROBLOCKD *x)
{
for (c = 0; c < 2; c++)
{
- x->block[16+r*2+c].diff = &x->diff[256 + r * 4 * 8 + c * 4];
- x->block[16+r*2+c].predictor = x->predictor + 256 + r * 4 * 8 + c * 4;
+ mb->block[16+r*2+c].diff = &mb->diff[256 + r * 4 * 8 + c * 4];
+ mb->block[16+r*2+c].predictor = mb->predictor + 256 + r * 4 * 8 + c * 4;
}
}
@@ -98,38 +98,38 @@ void vp8_setup_block_dptrs(MACROBLOCKD *x)
{
for (c = 0; c < 2; c++)
{
- x->block[20+r*2+c].diff = &x->diff[320+ r * 4 * 8 + c * 4];
- x->block[20+r*2+c].predictor = x->predictor + 320 + r * 4 * 8 + c * 4;
+ mb->block[20+r*2+c].diff = &mb->diff[320+ r * 4 * 8 + c * 4];
+ mb->block[20+r*2+c].predictor = mb->predictor + 320 + r * 4 * 8 + c * 4;
}
}
- x->block[24].diff = &x->diff[384];
+ mb->block[24].diff = &mb->diff[384];
for (r = 0; r < 25; r++)
{
- x->block[r].qcoeff = x->qcoeff + r * 16;
- x->block[r].dqcoeff = x->dqcoeff + r * 16;
+ mb->block[r].qcoeff = mb->qcoeff + r * 16;
+ mb->block[r].dqcoeff = mb->dqcoeff + r * 16;
}
}
-void vp8_setup_block_doffsets(MACROBLOCKD *x)
+void vp8_setup_block_doffsets(MACROBLOCKD *mb)
{
- /* handle the destination pitch features */
- setup_macroblock(x, DEST);
- setup_macroblock(x, PRED);
+ /* Handle the destination pitch features */
+ setup_macroblock(mb, DEST);
+ setup_macroblock(mb, PRED);
}
-void update_blockd_bmi(MACROBLOCKD *xd)
+void update_blockd_bmi(MACROBLOCKD *mb)
{
int i;
/* If the block size is 4x4. */
- if (xd->mode_info_context->mbmi.mode == SPLITMV ||
- xd->mode_info_context->mbmi.mode == B_PRED)
+ if (mb->mode_info_context->mbmi.mode == SPLITMV ||
+ mb->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
- xd->block[i].bmi = xd->mode_info_context->bmi[i];
+ mb->block[i].bmi = mb->mode_info_context->bmi[i];
}
}
}
diff --git a/src/gallium/auxiliary/vl/vp8/blockd.h b/src/gallium/auxiliary/vl/vp8/blockd.h
index c6daea15bc..5b563fd271 100644
--- a/src/gallium/auxiliary/vl/vp8/blockd.h
+++ b/src/gallium/auxiliary/vl/vp8/blockd.h
@@ -29,6 +29,18 @@
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
+/* Segment Feature Masks */
+#define SEGMENT_ALTQ 0x01
+#define SEGMENT_ALT_LF 0x02
+
+#define VP8_YMODES (B_PRED + 1)
+#define VP8_UV_MODES (TM_PRED + 1)
+
+#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
+
+#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
+#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
+
typedef char ENTROPY_CONTEXT;
typedef struct
@@ -47,11 +59,11 @@ typedef enum
typedef enum
{
- DC_PRED, /* average of above and left pixels */
- V_PRED, /* vertical prediction */
- H_PRED, /* horizontal prediction */
- TM_PRED, /* Truemotion prediction */
- B_PRED, /* block based prediction, each block has its own prediction mode */
+ DC_PRED, /**< Average of above and left pixels */
+ V_PRED, /**< Vertical prediction */
+ H_PRED, /**< Horizontal prediction */
+ TM_PRED, /**< Truemotion prediction */
+ B_PRED, /**< Block based prediction, each block has its own prediction mode */
NEARESTMV,
NEARMV,
@@ -62,30 +74,21 @@ typedef enum
MB_MODE_COUNT
} MB_PREDICTION_MODE;
-/* Macroblock level features */
+/** Macroblock level features */
typedef enum
{
- MB_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */
- MB_LVL_ALT_LF, /* Use alternate loop filter value... */
- MB_LVL_MAX /* Number of MB level features supported */
+ MB_LVL_ALT_Q = 0, /**< Use alternate Quantizer .... */
+ MB_LVL_ALT_LF, /**< Use alternate loop filter value... */
+ MB_LVL_MAX /**< Number of MB level features supported */
} MB_LVL_FEATURES;
-/* Segment Feature Masks */
-#define SEGMENT_ALTQ 0x01
-#define SEGMENT_ALT_LF 0x02
-
-#define VP8_YMODES (B_PRED + 1)
-#define VP8_UV_MODES (TM_PRED + 1)
-
-#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
-
typedef enum
{
- B_DC_PRED, /* average of above and left pixels */
+ B_DC_PRED, /**< Average of above and left pixels */
B_TM_PRED,
- B_VE_PRED, /* vertical prediction */
- B_HE_PRED, /* horizontal prediction */
+ B_VE_PRED, /**< Vertical prediction */
+ B_HE_PRED, /**< Horizontal prediction */
B_LD_PRED,
B_RD_PRED,
@@ -103,18 +106,15 @@ typedef enum
B_MODE_COUNT
} B_PREDICTION_MODE;
-#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
-#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
-
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
-union b_mode_info
+typedef union
{
B_PREDICTION_MODE as_mode;
int_mv mv;
-};
+} B_MODE_INFO;
typedef enum
{
@@ -132,15 +132,15 @@ typedef struct
int_mv mv;
unsigned char partitioning;
- unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
+ unsigned char mb_skip_coeff; /**< Does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
unsigned char need_to_clamp_mvs;
- unsigned char segment_id; /* Which set of segmentation parameters should be used for this MB */
+ unsigned char segment_id; /**< Which set of segmentation parameters should be used for this MB */
} MB_MODE_INFO;
typedef struct
{
MB_MODE_INFO mbmi;
- union b_mode_info bmi[16];
+ B_MODE_INFO bmi[16];
} MODE_INFO;
typedef struct
@@ -162,7 +162,7 @@ typedef struct
int eob;
- union b_mode_info bmi;
+ B_MODE_INFO bmi;
} BLOCKD;
typedef struct
@@ -176,7 +176,7 @@ typedef struct
/* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */
BLOCKD block[25];
- YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
+ YV12_BUFFER_CONFIG pre; /**< Filtered copy of previous frame reconstruction */
YV12_BUFFER_CONFIG dst;
MODE_INFO *mode_info_context;
@@ -225,8 +225,6 @@ typedef struct
int mb_to_top_edge;
int mb_to_bottom_edge;
- int ref_frame_cost[MAX_REF_FRAMES];
-
unsigned int frames_since_golden;
unsigned int frames_till_alt_ref_frame;
vp8_filter_fn_t filter_predict4x4;
@@ -240,8 +238,8 @@ typedef struct
} MACROBLOCKD;
-void vp8_setup_block_doffsets(MACROBLOCKD *x);
-void vp8_setup_block_dptrs(MACROBLOCKD *x);
-void update_blockd_bmi(MACROBLOCKD *xd);
+void vp8_setup_block_doffsets(MACROBLOCKD *mb);
+void vp8_setup_block_dptrs(MACROBLOCKD *mb);
+void update_blockd_bmi(MACROBLOCKD *mb);
#endif /* BLOCKD_H */
diff --git a/src/gallium/auxiliary/vl/vp8/entropy.h b/src/gallium/auxiliary/vl/vp8/entropy.h
index 88819ddda7..ea5dfea15a 100644
--- a/src/gallium/auxiliary/vl/vp8/entropy.h
+++ b/src/gallium/auxiliary/vl/vp8/entropy.h
@@ -69,7 +69,7 @@ extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]);
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
-extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+extern const vp8_prob vp8_coef_update_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
struct VP8Common;
void vp8_default_coef_probs(struct VP8Common *);
diff --git a/src/gallium/auxiliary/vl/vp8/invtrans.c b/src/gallium/auxiliary/vl/vp8/invtrans.c
index 8538126945..0d220f3f41 100644
--- a/src/gallium/auxiliary/vl/vp8/invtrans.c
+++ b/src/gallium/auxiliary/vl/vp8/invtrans.c
@@ -11,14 +11,14 @@
#include "invtrans.h"
-static void recon_dcblock(MACROBLOCKD *x)
+static void recon_dcblock(MACROBLOCKD *mb)
{
- BLOCKD *b = &x->block[24];
+ BLOCKD *b = &mb->block[24];
int i;
for (i = 0; i < 16; i++)
{
- x->block[i].dqcoeff[0] = b->diff[i];
+ mb->block[i].dqcoeff[0] = b->diff[i];
}
}
@@ -30,50 +30,50 @@ void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int
IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
}
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
+void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb)
{
int i;
/* do 2nd order transform on the dc block */
- IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
+ IDCT_INVOKE(rtcd, iwalsh16)(mb->block[24].dqcoeff, mb->block[24].diff);
- recon_dcblock(x);
+ recon_dcblock(mb);
for (i = 0; i < 16; i++)
{
- vp8_inverse_transform_b(rtcd, &x->block[i], 32);
+ vp8_inverse_transform_b(rtcd, &mb->block[i], 32);
}
}
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
+void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb)
{
int i;
for (i = 16; i < 24; i++)
{
- vp8_inverse_transform_b(rtcd, &x->block[i], 16);
+ vp8_inverse_transform_b(rtcd, &mb->block[i], 16);
}
}
-void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
+void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb)
{
int i;
- if (x->mode_info_context->mbmi.mode != B_PRED &&
- x->mode_info_context->mbmi.mode != SPLITMV)
+ if (mb->mode_info_context->mbmi.mode != B_PRED &&
+ mb->mode_info_context->mbmi.mode != SPLITMV)
{
/* do 2nd order transform on the dc block */
- IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff);
- recon_dcblock(x);
+ IDCT_INVOKE(rtcd, iwalsh16)(&mb->block[24].dqcoeff[0], mb->block[24].diff);
+ recon_dcblock(mb);
}
for (i = 0; i < 16; i++)
{
- vp8_inverse_transform_b(rtcd, &x->block[i], 32);
+ vp8_inverse_transform_b(rtcd, &mb->block[i], 32);
}
for (i = 16; i < 24; i++)
{
- vp8_inverse_transform_b(rtcd, &x->block[i], 16);
+ vp8_inverse_transform_b(rtcd, &mb->block[i], 16);
}
}
diff --git a/src/gallium/auxiliary/vl/vp8/invtrans.h b/src/gallium/auxiliary/vl/vp8/invtrans.h
index 1d62b15e15..b36a2c9e97 100644
--- a/src/gallium/auxiliary/vl/vp8/invtrans.h
+++ b/src/gallium/auxiliary/vl/vp8/invtrans.h
@@ -16,8 +16,8 @@
#include "blockd.h"
extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
+extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb);
+extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb);
+extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *mb);
#endif /* INVTRANS_H */
diff --git a/src/gallium/auxiliary/vl/vp8/modecounts.h b/src/gallium/auxiliary/vl/vp8/modecounts.h
index 543de8d164..9be8590d05 100644
--- a/src/gallium/auxiliary/vl/vp8/modecounts.h
+++ b/src/gallium/auxiliary/vl/vp8/modecounts.h
@@ -13,32 +13,32 @@
#include "blockd.h"
-const unsigned int kf_y_mode_cts [VP8_YMODES] =
+const unsigned int kf_y_mode_cts[VP8_YMODES] =
{
1607, 915, 812, 811, 5455
};
-const unsigned int y_mode_cts [VP8_YMODES] =
+const unsigned int y_mode_cts[VP8_YMODES] =
{
8080, 1908, 1582, 1007, 5874
};
-const unsigned int uv_mode_cts [VP8_UV_MODES] =
+const unsigned int uv_mode_cts[VP8_UV_MODES] =
{
59483, 13605, 16492, 4230
};
-const unsigned int kf_uv_mode_cts [VP8_UV_MODES] =
+const unsigned int kf_uv_mode_cts[VP8_UV_MODES] =
{
5319, 1904, 1703, 674
};
-const unsigned int bmode_cts [VP8_BINTRAMODES] =
+const unsigned int bmode_cts[VP8_BINTRAMODES] =
{
43891, 17694, 10036, 3920, 3363, 2546, 5119, 3221, 2471, 1723
};
-const unsigned int kf_default_bmode_cts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =
+const unsigned int kf_default_bmode_cts[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES] =
{
{
/* Above Mode : 0 */
diff --git a/src/gallium/auxiliary/vl/vp8/mv.h b/src/gallium/auxiliary/vl/vp8/mv.h
index 49dcf3472a..4a789862fc 100644
--- a/src/gallium/auxiliary/vl/vp8/mv.h
+++ b/src/gallium/auxiliary/vl/vp8/mv.h
@@ -20,10 +20,11 @@ typedef struct
short col;
} MV;
+/** Facilitates faster equality tests and copies */
typedef union
{
uint32_t as_int;
MV as_mv;
-} int_mv; /* facilitates faster equality tests and copies */
+} int_mv;
#endif /* MV_H */
diff --git a/src/gallium/auxiliary/vl/vp8/reconinter.c b/src/gallium/auxiliary/vl/vp8/reconinter.c
index b40fe64f38..b1a71814c1 100644
--- a/src/gallium/auxiliary/vl/vp8/reconinter.c
+++ b/src/gallium/auxiliary/vl/vp8/reconinter.c
@@ -135,7 +135,7 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_filter_fn_t sppf)
}
}
-static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
+static void build_inter_predictors4b(MACROBLOCKD *mb, BLOCKD *d, int pitch)
{
unsigned char *ptr_base = *(d->base_pre);
unsigned char *ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
@@ -143,15 +143,15 @@ static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
{
- x->filter_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+ mb->filter_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
}
else
{
- RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
+ RECON_INVOKE(&mb->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
}
}
-static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
+static void build_inter_predictors2b(MACROBLOCKD *mb, BLOCKD *d, int pitch)
{
unsigned char *ptr_base = *(d->base_pre);
unsigned char *ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
@@ -159,15 +159,15 @@ static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
{
- x->filter_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+ mb->filter_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
}
else
{
- RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
+ RECON_INVOKE(&mb->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
}
}
-void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *mb,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
@@ -176,105 +176,105 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
{
int offset;
- int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
- int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
+ int mv_row = mb->mode_info_context->mbmi.mv.as_mv.row;
+ int mv_col = mb->mode_info_context->mbmi.mv.as_mv.col;
- unsigned char *ptr_base = x->pre.y_buffer;
- int pre_stride = x->block[0].pre_stride;
+ unsigned char *ptr_base = mb->pre.y_buffer;
+ int pre_stride = mb->block[0].pre_stride;
unsigned char *ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
unsigned char *uptr, *vptr;
if ((mv_row | mv_col) & 7)
{
- x->filter_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride);
+ mb->filter_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride);
}
else
{
- RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride);
+ RECON_INVOKE(&mb->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride);
}
- mv_row = x->block[16].bmi.mv.as_mv.row;
- mv_col = x->block[16].bmi.mv.as_mv.col;
+ mv_row = mb->block[16].bmi.mv.as_mv.row;
+ mv_col = mb->block[16].bmi.mv.as_mv.col;
pre_stride >>= 1;
offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
- uptr = x->pre.u_buffer + offset;
- vptr = x->pre.v_buffer + offset;
+ uptr = mb->pre.u_buffer + offset;
+ vptr = mb->pre.v_buffer + offset;
if ((mv_row | mv_col) & 7)
{
- x->filter_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
- x->filter_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
+ mb->filter_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
+ mb->filter_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
}
else
{
- RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
- RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
+ RECON_INVOKE(&mb->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
+ RECON_INVOKE(&mb->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
}
}
-void vp8_build_inter4x4_predictors_mb(MACROBLOCKD *x)
+void vp8_build_inter4x4_predictors_mb(MACROBLOCKD *mb)
{
int i;
- if (x->mode_info_context->mbmi.partitioning < 3)
+ if (mb->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
- BLOCKD *d = &x->block[bbb[i]];
- build_inter_predictors4b(x, d, 16);
+ BLOCKD *d = &mb->block[bbb[i]];
+ build_inter_predictors4b(mb, d, 16);
}
}
else
{
for (i = 0; i < 16; i += 2)
{
- BLOCKD *d0 = &x->block[i];
- BLOCKD *d1 = &x->block[i+1];
+ BLOCKD *d0 = &mb->block[i];
+ BLOCKD *d1 = &mb->block[i+1];
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
- build_inter_predictors2b(x, d0, 16);
+ build_inter_predictors2b(mb, d0, 16);
else
{
- vp8_build_inter_predictors_b(d0, 16, x->filter_predict4x4);
- vp8_build_inter_predictors_b(d1, 16, x->filter_predict4x4);
+ vp8_build_inter_predictors_b(d0, 16, mb->filter_predict4x4);
+ vp8_build_inter_predictors_b(d1, 16, mb->filter_predict4x4);
}
}
}
for (i = 16; i < 24; i += 2)
{
- BLOCKD *d0 = &x->block[i];
- BLOCKD *d1 = &x->block[i+1];
+ BLOCKD *d0 = &mb->block[i];
+ BLOCKD *d1 = &mb->block[i+1];
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
- build_inter_predictors2b(x, d0, 8);
+ build_inter_predictors2b(mb, d0, 8);
else
{
- vp8_build_inter_predictors_b(d0, 8, x->filter_predict4x4);
- vp8_build_inter_predictors_b(d1, 8, x->filter_predict4x4);
+ vp8_build_inter_predictors_b(d0, 8, mb->filter_predict4x4);
+ vp8_build_inter_predictors_b(d1, 8, mb->filter_predict4x4);
}
}
}
-void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
+void vp8_build_inter_predictors_mb(MACROBLOCKD *mb)
{
- if (x->mode_info_context->mbmi.mode != SPLITMV)
+ if (mb->mode_info_context->mbmi.mode != SPLITMV)
{
- vp8_build_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256],
- &x->predictor[320], 16, 8);
+ vp8_build_inter16x16_predictors_mb(mb, mb->predictor, &mb->predictor[256],
+ &mb->predictor[320], 16, 8);
}
else
{
- vp8_build_inter4x4_predictors_mb(x);
+ vp8_build_inter4x4_predictors_mb(mb);
}
}
-void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
+void vp8_build_uvmvs(MACROBLOCKD *mb, int fullpixel)
{
int i, j;
- if (x->mode_info_context->mbmi.mode == SPLITMV)
+ if (mb->mode_info_context->mbmi.mode == SPLITMV)
{
for (i = 0; i < 2; i++)
{
@@ -284,45 +284,45 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
int uoffset = 16 + i * 2 + j;
int voffset = 20 + i * 2 + j;
- int temp = x->block[yoffset].bmi.mv.as_mv.row
- + x->block[yoffset+1].bmi.mv.as_mv.row
- + x->block[yoffset+4].bmi.mv.as_mv.row
- + x->block[yoffset+5].bmi.mv.as_mv.row;
+ int temp = mb->block[yoffset].bmi.mv.as_mv.row
+ + mb->block[yoffset+1].bmi.mv.as_mv.row
+ + mb->block[yoffset+4].bmi.mv.as_mv.row
+ + mb->block[yoffset+5].bmi.mv.as_mv.row;
if (temp < 0)
temp -= 4;
else
temp += 4;
- x->block[uoffset].bmi.mv.as_mv.row = temp / 8;
+ mb->block[uoffset].bmi.mv.as_mv.row = temp / 8;
if (fullpixel)
- x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & 0xfffffff8;
+ mb->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & 0xfffffff8;
- temp = x->block[yoffset ].bmi.mv.as_mv.col
- + x->block[yoffset+1].bmi.mv.as_mv.col
- + x->block[yoffset+4].bmi.mv.as_mv.col
- + x->block[yoffset+5].bmi.mv.as_mv.col;
+ temp = mb->block[yoffset ].bmi.mv.as_mv.col
+ + mb->block[yoffset+1].bmi.mv.as_mv.col
+ + mb->block[yoffset+4].bmi.mv.as_mv.col
+ + mb->block[yoffset+5].bmi.mv.as_mv.col;
if (temp < 0)
temp -= 4;
else
temp += 4;
- x->block[uoffset].bmi.mv.as_mv.col = temp / 8;
+ mb->block[uoffset].bmi.mv.as_mv.col = temp / 8;
if (fullpixel)
- x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & 0xfffffff8;
+ mb->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & 0xfffffff8;
- x->block[voffset].bmi.mv.as_mv.row = x->block[uoffset].bmi.mv.as_mv.row;
- x->block[voffset].bmi.mv.as_mv.col = x->block[uoffset].bmi.mv.as_mv.col;
+ mb->block[voffset].bmi.mv.as_mv.row = mb->block[uoffset].bmi.mv.as_mv.row;
+ mb->block[voffset].bmi.mv.as_mv.col = mb->block[uoffset].bmi.mv.as_mv.col;
}
}
}
else
{
- int mvrow = x->mode_info_context->mbmi.mv.as_mv.row;
- int mvcol = x->mode_info_context->mbmi.mv.as_mv.col;
+ int mvrow = mb->mode_info_context->mbmi.mv.as_mv.row;
+ int mvcol = mb->mode_info_context->mbmi.mv.as_mv.col;
if (mvrow < 0)
mvrow -= 1;
@@ -339,13 +339,13 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
for (i = 0; i < 8; i++)
{
- x->block[16 + i].bmi.mv.as_mv.row = mvrow;
- x->block[16 + i].bmi.mv.as_mv.col = mvcol;
+ mb->block[16 + i].bmi.mv.as_mv.row = mvrow;
+ mb->block[16 + i].bmi.mv.as_mv.col = mvcol;
if (fullpixel)
{
- x->block[16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8;
- x->block[16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8;
+ mb->block[16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8;
+ mb->block[16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8;
}
}
}
diff --git a/src/gallium/auxiliary/vl/vp8/reconinter.h b/src/gallium/auxiliary/vl/vp8/reconinter.h
index a1ef79bdfb..f9fbecb621 100644
--- a/src/gallium/auxiliary/vl/vp8/reconinter.h
+++ b/src/gallium/auxiliary/vl/vp8/reconinter.h
@@ -12,17 +12,21 @@
#ifndef RECONINTER_H
#define RECONINTER_H
-extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x);
-extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+extern void vp8_build_inter_predictors_mb(MACROBLOCKD *mb);
+
+extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *mb,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_ystride,
int dst_uvstride);
-extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x);
-extern void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel);
+extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *mb);
+
+extern void vp8_build_uvmvs(MACROBLOCKD *mb, int fullpixel);
+
extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_filter_fn_t sppf);
-extern void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x);
+
+extern void vp8_build_inter_predictors_mbuv(MACROBLOCKD *mb);
#endif /* RECONINTER_H */
diff --git a/src/gallium/auxiliary/vl/vp8/reconintra.c b/src/gallium/auxiliary/vl/vp8/reconintra.c
index 8565c9922b..82de750e8a 100644
--- a/src/gallium/auxiliary/vl/vp8/reconintra.c
+++ b/src/gallium/auxiliary/vl/vp8/reconintra.c
@@ -33,34 +33,34 @@ void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf)
}
/**
- * For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
- * and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
+ * For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *mb)
+ * and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *mb).
*/
-void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
+void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *mb)
{
int i;
for (i = 16; i < 24; i += 2)
{
- BLOCKD *b = &x->block[i];
+ BLOCKD *b = &mb->block[i];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}
-void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
+void vp8_build_intra_predictors_mby(MACROBLOCKD *mb)
{
int r, c, i;
- unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride;
+ unsigned char *yabove_row = mb->dst.y_buffer - mb->dst.y_stride;
unsigned char yleft_col[16];
unsigned char ytop_left = yabove_row[-1];
- unsigned char *ypred_ptr = x->predictor;
+ unsigned char *ypred_ptr = mb->predictor;
for (i = 0; i < 16; i++)
{
- yleft_col[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
+ yleft_col[i] = mb->dst.y_buffer [i* mb->dst.y_stride -1];
}
/* for Y */
- switch (x->mode_info_context->mbmi.mode)
+ switch (mb->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@@ -69,9 +69,9 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
int shift;
int average = 0;
- if (x->up_available || x->left_available)
+ if (mb->up_available || mb->left_available)
{
- if (x->up_available)
+ if (mb->up_available)
{
for (i = 0; i < 16; i++)
{
@@ -79,7 +79,7 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
}
}
- if (x->left_available)
+ if (mb->left_available)
{
for (i = 0; i < 16; i++)
@@ -88,7 +88,7 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
}
}
- shift = 3 + x->up_available + x->left_available;
+ shift = 3 + mb->up_available + mb->left_available;
expected_dc = (average + (1 << (shift - 1))) >> shift;
}
else
@@ -156,24 +156,24 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
}
}
-void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
+void vp8_build_intra_predictors_mby_s(MACROBLOCKD *mb)
{
- unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride;
+ unsigned char *yabove_row = mb->dst.y_buffer - mb->dst.y_stride;
unsigned char yleft_col[16];
unsigned char ytop_left = yabove_row[-1];
- unsigned char *ypred_ptr = x->predictor;
+ unsigned char *ypred_ptr = mb->predictor;
int r, c, i;
- int y_stride = x->dst.y_stride;
- ypred_ptr = x->dst.y_buffer; /*x->predictor;*/
+ int y_stride = mb->dst.y_stride;
+ ypred_ptr = mb->dst.y_buffer; /*mb->predictor;*/
for (i = 0; i < 16; i++)
{
- yleft_col[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
+ yleft_col[i] = mb->dst.y_buffer [i* mb->dst.y_stride -1];
}
/* for Y */
- switch (x->mode_info_context->mbmi.mode)
+ switch (mb->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@@ -182,9 +182,9 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
int shift;
int average = 0;
- if (x->up_available || x->left_available)
+ if (mb->up_available || mb->left_available)
{
- if (x->up_available)
+ if (mb->up_available)
{
for (i = 0; i < 16; i++)
{
@@ -192,7 +192,7 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
}
}
- if (x->left_available)
+ if (mb->left_available)
{
for (i = 0; i < 16; i++)
@@ -201,7 +201,7 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
}
}
- shift = 3 + x->up_available + x->left_available;
+ shift = 3 + mb->up_available + mb->left_available;
expected_dc = (average + (1 << (shift - 1))) >> shift;
}
else
@@ -274,25 +274,25 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
}
}
-void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
+void vp8_build_intra_predictors_mbuv(MACROBLOCKD *mb)
{
- unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
+ unsigned char *uabove_row = mb->dst.u_buffer - mb->dst.uv_stride;
unsigned char uleft_col[16];
unsigned char utop_left = uabove_row[-1];
- unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
+ unsigned char *vabove_row = mb->dst.v_buffer - mb->dst.uv_stride;
unsigned char vleft_col[20];
unsigned char vtop_left = vabove_row[-1];
- unsigned char *upred_ptr = &x->predictor[256];
- unsigned char *vpred_ptr = &x->predictor[320];
+ unsigned char *upred_ptr = &mb->predictor[256];
+ unsigned char *vpred_ptr = &mb->predictor[320];
int i, j;
for (i = 0; i < 8; i++)
{
- uleft_col[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
- vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
+ uleft_col[i] = mb->dst.u_buffer [i*mb->dst.uv_stride - 1];
+ vleft_col[i] = mb->dst.v_buffer [i*mb->dst.uv_stride - 1];
}
- switch (x->mode_info_context->mbmi.uv_mode)
+ switch (mb->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{
@@ -303,7 +303,7 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
int Uaverage = 0;
int Vaverage = 0;
- if (x->up_available)
+ if (mb->up_available)
{
for (i = 0; i < 8; i++)
{
@@ -312,7 +312,7 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
}
}
- if (x->left_available)
+ if (mb->left_available)
{
for (i = 0; i < 8; i++)
{
@@ -321,14 +321,14 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
}
}
- if (!x->up_available && !x->left_available)
+ if (!mb->up_available && !mb->left_available)
{
expected_udc = 128;
expected_vdc = 128;
}
else
{
- shift = 2 + x->up_available + x->left_available;
+ shift = 2 + mb->up_available + mb->left_available;
expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
}
@@ -408,27 +408,27 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
}
}
-void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
+void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *mb)
{
- unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
+ unsigned char *uabove_row = mb->dst.u_buffer - mb->dst.uv_stride;
unsigned char uleft_col[16];
unsigned char utop_left = uabove_row[-1];
- unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
+ unsigned char *vabove_row = mb->dst.v_buffer - mb->dst.uv_stride;
unsigned char vleft_col[20];
unsigned char vtop_left = vabove_row[-1];
- unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
- unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
- int uv_stride = x->dst.uv_stride;
+ unsigned char *upred_ptr = mb->dst.u_buffer; /*&mb->predictor[256];*/
+ unsigned char *vpred_ptr = mb->dst.v_buffer; /*&mb->predictor[320];*/
+ int uv_stride = mb->dst.uv_stride;
int i, j;
for (i = 0; i < 8; i++)
{
- uleft_col[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
- vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
+ uleft_col[i] = mb->dst.u_buffer [i * mb->dst.uv_stride - 1];
+ vleft_col[i] = mb->dst.v_buffer [i * mb->dst.uv_stride - 1];
}
- switch (x->mode_info_context->mbmi.uv_mode)
+ switch (mb->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{
@@ -439,7 +439,7 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
int Uaverage = 0;
int Vaverage = 0;
- if (x->up_available)
+ if (mb->up_available)
{
for (i = 0; i < 8; i++)
{
@@ -448,7 +448,7 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
}
}
- if (x->left_available)
+ if (mb->left_available)
{
for (i = 0; i < 8; i++)
{
@@ -457,14 +457,14 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
}
}
- if (!x->up_available && !x->left_available)
+ if (!mb->up_available && !mb->left_available)
{
expected_udc = 128;
expected_vdc = 128;
}
else
{
- shift = 2 + x->up_available + x->left_available;
+ shift = 2 + mb->up_available + mb->left_available;
expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
}
diff --git a/src/gallium/auxiliary/vl/vp8/reconintra.h b/src/gallium/auxiliary/vl/vp8/reconintra.h
index 8de8d8d85c..06f4375326 100644
--- a/src/gallium/auxiliary/vl/vp8/reconintra.h
+++ b/src/gallium/auxiliary/vl/vp8/reconintra.h
@@ -18,12 +18,11 @@
extern void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
extern void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
- MACROBLOCKD *x);
+ MACROBLOCKD *mb);
-extern void vp8_build_intra_predictors_mby(MACROBLOCKD *x);
-extern void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x);
-
-extern void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x);
-extern void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x);
+extern void vp8_build_intra_predictors_mby(MACROBLOCKD *mb);
+extern void vp8_build_intra_predictors_mby_s(MACROBLOCKD *mb);
+extern void vp8_build_intra_predictors_mbuv(MACROBLOCKD *mb);
+extern void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *mb);
#endif /* RECONINTRA_H */
diff --git a/src/gallium/auxiliary/vl/vp8/reconintra4x4.c b/src/gallium/auxiliary/vl/vp8/reconintra4x4.c
index ed7c681a50..5bf6f82f34 100644
--- a/src/gallium/auxiliary/vl/vp8/reconintra4x4.c
+++ b/src/gallium/auxiliary/vl/vp8/reconintra4x4.c
@@ -285,14 +285,14 @@ void vp8_intra4x4_predict(BLOCKD *x, int b_mode, unsigned char *predictor)
* Copy 4 bytes from the above right down so that the 4x4 prediction modes using
* pixels above and to the right prediction have filled in pixels to use.
*/
-void vp8_intra_prediction_down_copy(MACROBLOCKD *x)
+void vp8_intra_prediction_down_copy(MACROBLOCKD *mb)
{
- unsigned char *above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
+ unsigned char *above_right = *(mb->block[0].base_dst) + mb->block[0].dst - mb->block[0].dst_stride + 16;
unsigned int *src_ptr = (unsigned int *)above_right;
- unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
- unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
- unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);
+ unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * mb->block[0].dst_stride);
+ unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * mb->block[0].dst_stride);
+ unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * mb->block[0].dst_stride);
*dst_ptr0 = *src_ptr;
*dst_ptr1 = *src_ptr;
diff --git a/src/gallium/auxiliary/vl/vp8/reconintra4x4.h b/src/gallium/auxiliary/vl/vp8/reconintra4x4.h
index 96c6e4d88c..1185e3c35c 100644
--- a/src/gallium/auxiliary/vl/vp8/reconintra4x4.h
+++ b/src/gallium/auxiliary/vl/vp8/reconintra4x4.h
@@ -15,6 +15,7 @@
#include "blockd.h"
extern void vp8_intra4x4_predict(BLOCKD *x, int b_mode, unsigned char *predictor);
-extern void vp8_intra_prediction_down_copy(MACROBLOCKD *x);
+
+extern void vp8_intra_prediction_down_copy(MACROBLOCKD *mb);
#endif /* RECONINTRA4x4_H */
diff --git a/src/gallium/auxiliary/vl/vp8/vp8_mem.c b/src/gallium/auxiliary/vl/vp8/vp8_mem.c
index 55030b7cb1..c023bf3544 100644
--- a/src/gallium/auxiliary/vl/vp8/vp8_mem.c
+++ b/src/gallium/auxiliary/vl/vp8/vp8_mem.c
@@ -12,9 +12,9 @@
#include "vp8_mem.h"
#define ADDRESS_STORAGE_SIZE sizeof(size_t)
-#define DEFAULT_ALIGNMENT 32 // must be >= 1 !
+#define DEFAULT_ALIGNMENT 32 /**< Must be superior or equal to 1 ! */
-/* returns an addr aligned to the byte boundary specified by align */
+/** Returns an addr aligned to the byte boundary specified by align */
#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
void *vpx_memalign(size_t align, size_t size)
@@ -25,8 +25,7 @@ void *vpx_memalign(size_t align, size_t size)
if (addr)
{
x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
- // save the actual malloc address
- ((size_t *)x)[-1] = (size_t)addr;
+ ((size_t *)x)[-1] = (size_t)addr; // Save the actual malloc address
}
return x;
diff --git a/src/gallium/auxiliary/vl/vp8/yv12utils.c b/src/gallium/auxiliary/vl/vp8/yv12utils.c
index 10161adf33..bf5f8abee1 100644
--- a/src/gallium/auxiliary/vl/vp8/yv12utils.c
+++ b/src/gallium/auxiliary/vl/vp8/yv12utils.c
@@ -12,15 +12,16 @@
#include "vp8_mem.h"
#include "yv12utils.h"
+/**
+ * \note buffer_alloc isn't accessed by most functions. Rather y_buffer,
+ * u_buffer and v_buffer point to buffer_alloc and are used. Clear out
+ * all of this so that a freed pointer isn't inadvertently used
+ */
int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf)
{
if (ybf)
{
vpx_free(ybf->buffer_alloc);
-
- /* buffer_alloc isn't accessed by most functions. Rather y_buffer,
- u_buffer and v_buffer point to buffer_alloc and are used. Clear out
- all of this so that a freed pointer isn't inadvertently used */
memset (ybf, 0, sizeof(YV12_BUFFER_CONFIG));
}
else
@@ -31,27 +32,30 @@ int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf)
return 0;
}
+/**
+ * There is currently a bunch of code which assumes uv_stride == y_stride/2,
+ * so enforce this here.
+ *
+ * Only support allocating buffers that have a height and width that are
+ * multiples of 16, and a border that's a multiple of 32.
+ * The border restriction is required to get 16-byte alignment of the start of
+ * the chroma rows without intoducing an arbitrary gap between planes.
+ */
int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
int width, int height, int border)
{
if (ybf)
{
int y_stride = ((width + 2 * border) + 31) & ~31;
- int yplane_size = (height + 2 * border) * y_stride;
+ int y_size = (height + 2 * border) * y_stride;
+
int uv_width = width >> 1;
int uv_height = height >> 1;
- /** There is currently a bunch of code which assumes
- * uv_stride == y_stride/2, so enforce this here. */
int uv_stride = y_stride >> 1;
- int uvplane_size = (uv_height + border) * uv_stride;
+ int uv_size = (uv_height + border) * uv_stride;
vp8_yv12_de_alloc_frame_buffer(ybf);
- /** Only support allocating buffers that have a height and width that
- * are multiples of 16, and a border that's a multiple of 32.
- * The border restriction is required to get 16-byte alignment of the
- * start of the chroma rows without intoducing an arbitrary gap
- * between planes. */
if ((width & 0xf) | (height & 0xf) | (border & 0x1f))
return -3;
@@ -64,16 +68,18 @@ int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
ybf->uv_stride = uv_stride;
ybf->border = border;
- ybf->frame_size = yplane_size + 2 * uvplane_size;
+ ybf->frame_size = y_size + 2 * uv_size;
ybf->buffer_alloc = (unsigned char *)vpx_memalign(32, ybf->frame_size);
if (ybf->buffer_alloc == NULL)
+ {
return -1;
+ }
ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
- ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2;
- ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * uv_stride) + border / 2;
+ ybf->u_buffer = ybf->buffer_alloc + y_size + (border / 2 * uv_stride) + border / 2;
+ ybf->v_buffer = ybf->buffer_alloc + y_size + uv_size + (border / 2 * uv_stride) + border / 2;
ybf->corrupted = 0; /* Assume not currupted by errors */
}
@@ -91,7 +97,7 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
unsigned char *src_ptr1, *src_ptr2;
unsigned char *dest_ptr1, *dest_ptr2;
- unsigned int Border;
+ unsigned int border;
int plane_stride;
int plane_height;
int plane_width;
@@ -100,7 +106,7 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
/* Y Plane */
/***********/
- Border = ybf->border;
+ border = ybf->border;
plane_stride = ybf->y_stride;
plane_height = ybf->y_height;
plane_width = ybf->y_width;
@@ -108,13 +114,13 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
/* Copy the left and right most columns out */
src_ptr1 = ybf->y_buffer;
src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
+ dest_ptr1 = src_ptr1 - border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++)
{
- memset(dest_ptr1, src_ptr1[0], Border);
- memset(dest_ptr2, src_ptr2[0], Border);
+ memset(dest_ptr1, src_ptr1[0], border);
+ memset(dest_ptr2, src_ptr2[0], border);
src_ptr1 += plane_stride;
src_ptr2 += plane_stride;
dest_ptr1 += plane_stride;
@@ -122,12 +128,12 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
}
/* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->y_buffer - Border;
+ src_ptr1 = ybf->y_buffer - border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
+ dest_ptr1 = src_ptr1 - (border * plane_stride);
dest_ptr2 = src_ptr2 + plane_stride;
- for (i = 0; i < (int)Border; i++)
+ for (i = 0; i < (int)border; i++)
{
memcpy(dest_ptr1, src_ptr1, plane_stride);
memcpy(dest_ptr2, src_ptr2, plane_stride);
@@ -142,18 +148,18 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
plane_stride = ybf->uv_stride;
plane_height = ybf->uv_height;
plane_width = ybf->uv_width;
- Border /= 2;
+ border /= 2;
/* Copy the left and right most columns out */
src_ptr1 = ybf->u_buffer;
src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
+ dest_ptr1 = src_ptr1 - border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++)
{
- memset(dest_ptr1, src_ptr1[0], Border);
- memset(dest_ptr2, src_ptr2[0], Border);
+ memset(dest_ptr1, src_ptr1[0], border);
+ memset(dest_ptr2, src_ptr2[0], border);
src_ptr1 += plane_stride;
src_ptr2 += plane_stride;
dest_ptr1 += plane_stride;
@@ -161,12 +167,12 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
}
/* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->u_buffer - Border;
+ src_ptr1 = ybf->u_buffer - border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
+ dest_ptr1 = src_ptr1 - (border * plane_stride);
dest_ptr2 = src_ptr2 + plane_stride;
- for (i = 0; i < (int)(Border); i++)
+ for (i = 0; i < (int)(border); i++)
{
memcpy(dest_ptr1, src_ptr1, plane_stride);
memcpy(dest_ptr2, src_ptr2, plane_stride);
@@ -181,13 +187,13 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
/* Copy the left and right most columns out */
src_ptr1 = ybf->v_buffer;
src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
+ dest_ptr1 = src_ptr1 - border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++)
{
- memset(dest_ptr1, src_ptr1[0], Border);
- memset(dest_ptr2, src_ptr2[0], Border);
+ memset(dest_ptr1, src_ptr1[0], border);
+ memset(dest_ptr2, src_ptr2[0], border);
src_ptr1 += plane_stride;
src_ptr2 += plane_stride;
dest_ptr1 += plane_stride;
@@ -195,12 +201,12 @@ void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf)
}
/* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->v_buffer - Border;
+ src_ptr1 = ybf->v_buffer - border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
+ dest_ptr1 = src_ptr1 - (border * plane_stride);
dest_ptr2 = src_ptr2 + plane_stride;
- for (i = 0; i < (int)(Border); i++)
+ for (i = 0; i < (int)(border); i++)
{
memcpy(dest_ptr1, src_ptr1, plane_stride);
memcpy(dest_ptr2, src_ptr2, plane_stride);
diff --git a/src/gallium/auxiliary/vl/vp8/yv12utils.h b/src/gallium/auxiliary/vl/vp8/yv12utils.h
index a7a1ef12b8..195a31e7ea 100644
--- a/src/gallium/auxiliary/vl/vp8/yv12utils.h
+++ b/src/gallium/auxiliary/vl/vp8/yv12utils.h
@@ -61,7 +61,6 @@ typedef struct
int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int border);
int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
-
void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf);
#ifdef __cplusplus