author     Taekyun Kim <tkq.kim@samsung.com>    2011-09-20 19:46:25 +0900
committer  Taekyun Kim <tkq.kim@samsung.com>    2011-10-18 13:00:02 +0900
commit     b5e4355fa4973e3edd4abeb11bdc47c42371cc76 (patch)
tree       e9f1241535955beffb302496abe1476bc61cf61a
parent     bb7142d361d56d66ac40debb60a7c4d099764ba8 (diff)
ARM: NEON: Some cleanup of bilinear scanline functions
Use a STRIDE register, and do the initial horizontal weight update before entering the interpolation loop. Add cache preload for mask and dst.
-rw-r--r--  pixman/pixman-arm-neon-asm-bilinear.S  128
1 file changed, 67 insertions(+), 61 deletions(-)
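
The core of the change is easier to see in plain C. The sketch below is illustrative only (the function and variable names are not from the patch, which stays entirely in NEON assembly): the bottom scanline always lies a fixed distance below the top one, so STRIDE = BOTTOM - TOP is computed once up front and a single temporary register can then walk from the top pixel to the bottom pixel with a post-indexed load, instead of maintaining a separate TOP/BOTTOM address pair for every pixel. The freed instruction slots also allow the first horizontal weight update (the vshr.u16/vadd.u16 on q12/q13) to run before the interpolation loop is entered.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper mirroring bilinear_load_8888: load the two source
 * pixels that sit above and below the sample point at 16.16 fixed-point x. */
static inline void
bilinear_load_two_rows(const uint32_t *top, const uint32_t *bottom,
                       uint32_t x, uint32_t *out_top, uint32_t *out_bottom)
{
    ptrdiff_t stride = bottom - top;      /* "sub STRIDE, BOTTOM, TOP", once per scanline */
    const uint32_t *p = top + (x >> 16);  /* "add TMP1, TOP, TMP1, asl #2"                */

    *out_top    = p[0];                   /* "vld1.32 {reg1}, [TMP1], STRIDE"             */
    p += stride;                          /*   ...post-indexed pointer update             */
    *out_bottom = p[0];                   /* "vld1.32 {reg2}, [TMP1]"                     */
}
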
diff --git a/pixman/pixman-arm-neon-asm-bilinear.S b/pixman/pixman-arm-neon-asm-bilinear.S
index 3c7fe0f..c5ba929 100644
--- a/pixman/pixman-arm-neon-asm-bilinear.S
+++ b/pixman/pixman-arm-neon-asm-bilinear.S
@@ -44,10 +44,6 @@
* All temp registers can be used freely outside the code block.
* Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks.
*
- * TODOs
- * Support 0565 pixel format
- * Optimization for two and last pixel cases
- *
* Remarks
* There can be lots of pipeline stalls inside code block and between code blocks.
* Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
@@ -92,21 +88,19 @@ fname:
*/
.macro bilinear_load_8888 reg1, reg2, tmp
- mov TMP2, X, asr #16
+ mov TMP1, X, asr #16
add X, X, UX
- add TMP1, TOP, TMP2, asl #2
- add TMP2, BOTTOM, TMP2, asl #2
- vld1.32 {reg1}, [TMP1]
- vld1.32 {reg2}, [TMP2]
+ add TMP1, TOP, TMP1, asl #2
+ vld1.32 {reg1}, [TMP1], STRIDE
+ vld1.32 {reg2}, [TMP1]
.endm
.macro bilinear_load_0565 reg1, reg2, tmp
- mov TMP2, X, asr #16
+ mov TMP1, X, asr #16
add X, X, UX
- add TMP1, TOP, TMP2, asl #1
- add TMP2, BOTTOM, TMP2, asl #1
- vld1.32 {reg2[0]}, [TMP1]
- vld1.32 {reg2[1]}, [TMP2]
+ add TMP1, TOP, TMP1, asl #1
+ vld1.32 {reg2[0]}, [TMP1], STRIDE
+ vld1.32 {reg2[1]}, [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
@@ -134,18 +128,16 @@ fname:
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
- mov TMP2, X, asr #16
+ mov TMP1, X, asr #16
add X, X, UX
- mov TMP4, X, asr #16
+ add TMP1, TOP, TMP1, asl #1
+ mov TMP2, X, asr #16
add X, X, UX
- add TMP1, TOP, TMP2, asl #1
- add TMP2, BOTTOM, TMP2, asl #1
- add TMP3, TOP, TMP4, asl #1
- add TMP4, BOTTOM, TMP4, asl #1
- vld1.32 {acc2lo[0]}, [TMP1]
- vld1.32 {acc2hi[0]}, [TMP3]
- vld1.32 {acc2lo[1]}, [TMP2]
- vld1.32 {acc2hi[1]}, [TMP4]
+ add TMP2, TOP, TMP2, asl #1
+ vld1.32 {acc2lo[0]}, [TMP1], STRIDE
+ vld1.32 {acc2hi[0]}, [TMP2], STRIDE
+ vld1.32 {acc2lo[1]}, [TMP1]
+ vld1.32 {acc2hi[1]}, [TMP2]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip.u8 reg1, reg3
vzip.u8 reg2, reg4
@@ -161,34 +153,30 @@ fname:
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
- mov TMP2, X, asr #16
+ mov TMP1, X, asr #16
add X, X, UX
- mov TMP4, X, asr #16
+ add TMP1, TOP, TMP1, asl #1
+ mov TMP2, X, asr #16
add X, X, UX
- add TMP1, TOP, TMP2, asl #1
- add TMP2, BOTTOM, TMP2, asl #1
- add TMP3, TOP, TMP4, asl #1
- add TMP4, BOTTOM, TMP4, asl #1
- vld1.32 {xacc2lo[0]}, [TMP1]
- vld1.32 {xacc2hi[0]}, [TMP3]
- vld1.32 {xacc2lo[1]}, [TMP2]
- vld1.32 {xacc2hi[1]}, [TMP4]
+ add TMP2, TOP, TMP2, asl #1
+ vld1.32 {xacc2lo[0]}, [TMP1], STRIDE
+ vld1.32 {xacc2hi[0]}, [TMP2], STRIDE
+ vld1.32 {xacc2lo[1]}, [TMP1]
+ vld1.32 {xacc2hi[1]}, [TMP2]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
- mov TMP2, X, asr #16
+ mov TMP1, X, asr #16
add X, X, UX
- mov TMP4, X, asr #16
+ add TMP1, TOP, TMP1, asl #1
+ mov TMP2, X, asr #16
add X, X, UX
- add TMP1, TOP, TMP2, asl #1
- add TMP2, BOTTOM, TMP2, asl #1
- add TMP3, TOP, TMP4, asl #1
- add TMP4, BOTTOM, TMP4, asl #1
- vld1.32 {yacc2lo[0]}, [TMP1]
+ add TMP2, TOP, TMP2, asl #1
+ vld1.32 {yacc2lo[0]}, [TMP1], STRIDE
vzip.u8 xreg1, xreg3
- vld1.32 {yacc2hi[0]}, [TMP3]
+ vld1.32 {yacc2hi[0]}, [TMP2], STRIDE
vzip.u8 xreg2, xreg4
- vld1.32 {yacc2lo[1]}, [TMP2]
+ vld1.32 {yacc2lo[1]}, [TMP1]
vzip.u8 xreg3, xreg4
- vld1.32 {yacc2hi[1]}, [TMP4]
+ vld1.32 {yacc2hi[1]}, [TMP2]
vzip.u8 xreg1, xreg2
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
vmull.u8 xacc1, xreg1, d28
@@ -252,6 +240,7 @@ fname:
.else
.error bilinear_load_mask_8 numpix is unsupported
.endif
+ pld [MASK, #prefetch_offset]
.endm
.macro bilinear_load_mask mask_fmt, numpix, mask
@@ -279,6 +268,7 @@ fname:
.else
.error bilinear_load_dst_8888 numpix is unsupported
.endif
+ pld [OUT, #(prefetch_offset * 4)]
.endm
.macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01
@@ -303,7 +293,7 @@ fname:
* For two pixel case
* (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
- * We can do some optimizations for this including one pixel cases.
+ * We can do some optimizations for this including last pixel cases.
*/
.macro bilinear_duplicate_mask_x numpix, mask
.endm
@@ -497,8 +487,7 @@ fname:
bilinear_load_dst dst_fmt, op, 1, d18, d19, q9
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
- vshr.u16 d30, d24, #8
- /* 4 cycles bubble */
+ /* 5 cycles bubble */
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
@@ -525,18 +514,18 @@ fname:
q1, q11, d0, d1, d20, d21, d22, d23
bilinear_load_mask mask_fmt, 2, d4
bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
- vshr.u16 q15, q12, #8
- vadd.u16 q12, q12, q13
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
- vshrn.u32 d30, q0, #16
- vshrn.u32 d31, q10, #16
+ vshrn.u32 d0, q0, #16
+ vshrn.u32 d1, q10, #16
bilinear_duplicate_mask mask_fmt, 2, d4
- vmovn.u16 d0, q15
+ vshr.u16 q15, q12, #8
+ vadd.u16 q12, q12, q13
+ vmovn.u16 d0, q0
bilinear_interleave_src_dst \
mask_fmt, op, 2, d0, d1, q0, d18, d19, q9
bilinear_apply_mask_to_src \
@@ -554,8 +543,7 @@ fname:
q1, q11, d0, d1, d20, d21, d22, d23 \
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
- vshr.u16 q15, q12, #8
- vadd.u16 q12, q12, q13
+ sub TMP1, TMP1, STRIDE
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
@@ -567,9 +555,9 @@ fname:
vmlsl.u16 q2, d6, d30
vmlal.u16 q2, d7, d30
vshll.u16 q8, d18, #8
- bilinear_load_mask mask_fmt, 4, d30
+ bilinear_load_mask mask_fmt, 4, d22
bilinear_load_dst dst_fmt, op, 4, d2, d3, q1
- pld [TMP2, PF_OFFS]
+ pld [TMP1, PF_OFFS]
vmlsl.u16 q8, d18, d31
vmlal.u16 q8, d19, d31
vadd.u16 q12, q12, q13
@@ -577,17 +565,19 @@ fname:
vshrn.u32 d1, q10, #16
vshrn.u32 d4, q2, #16
vshrn.u32 d5, q8, #16
- bilinear_duplicate_mask mask_fmt, 4, d30
+ bilinear_duplicate_mask mask_fmt, 4, d22
+ vshr.u16 q15, q12, #8
vmovn.u16 d0, q0
vmovn.u16 d1, q2
+ vadd.u16 q12, q12, q13
bilinear_interleave_src_dst \
mask_fmt, op, 4, d0, d1, q0, d2, d3, q1
bilinear_apply_mask_to_src \
- mask_fmt, 4, d0, d1, q0, d30, \
+ mask_fmt, 4, d0, d1, q0, d22, \
q3, q8, q9, q10
bilinear_combine \
op, 4, d0, d1, q0, d2, d3, q1, \
- q3, q8, q9, q10, d22
+ q3, q8, q9, q10, d23
bilinear_deinterleave_dst mask_fmt, op, 4, d0, d1, q0
bilinear_store_&dst_fmt 4, q2, q3
.endm
@@ -610,6 +600,7 @@ pixman_asm_function fname
PF_OFFS .req r7
TMP3 .req r8
TMP4 .req r9
+ STRIDE .req r2
mov ip, sp
push {r4, r5, r6, r7, r8, r9}
@@ -617,6 +608,11 @@ pixman_asm_function fname
ldmia ip, {WB, X, UX, WIDTH}
mul PF_OFFS, PF_OFFS, UX
+ .set prefetch_offset, prefetch_distance
+
+ sub STRIDE, BOTTOM, TOP
+ .unreq BOTTOM
+
cmp WIDTH, #0
ble 3f
@@ -626,6 +622,8 @@ pixman_asm_function fname
vdup.u8 d29, WB
vadd.u16 d25, d25, d26
vadd.u16 q13, q13, q13
+ vshr.u16 q15, q12, #8
+ vadd.u16 q12, q12, q13
subs WIDTH, WIDTH, #4
blt 1f
@@ -648,7 +646,6 @@ pixman_asm_function fname
.unreq OUT
.unreq TOP
- .unreq BOTTOM
.unreq WT
.unreq WB
.unreq X
@@ -659,6 +656,7 @@ pixman_asm_function fname
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
+ .unreq STRIDE
.endfunc
.endm
@@ -682,6 +680,7 @@ pixman_asm_function fname
PF_OFFS .req r8
TMP3 .req r9
TMP4 .req r10
+ STRIDE .req r3
mov ip, sp
push {r4, r5, r6, r7, r8, r9, r10, ip}
@@ -689,6 +688,11 @@ pixman_asm_function fname
ldmia ip, {WT, WB, X, UX, WIDTH}
mul PF_OFFS, PF_OFFS, UX
+ .set prefetch_offset, prefetch_distance
+
+ sub STRIDE, BOTTOM, TOP
+ .unreq BOTTOM
+
cmp WIDTH, #0
ble 3f
@@ -698,6 +702,8 @@ pixman_asm_function fname
vdup.u8 d29, WB
vadd.u16 d25, d25, d26
vadd.u16 q13, q13, q13
+ vshr.u16 q15, q12, #8
+ vadd.u16 q12, q12, q13
subs WIDTH, WIDTH, #4
blt 1f
@@ -720,7 +726,6 @@ pixman_asm_function fname
.unreq OUT
.unreq TOP
- .unreq BOTTOM
.unreq WT
.unreq WB
.unreq X
@@ -732,6 +737,7 @@ pixman_asm_function fname
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
+ .unreq STRIDE
.endfunc
.endm
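
The "cache preload for mask and dst" half of the change adds pld instructions so that mask and destination data a fixed number of pixels ahead is already on its way into the cache by the time it is needed. Below is a hedged C analogue using GCC's __builtin_prefetch; the function name, loop shape and buffer names are illustrative, while prefetch_distance plays the role of the assembly's prefetch_offset, with an a8 mask at 1 byte/pixel and an a8r8g8b8 destination at 4 bytes/pixel.

#include <stdint.h>

void
scanline_with_preload(uint8_t *dst, const uint8_t *mask,
                      int width, int prefetch_distance)
{
    for (int i = 0; i < width; i++) {
        /* analogue of "pld [MASK, #prefetch_offset]" */
        __builtin_prefetch(mask + i + prefetch_distance);
        /* analogue of "pld [OUT, #(prefetch_offset * 4)]" */
        __builtin_prefetch(dst + (i + prefetch_distance) * 4);

        /* ... bilinear interpolation, mask application and store of pixel i ... */
    }
}
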