author | Siarhei Siamashka <siarhei.siamashka@nokia.com> | 2009-07-27 01:21:26 +0300 |
---|---|---|
committer | Siarhei Siamashka <siarhei.siamashka@gmail.com> | 2009-10-20 00:21:56 +0300 |
commit | ad484078854572cf640d7ffbb66f1e99328e79b8 (patch) | |
tree | 1b96861683168ff67e6c53be51bd870d83c93353 | |
parent | 358f96d20219b4460bfd8ecf88e69ff10044b577 (diff) | |

ARM: Removal of unused/broken NEON code

-rw-r--r-- | pixman/pixman-arm-neon.c | 786 |
1 file changed, 0 insertions, 786 deletions
diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c index 0a29e500..9caef614 100644 --- a/pixman/pixman-arm-neon.c +++ b/pixman/pixman-arm-neon.c @@ -1901,710 +1901,6 @@ pixman_fill_neon (uint32_t *bits, #endif } -/* TODO: is there a more generic way of doing this being introduced? */ -#define NEON_SCANLINE_BUFFER_PIXELS (1024) - -static inline void -neon_quadword_copy (void * dst, - void * src, - uint32_t count, /* of quadwords */ - uint32_t trailer_count /* of bytes */) -{ - uint8_t *t_dst = dst, *t_src = src; - - /* Uses aligned multi-register loads to maximise read bandwidth - * on uncached memory such as framebuffers - * The accesses do not have the aligned qualifiers, so that the copy - * may convert between aligned-uncached and unaligned-cached memory. - * It is assumed that the CPU can infer alignedness from the address. - */ - -#ifdef USE_GCC_INLINE_ASM - - asm volatile ( - " cmp %[count], #8 \n" - " blt 1f @ skip oversized fragments \n" - "0: @ start with eight quadwords at a time \n" - " sub %[count], %[count], #8 \n" - " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" - " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n" - " vld1.8 {d24, d25, d26, d27}, [%[src]]! \n" - " vld1.8 {d28, d29, d30, d31}, [%[src]]! \n" - " cmp %[count], #8 \n" - " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" - " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n" - " vst1.8 {d24, d25, d26, d27}, [%[dst]]! \n" - " vst1.8 {d28, d29, d30, d31}, [%[dst]]! \n" - " bge 0b \n" - "1: @ four quadwords \n" - " tst %[count], #4 \n" - " beq 2f @ skip oversized fragment \n" - " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" - " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n" - " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" - " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n" - "2: @ two quadwords \n" - " tst %[count], #2 \n" - " beq 3f @ skip oversized fragment \n" - " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" - " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" - "3: @ one quadword \n" - " tst %[count], #1 \n" - " beq 4f @ skip oversized fragment \n" - " vld1.8 {d16, d17}, [%[src]]! \n" - " vst1.8 {d16, d17}, [%[dst]]! 
\n" - "4: @ end \n" - - /* Clobbered input registers marked as input/outputs */ - : [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count) - - /* No unclobbered inputs */ - : - - /* Clobbered vector registers */ - : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", - "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory"); - -#else - - while (count >= 8) - { - uint8x16x4_t t1 = vld4q_u8 (t_src); - uint8x16x4_t t2 = vld4q_u8 (t_src + sizeof(uint8x16x4_t)); - - t_src += sizeof(uint8x16x4_t) * 2; - vst4q_u8 (t_dst, t1); - vst4q_u8 (t_dst + sizeof(uint8x16x4_t), t2); - t_dst += sizeof(uint8x16x4_t) * 2; - count -= 8; - } - - if (count & 4) - { - uint8x16x4_t t1 = vld4q_u8 (t_src); - - t_src += sizeof(uint8x16x4_t); - vst4q_u8 (t_dst, t1); - t_dst += sizeof(uint8x16x4_t); - } - - if (count & 2) - { - uint8x8x4_t t1 = vld4_u8 (t_src); - - t_src += sizeof(uint8x8x4_t); - vst4_u8 (t_dst, t1); - t_dst += sizeof(uint8x8x4_t); - } - - if (count & 1) - { - uint8x16_t t1 = vld1q_u8 (t_src); - - t_src += sizeof(uint8x16_t); - vst1q_u8 (t_dst, t1); - t_dst += sizeof(uint8x16_t); - } - -#endif /* !USE_GCC_INLINE_ASM */ - - if (trailer_count) - { - if (trailer_count & 8) - { - uint8x8_t t1 = vld1_u8 (t_src); - - t_src += sizeof(uint8x8_t); - vst1_u8 (t_dst, t1); - t_dst += sizeof(uint8x8_t); - } - - if (trailer_count & 4) - { - *((uint32_t*) t_dst) = *((uint32_t*) t_src); - - t_dst += 4; - t_src += 4; - } - - if (trailer_count & 2) - { - *((uint16_t*) t_dst) = *((uint16_t*) t_src); - - t_dst += 2; - t_src += 2; - } - - if (trailer_count & 1) - { - *t_dst++ = *t_src++; - } - } -} - -static inline void -solid_over_565_8_pix_neon (uint32_t glyph_colour, - uint16_t *dest, - uint8_t * in_mask, - uint32_t dest_stride, /* bytes, not elements */ - uint32_t mask_stride, - uint32_t count /* 8-pixel groups */) -{ - /* Inner loop of glyph blitter (solid colour, alpha mask) */ - -#ifdef USE_GCC_INLINE_ASM - - asm volatile ( - " vld4.8 {d20[], d21[], d22[], d23[]}, [%[glyph_colour]] @ splat solid colour components \n" - "0: @ loop \n" - " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n" - " vld1.8 {d17}, [%[in_mask]] @ load alpha mask of glyph \n" - " vmull.u8 q9, d17, d23 @ apply glyph colour alpha to mask \n" - " vshrn.u16 d17, q9, #8 @ reformat it to match original mask \n" - " vmvn d18, d17 @ we need the inverse mask for the background \n" - " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n" - " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n" - " vshrn.u16 d4, q0, #3 @ unpack green \n" - " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n" - " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n" - " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n" - " vmull.u8 q1, d2, d18 @ apply inverse mask to background red... \n" - " vmull.u8 q2, d4, d18 @ ...green... \n" - " vmull.u8 q3, d6, d18 @ ...blue \n" - " subs %[count], %[count], #1 @ decrement/test loop counter \n" - " vmlal.u8 q1, d17, d22 @ add masked foreground red... \n" - " vmlal.u8 q2, d17, d21 @ ...green... 
\n" - " vmlal.u8 q3, d17, d20 @ ...blue \n" - " add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait \n" - " vsri.16 q1, q2, #5 @ pack green behind red \n" - " vsri.16 q1, q3, #11 @ pack blue into pixels \n" - " vst1.16 {d2, d3}, [%[dest]] @ store composited pixels \n" - " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n" - " bne 0b @ next please \n" - - /* Clobbered registers marked as input/outputs */ - : [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count) - - /* Inputs */ - : [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour) - - /* Clobbers, including the inputs we modify, and potentially lots of memory */ - : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d19", - "d20", "d21", "d22", "d23", "d24", "d25", "cc", "memory" - ); - -#else - - uint8x8x4_t solid_colour = vld4_dup_u8 ((uint8_t*) &glyph_colour); - - while (count--) - { - uint16x8_t pixels = vld1q_u16 (dest); - uint8x8_t mask = vshrn_n_u16 (vmull_u8 (solid_colour.val[3], vld1_u8 (in_mask)), 8); - uint8x8_t mask_image = vmvn_u8 (mask); - - uint8x8_t t_red = vshrn_n_u16 (pixels, 8); - uint8x8_t t_green = vshrn_n_u16 (pixels, 3); - uint8x8_t t_blue = vshrn_n_u16 (vsli_n_u8 (pixels, pixels, 5), 2); - - uint16x8_t s_red = vmull_u8 (vsri_n_u8 (t_red, t_red, 5), mask_image); - uint16x8_t s_green = vmull_u8 (vsri_n_u8 (t_green, t_green, 6), mask_image); - uint16x8_t s_blue = vmull_u8 (t_blue, mask_image); - - s_red = vmlal (s_red, mask, solid_colour.val[2]); - s_green = vmlal (s_green, mask, solid_colour.val[1]); - s_blue = vmlal (s_blue, mask, solid_colour.val[0]); - - pixels = vsri_n_u16 (s_red, s_green, 5); - pixels = vsri_n_u16 (pixels, s_blue, 11); - vst1q_u16 (dest, pixels); - - dest += dest_stride; - mask += mask_stride; - } - -#endif -} - -#if 0 /* this is broken currently */ -static void -neon_composite_over_n_8_0565 (pixman_implementation_t * impl, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint16_t *dst_line, *aligned_line; - uint8_t *mask_line; - uint32_t dst_stride, mask_stride; - uint32_t kernel_count, copy_count, copy_tail; - uint8_t kernel_offset, copy_offset; - - src = _pixman_image_get_solid (src_image, dst_image->bits.format); - - /* bail out if fully transparent or degenerate */ - srca = src >> 24; - if (src == 0) - return; - - if (width == 0 || height == 0) - return; - - if (width > NEON_SCANLINE_BUFFER_PIXELS) - { - /* split the blit, so we can use a fixed-size scanline buffer - * TODO: there must be a more elegant way of doing this. - */ - int x; - for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) - { - neon_composite_over_n_8_0565 ( - impl, op, - src_image, mask_image, dst_image, - src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y, - (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? 
width - x : NEON_SCANLINE_BUFFER_PIXELS, height); - } - - return; - } - - PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - /* keep within minimum number of aligned quadwords on width - * while also keeping the minimum number of columns to process - */ - { - unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF; - unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF; - unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF; - - /* the fast copy should be quadword aligned */ - copy_offset = dst_line - ((uint16_t*) aligned_left); - aligned_line = dst_line - copy_offset; - copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4); - copy_tail = 0; - - if (aligned_right - aligned_left > ceiling_length) - { - /* unaligned routine is tightest */ - kernel_count = (uint32_t) (ceiling_length >> 4); - kernel_offset = copy_offset; - } - else - { - /* aligned routine is equally tight, so it is safer to align */ - kernel_count = copy_count; - kernel_offset = 0; - } - - /* We should avoid reading beyond scanline ends for safety */ - if (aligned_line < (dst_line - dest_x) || - (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width)) - { - /* switch to precise read */ - copy_offset = kernel_offset = 0; - aligned_line = dst_line; - kernel_count = (uint32_t) (ceiling_length >> 4); - copy_count = (width * sizeof(*dst_line)) >> 4; - copy_tail = (width * sizeof(*dst_line)) & 0xF; - } - } - - { - uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */ - uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; - int y = height; - - /* row-major order */ - /* left edge, middle block, right edge */ - for ( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride) - { - /* We don't want to overrun the edges of the glyph, - * so realign the edge data into known buffers - */ - neon_quadword_copy (glyph_line + copy_offset, mask_line, width >> 4, width & 0xF); - - /* Uncached framebuffer access is really, really slow - * if we do it piecemeal. It should be much faster if we - * grab it all at once. One scanline should easily fit in - * L1 cache, so this should not waste RAM bandwidth. 
- */ - neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail); - - /* Apply the actual filter */ - solid_over_565_8_pix_neon ( - src, scan_line + kernel_offset, - glyph_line + kernel_offset, 8 * sizeof(*dst_line), - 8, kernel_count); - - /* Copy the modified scanline back */ - neon_quadword_copy (dst_line, scan_line + copy_offset, - width >> 3, (width & 7) * 2); - } - } -} -#endif - -#ifdef USE_GCC_INLINE_ASM - -static inline void -plain_over_565_8_pix_neon (uint32_t colour, - uint16_t *dest, - uint32_t dest_stride, /* bytes, not elements */ - uint32_t count /* 8-pixel groups */) -{ - /* Inner loop for plain translucent rects - * (solid colour without alpha mask) - */ - asm volatile ( - " vld4.8 {d20[], d21[], d22[], d23[]}, [%[colour]] @ solid colour load/splat \n" - " vmull.u8 q12, d23, d22 @ premultiply alpha red \n" - " vmull.u8 q13, d23, d21 @ premultiply alpha green \n" - " vmull.u8 q14, d23, d20 @ premultiply alpha blue \n" - " vmvn d18, d23 @ inverse alpha for background \n" - "0: @ loop\n" - " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n" - " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n" - " vshrn.u16 d4, q0, #3 @ unpack green \n" - " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n" - " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n" - " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n" - " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n" - " vmov q0, q12 @ retrieve foreground red \n" - " vmlal.u8 q0, d2, d18 @ blend red - my kingdom for a four-operand MLA \n" - " vmov q1, q13 @ retrieve foreground green \n" - " vmlal.u8 q1, d4, d18 @ blend green \n" - " vmov q2, q14 @ retrieve foreground blue \n" - " vmlal.u8 q2, d6, d18 @ blend blue \n" - " subs %[count], %[count], #1 @ decrement/test loop counter \n" - " vsri.16 q0, q1, #5 @ pack green behind red \n" - " vsri.16 q0, q2, #11 @ pack blue into pixels \n" - " vst1.16 {d0, d1}, [%[dest]] @ store composited pixels \n" - " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n" - " bne 0b @ next please \n" - - /* Clobbered registers marked as input/outputs */ - : [dest] "+r" (dest), [count] "+r" (count) - - /* Inputs */ - : [dest_stride] "r" (dest_stride), [colour] "r" (&colour) - - /* Clobbers, including the inputs we modify, and - * potentially lots of memory - */ - : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d18", "d19", - "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", - "cc", "memory" - ); -} - -static void -neon_composite_over_n_0565 (pixman_implementation_t * impl, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint16_t *dst_line, *aligned_line; - uint32_t dst_stride; - uint32_t kernel_count, copy_count, copy_tail; - uint8_t kernel_offset, copy_offset; - - src = _pixman_image_get_solid (src_image, dst_image->bits.format); - - /* bail out if fully transparent */ - srca = src >> 24; - if (src == 0) - return; - - if (width == 0 || height == 0) - return; - - if (width > NEON_SCANLINE_BUFFER_PIXELS) - { - /* split the blit, so we can use a fixed-size scanline buffer * - * TODO: there must be a more elegant way of doing this. 
- */ - int x; - - for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) - { - neon_composite_over_n_0565 ( - impl, op, - src_image, mask_image, dst_image, - src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y, - (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height); - } - return; - } - - PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - - /* keep within minimum number of aligned quadwords on width - * while also keeping the minimum number of columns to process - */ - { - unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF; - unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF; - unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF; - - /* the fast copy should be quadword aligned */ - copy_offset = dst_line - ((uint16_t*) aligned_left); - aligned_line = dst_line - copy_offset; - copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4); - copy_tail = 0; - - if (aligned_right - aligned_left > ceiling_length) - { - /* unaligned routine is tightest */ - kernel_count = (uint32_t) (ceiling_length >> 4); - kernel_offset = copy_offset; - } - else - { - /* aligned routine is equally tight, so it is safer to align */ - kernel_count = copy_count; - kernel_offset = 0; - } - - /* We should avoid reading beyond scanline ends for safety */ - if (aligned_line < (dst_line - dest_x) || - (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width)) - { - /* switch to precise read */ - copy_offset = kernel_offset = 0; - aligned_line = dst_line; - kernel_count = (uint32_t) (ceiling_length >> 4); - copy_count = (width * sizeof(*dst_line)) >> 4; - copy_tail = (width * sizeof(*dst_line)) & 0xF; - } - } - - { - uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */ - - /* row-major order */ - /* left edge, middle block, right edge */ - for ( ; height--; aligned_line += dst_stride, dst_line += dst_stride) - { - /* Uncached framebuffer access is really, really slow if we do it piecemeal. - * It should be much faster if we grab it all at once. - * One scanline should easily fit in L1 cache, so this should - * not waste RAM bandwidth. - */ - neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail); - - /* Apply the actual filter */ - plain_over_565_8_pix_neon ( - src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count); - - /* Copy the modified scanline back */ - neon_quadword_copy ( - dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2); - } - } -} - -static inline void -ARGB8_over_565_8_pix_neon (uint32_t *src, - uint16_t *dest, - uint32_t src_stride, /* bytes, not elements */ - uint32_t count /* 8-pixel groups */) -{ - asm volatile ( - "0: @ loop\n" - " pld [%[src], %[src_stride]] @ preload from next scanline \n" - " vld1.16 {d0, d1}, [%[dest]] @ load pixels from framebuffer \n" - " vld4.8 {d20, d21, d22, d23},[%[src]]! 
@ load source image pixels \n" - " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n" - " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n" - " vshrn.u16 d4, q0, #3 @ unpack green \n" - " vmvn d18, d23 @ we need the inverse alpha for the background \n" - " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n" - " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n" - " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n" - " vmull.u8 q1, d2, d18 @ apply inverse alpha to background red... \n" - " vmull.u8 q2, d4, d18 @ ...green... \n" - " vmull.u8 q3, d6, d18 @ ...blue \n" - " subs %[count], %[count], #1 @ decrement/test loop counter \n" - " vmlal.u8 q1, d23, d22 @ add blended foreground red... \n" - " vmlal.u8 q2, d23, d21 @ ...green... \n" - " vmlal.u8 q3, d23, d20 @ ...blue \n" - " vsri.16 q1, q2, #5 @ pack green behind red \n" - " vsri.16 q1, q3, #11 @ pack blue into pixels \n" - " vst1.16 {d2, d3}, [%[dest]]! @ store composited pixels \n" - " bne 0b @ next please \n" - - /* Clobbered registers marked as input/outputs */ - : [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count) - - /* Inputs */ - : [src_stride] "r" (src_stride) - - /* Clobbers, including the inputs we modify, and potentially lots of memory */ - : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d20", - "d21", "d22", "d23", "cc", "memory" - ); -} - -static void -neon_composite_over_8888_0565 (pixman_implementation_t * impl, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *src_line; - uint16_t *dst_line, *aligned_line; - uint32_t dst_stride, src_stride; - uint32_t kernel_count, copy_count, copy_tail; - uint8_t kernel_offset, copy_offset; - - /* we assume mask is opaque - * so the only alpha to deal with is embedded in src - */ - if (width > NEON_SCANLINE_BUFFER_PIXELS) - { - /* split the blit, so we can use a fixed-size scanline buffer */ - int x; - for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) - { - neon_composite_over_8888_0565 ( - impl, op, - src_image, mask_image, dst_image, - src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y, - (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? 
width - x : NEON_SCANLINE_BUFFER_PIXELS, height); - } - return; - } - - PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - /* keep within minimum number of aligned quadwords on width - * while also keeping the minimum number of columns to process - */ - { - unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF; - unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF; - unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF; - - /* the fast copy should be quadword aligned */ - copy_offset = dst_line - ((uint16_t*) aligned_left); - aligned_line = dst_line - copy_offset; - copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4); - copy_tail = 0; - - if (aligned_right - aligned_left > ceiling_length) - { - /* unaligned routine is tightest */ - kernel_count = (uint32_t) (ceiling_length >> 4); - kernel_offset = copy_offset; - } - else - { - /* aligned routine is equally tight, so it is safer to align */ - kernel_count = copy_count; - kernel_offset = 0; - } - - /* We should avoid reading beyond scanline ends for safety */ - if (aligned_line < (dst_line - dest_x) || - (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width)) - { - /* switch to precise read */ - copy_offset = kernel_offset = 0; - aligned_line = dst_line; - kernel_count = (uint32_t) (ceiling_length >> 4); - copy_count = (width * sizeof(*dst_line)) >> 4; - copy_tail = (width * sizeof(*dst_line)) & 0xF; - } - } - - /* Preload the first input scanline */ - { - uint8_t *src_ptr = (uint8_t*) src_line; - uint32_t count = (width + 15) / 16; - -#ifdef USE_GCC_INLINE_ASM - asm volatile ( - "0: @ loop \n" - " subs %[count], %[count], #1 \n" - " pld [%[src]] \n" - " add %[src], %[src], #64 \n" - " bgt 0b \n" - - /* Clobbered input registers marked as input/outputs */ - : [src] "+r" (src_ptr), [count] "+r" (count) - : /* no unclobbered inputs */ - : "cc" - ); -#else - do - { - __pld (src_ptr); - src_ptr += 64; - } - while (--count); -#endif - } - - { - uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */ - - /* row-major order */ - /* left edge, middle block, right edge */ - for ( ; height--; src_line += src_stride, aligned_line += dst_stride) - { - /* Uncached framebuffer access is really, really slow if we do - * it piecemeal. It should be much faster if we grab it all at - * once. One scanline should easily fit in L1 cache, so this - * should not waste RAM bandwidth. 
- */ - neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail); - - /* Apply the actual filter */ - ARGB8_over_565_8_pix_neon ( - src_line, scan_line + kernel_offset, - src_stride * sizeof(*src_line), kernel_count); - - /* Copy the modified scanline back */ - neon_quadword_copy (dst_line, - scan_line + copy_offset, - width >> 3, (width & 7) * 2); - } - } -} - -#endif /* USE_GCC_INLINE_ASM */ - static const pixman_fast_path_t arm_neon_fast_path_array[] = { { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_n_8_8, 0 }, @@ -2618,12 +1914,6 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] = #ifdef USE_GCC_INLINE_ASM { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 }, { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 }, -#if 0 /* this code has some bugs */ - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_n_0565, 0 }, - { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_n_0565, 0 }, - { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 }, - { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 }, -#endif #endif { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 }, @@ -2674,79 +1964,6 @@ arm_neon_composite (pixman_implementation_t *imp, } static pixman_bool_t -pixman_blt_neon (void *src_bits, - void *dst_bits, - int src_stride, - int dst_stride, - int src_bpp, - int dst_bpp, - int src_x, - int src_y, - int dst_x, - int dst_y, - int width, - int height) -{ - if (!width || !height) - return TRUE; - - /* accelerate only straight copies involving complete bytes */ - if (src_bpp != dst_bpp || (src_bpp & 7)) - return FALSE; - - { - uint32_t bytes_per_pixel = src_bpp >> 3; - uint32_t byte_width = width * bytes_per_pixel; - /* parameter is in words for some reason */ - int32_t src_stride_bytes = src_stride * 4; - int32_t dst_stride_bytes = dst_stride * 4; - uint8_t *src_bytes = ((uint8_t*) src_bits) + - src_y * src_stride_bytes + src_x * bytes_per_pixel; - uint8_t *dst_bytes = ((uint8_t*) dst_bits) + - dst_y * dst_stride_bytes + dst_x * bytes_per_pixel; - uint32_t quadword_count = byte_width / 16; - uint32_t offset = byte_width % 16; - - while (height--) - { - neon_quadword_copy (dst_bytes, src_bytes, quadword_count, offset); - src_bytes += src_stride_bytes; - dst_bytes += dst_stride_bytes; - } - } - - return TRUE; -} - -static pixman_bool_t -arm_neon_blt (pixman_implementation_t *imp, - uint32_t * src_bits, - uint32_t * dst_bits, - int src_stride, - int dst_stride, - int src_bpp, - int dst_bpp, - int src_x, - int src_y, - int dst_x, - int dst_y, - int width, - int height) -{ - if (pixman_blt_neon ( - src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp, - src_x, src_y, dst_x, dst_y, width, height)) - { - return TRUE; - } - - return _pixman_implementation_blt ( - imp->delegate, - src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp, - src_x, src_y, dst_x, dst_y, width, height); -} - -static pixman_bool_t arm_neon_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, @@ -2771,9 +1988,6 @@ _pixman_implementation_create_arm_neon (void) pixman_implementation_t *imp = _pixman_implementation_create (general); imp->composite = arm_neon_composite; -#if 0 /* this code has 
some bugs */
-	imp->blt = arm_neon_blt;
-#endif
 	imp->fill = arm_neon_fill;
 
 	return imp;
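
The removed neon_quadword_copy transferred data as whole 16-byte quadwords plus a byte trailer, using multi-register NEON loads and stores to get reasonable read bandwidth out of uncached framebuffer memory. A minimal sketch of the same quadword-plus-trailer split, using intrinsics rather than inline assembly (the helper name copy_quadwords_neon is made up for illustration; it assumes an ARM target with <arm_neon.h> and issues one quadword per iteration instead of the original's four-quadword bursts):

```c
#include <arm_neon.h>
#include <stdint.h>

/* Illustrative only -- not the deleted routine.  Copies `count` 16-byte
 * quadwords followed by `trailer` leftover bytes. */
static void
copy_quadwords_neon (uint8_t *dst, const uint8_t *src,
                     uint32_t count, uint32_t trailer)
{
    while (count--)
    {
        vst1q_u8 (dst, vld1q_u8 (src));   /* one 128-bit load/store per quadword */
        dst += 16;
        src += 16;
    }

    while (trailer--)                     /* odd bytes at the end of the row */
        *dst++ = *src++;
}
```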
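The removed inner loops (solid_over_565_8_pix_neon, plain_over_565_8_pix_neon, ARGB8_over_565_8_pix_neon) all did the same per-pixel work: widen the r5g6b5 destination channels to 8 bits by replicating their top bits (the vshrn/vsli/vsri register games), blend foreground against background with an 8-bit mask, and repack to 565. A scalar C sketch of that arithmetic for a single pixel follows; the function name is hypothetical, it keeps the same cheap >>8 in place of a true divide by 255, and it mirrors what the removed loops computed rather than a verified premultiplied-alpha OVER (the code was, after all, removed as broken):

```c
#include <stdint.h>

/* Illustrative scalar equivalent of the removed NEON inner loops. */
static uint16_t
over_565_one_pixel (uint32_t fg_argb, uint16_t bg565, uint8_t mask)
{
    uint8_t a  = (uint8_t) (fg_argb >> 24);
    uint8_t m  = (uint8_t) ((mask * a) >> 8);   /* mask scaled by source alpha */
    uint8_t im = (uint8_t) ~m;                  /* inverse mask for the background */

    /* widen the 5/6/5 destination channels to 8 bits by replicating top bits */
    uint8_t dr = (bg565 >> 11) & 0x1f;      dr = (uint8_t) ((dr << 3) | (dr >> 2));
    uint8_t dg = (bg565 >>  5) & 0x3f;      dg = (uint8_t) ((dg << 2) | (dg >> 4));
    uint8_t db = (uint8_t) (bg565 & 0x1f);  db = (uint8_t) ((db << 3) | (db >> 2));

    uint8_t sr = (uint8_t) (fg_argb >> 16);
    uint8_t sg = (uint8_t) (fg_argb >> 8);
    uint8_t sb = (uint8_t)  fg_argb;

    /* fg*m + bg*(255-m), keeping the high byte of each 16-bit product */
    uint16_t r = (uint16_t) ((sr * m + dr * im) >> 8);
    uint16_t g = (uint16_t) ((sg * m + dg * im) >> 8);
    uint16_t b = (uint16_t) ((sb * m + db * im) >> 8);

    /* repack to r5g6b5 */
    return (uint16_t) (((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3));
}
```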
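Each removed composite function wrapped its inner loop in the same scanline-buffering pattern: bulk-copy one (quadword-aligned) scanline of the uncached framebuffer into an on-stack buffer, composite there, then bulk-copy the result back, splitting rectangles wider than NEON_SCANLINE_BUFFER_PIXELS into fixed-width passes. An outline of that pattern, with hypothetical names: blend_scanline is an empty stand-in for the NEON kernel, and memcpy stands in for neon_quadword_copy and the alignment bookkeeping:

```c
#include <stdint.h>
#include <string.h>

#define SCANLINE_PIXELS 1024   /* same fixed buffer size the removed code used */

/* stand-in for the removed NEON inner loops */
static void
blend_scanline (uint16_t *pixels, const uint8_t *mask, uint32_t fg, int width)
{
    (void) pixels; (void) mask; (void) fg; (void) width;
    /* per-pixel OVER blend would go here */
}

/* width must be <= SCANLINE_PIXELS; the removed callers split wider
 * rectangles into SCANLINE_PIXELS-wide passes */
static void
composite_rect (uint16_t *dst, uint32_t dst_stride /* in pixels */,
                const uint8_t *mask, uint32_t mask_stride,
                uint32_t fg, int width, int height)
{
    uint16_t buf[SCANLINE_PIXELS];

    while (height--)
    {
        memcpy (buf, dst, width * sizeof (*dst));   /* one bulk read of the scanline    */
        blend_scanline (buf, mask, fg, width);      /* composite in cached stack memory */
        memcpy (dst, buf, width * sizeof (*dst));   /* one bulk write back              */

        dst  += dst_stride;
        mask += mask_stride;
    }
}
```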
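The over_8888_0565 path additionally preloaded the first source scanline into the cache before entering its row loop, stepping through it one 64-byte cache line at a time (later rows were preloaded from inside the blend loop with pld). A sketch using the GCC prefetch builtin in place of inline pld (hypothetical helper; assumes a 32-bit-per-pixel source and a 64-byte cache line, as the original did):

```c
#include <stdint.h>

static void
preload_scanline (const uint8_t *src, int width_pixels)
{
    /* 16 a8r8g8b8 pixels per 64-byte cache line */
    uint32_t count = ((uint32_t) width_pixels + 15) / 16;

    while (count--)
    {
        __builtin_prefetch (src);   /* stands in for the pld instruction */
        src += 64;
    }
}
```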
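Finally, the removed pixman_blt_neon/arm_neon_blt pair accelerated plain copies only: it bailed out unless both images had the same whole-byte depth, converted the word strides to bytes, and ran one quadword copy per row, with arm_neon_blt falling back to the delegate implementation otherwise. A sketch of that row loop, reusing the hypothetical copy_quadwords_neon from the first example (the 0/1 return stands in for pixman_bool_t):

```c
static int
blt_copy (void *src_bits, void *dst_bits,
          int src_stride, int dst_stride,   /* strides in 32-bit words, as pixman passes them */
          int bpp, int src_x, int src_y, int dst_x, int dst_y,
          int width, int height)
{
    if (bpp & 7)
        return 0;   /* only whole-byte formats are handled */

    uint32_t bytes_pp   = (uint32_t) bpp >> 3;
    uint32_t byte_width = (uint32_t) width * bytes_pp;
    uint8_t *src = (uint8_t *) src_bits + src_y * src_stride * 4 + src_x * bytes_pp;
    uint8_t *dst = (uint8_t *) dst_bits + dst_y * dst_stride * 4 + dst_x * bytes_pp;

    while (height--)
    {
        copy_quadwords_neon (dst, src, byte_width / 16, byte_width % 16);
        src += src_stride * 4;
        dst += dst_stride * 4;
    }

    return 1;
}
```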