author     Arun Raghavan <arun.raghavan@collabora.co.uk>  2012-10-30 12:04:42 +0530
committer  Arun Raghavan <arun.raghavan@collabora.co.uk>  2012-10-30 20:34:21 +0530
commit     94039790f8cd05542a6651f924fa1818ea1af605 (patch)
tree       51e8c214a78e8e730e5efc84e87a83cdbf24910d
parent     96fa87086dc5624eb4646ba126152c2e9e5b1257 (diff)
svolume: Fix ARM alignment issues
As Peter Meerwald <p.meerwald@bct-electronic.com> discovered, the performance of our ARM svolume code is quite terrible when the incoming samples are not word-aligned. This can very easily be the case, since the architecture only requires that the samples be 16-bit aligned, and we might end up running the innermost loop after processing modulo-4 samples. The performance degradation was ~50x on a Cortex A9 (Pandaboard).

This reworks the svolume logic to first consume enough samples to make sure the rest is word-aligned, and reorders the processing to work with 4 samples at a time first and then finally deal with the remainder. With this, performance is comparable for arbitrary alignments (~3x faster than the C code).
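In plain C, the strategy reads roughly as follows. This is only a minimal sketch, not the committed code: scalar_volume() and volume_s16_sketch() are hypothetical names, the real fast path is the hand-written asm in the diff below, and the sketch aligns to a 4-byte word whereas the patch uses PA_ALIGN(), which rounds up to the pointer size.

    #include <stdint.h>

    /* Per-sample reference path: volumes[] holds 16.16 fixed-point gain
     * factors, matching the semantics of the generic C implementation. */
    static void scalar_volume(int16_t *samples, const int32_t *volumes,
                              unsigned channels, unsigned nsamples, unsigned chan) {
        while (nsamples--) {
            int32_t v = (int32_t) (((int64_t) *samples * volumes[chan]) >> 16);
            if (v > 0x7FFF) v = 0x7FFF;
            if (v < -0x8000) v = -0x8000;
            *samples++ = (int16_t) v;
            if (++chan == channels)
                chan = 0;
        }
    }

    static void volume_s16_sketch(int16_t *samples, const int32_t *volumes,
                                  unsigned channels, unsigned length) {
        /* Bytes from 'samples' to the next 4-byte boundary: 0 or 2, since
         * the samples themselves are always 16-bit aligned. 'length' is in
         * bytes, as in the real function. */
        unsigned rem = (unsigned) ((0U - (uintptr_t) samples) & 3);
        unsigned head, body;

        if (rem >= length) {
            /* Too short to ever reach an aligned word: scalar path only. */
            scalar_volume(samples, volumes, channels, length / 2, 0);
            return;
        }

        head = rem / 2;             /* unaligned samples peeled off up front */
        body = (length - rem) / 2;  /* word-aligned samples for the fast path */

        /* Slow path for the unaligned head... */
        scalar_volume(samples, volumes, channels, head, 0);

        /* ...then the word-aligned bulk. In the patch this is the asm that
         * handles 4 samples per iteration, then 2, then a final odd sample;
         * the volume index is advanced by head % channels so the channel
         * phase stays correct across the split. */
        scalar_volume(samples + head, volumes, channels, body, head % channels);
    }

Because the buffer is always at least 16-bit aligned, the peeled head is at most one sample, so nearly the entire buffer runs on the word-aligned fast path.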
-rw-r--r--  src/pulsecore/svolume_arm.c | 101
1 file changed, 60 insertions(+), 41 deletions(-)
diff --git a/src/pulsecore/svolume_arm.c b/src/pulsecore/svolume_arm.c
index 08b33888e..f325d774c 100644
--- a/src/pulsecore/svolume_arm.c
+++ b/src/pulsecore/svolume_arm.c
@@ -40,101 +40,117 @@
" addcs r0, %1 \n\t" \
" movcs r6, r0 \n\t"
+static pa_do_volume_func_t _volume_ref;
+
static void pa_volume_s16ne_arm(int16_t *samples, const int32_t *volumes, unsigned channels, unsigned length) {
/* Channels must be at least 4, and always a multiple of the original number.
* This is also the max amount we overread the volume array, which should
* have enough padding. */
const int32_t *ve = volumes + (channels == 3 ? 6 : PA_MAX (4U, channels));
+ unsigned rem = PA_ALIGN((size_t) samples) - (size_t) samples;
- __asm__ __volatile__ (
- " mov r6, %1 \n\t" /* r6 = volumes */
- " mov %3, %3, LSR #1 \n\t" /* length /= sizeof (int16_t) */
- " tst %3, #1 \n\t" /* check for odd samples */
- " beq 2f \n\t"
+ /* Make sure we're word-aligned, else performance _really_ sucks */
+ if (rem) {
+ _volume_ref(samples, volumes, channels, rem < length ? rem : length);
- "1: \n\t" /* odd samples volumes */
- " ldr r0, [r6], #4 \n\t" /* r0 = volume */
- " ldrh r2, [%0] \n\t" /* r2 = sample */
-
- " smulwb r0, r0, r2 \n\t" /* r0 = (r0 * r2) >> 16 */
- " ssat r0, #16, r0 \n\t" /* r0 = PA_CLAMP(r0, 0x7FFF) */
+ if (rem < length) {
+ length -= rem;
+ samples += rem / sizeof(*samples);
+ } else
+ return; /* we're done */
+ }
- " strh r0, [%0], #2 \n\t" /* sample = r0 */
+ __asm__ __volatile__ (
+ " mov r6, %4 \n\t" /* r6 = volumes + rem */
+ " mov %3, %3, LSR #1 \n\t" /* length /= sizeof (int16_t) */
- MOD_INC()
+ " cmp %3, #4 \n\t" /* check for 4+ samples */
+ " blt 2f \n\t"
- "2: \n\t"
- " mov %3, %3, LSR #1 \n\t"
- " tst %3, #1 \n\t" /* check for odd samples */
- " beq 4f \n\t"
+ /* See final case for how the multiplication works */
- "3: \n\t"
- " ldrd r2, [r6], #8 \n\t" /* 2 samples at a time */
- " ldr r0, [%0] \n\t"
+ "1: \n\t"
+ " ldrd r2, [r6], #8 \n\t" /* 4 samples at a time */
+ " ldrd r4, [r6], #8 \n\t"
+ " ldrd r0, [%0] \n\t"
#ifdef WORDS_BIGENDIAN
" smulwt r2, r2, r0 \n\t"
" smulwb r3, r3, r0 \n\t"
+ " smulwt r4, r4, r1 \n\t"
+ " smulwb r5, r5, r1 \n\t"
#else
" smulwb r2, r2, r0 \n\t"
" smulwt r3, r3, r0 \n\t"
+ " smulwb r4, r4, r1 \n\t"
+ " smulwt r5, r5, r1 \n\t"
#endif
" ssat r2, #16, r2 \n\t"
" ssat r3, #16, r3 \n\t"
+ " ssat r4, #16, r4 \n\t"
+ " ssat r5, #16, r5 \n\t"
#ifdef WORDS_BIGENDIAN
" pkhbt r0, r3, r2, LSL #16 \n\t"
+ " pkhbt r1, r5, r4, LSL #16 \n\t"
#else
" pkhbt r0, r2, r3, LSL #16 \n\t"
+ " pkhbt r1, r4, r5, LSL #16 \n\t"
#endif
- " str r0, [%0], #4 \n\t"
+ " strd r0, [%0], #8 \n\t"
MOD_INC()
- "4: \n\t"
- " movs %3, %3, LSR #1 \n\t"
- " beq 6f \n\t"
+ " subs %3, %3, #4 \n\t"
+ " cmp %3, #4 \n\t"
+ " bge 1b \n\t"
- "5: \n\t"
- " ldrd r2, [r6], #8 \n\t" /* 4 samples at a time */
- " ldrd r4, [r6], #8 \n\t"
- " ldrd r0, [%0] \n\t"
+ "2: \n\t"
+ " cmp %3, #2 \n\t"
+ " blt 3f \n\t"
+
+ " ldrd r2, [r6], #8 \n\t" /* 2 samples at a time */
+ " ldr r0, [%0] \n\t"
#ifdef WORDS_BIGENDIAN
" smulwt r2, r2, r0 \n\t"
" smulwb r3, r3, r0 \n\t"
- " smulwt r4, r4, r1 \n\t"
- " smulwb r5, r5, r1 \n\t"
#else
" smulwb r2, r2, r0 \n\t"
" smulwt r3, r3, r0 \n\t"
- " smulwb r4, r4, r1 \n\t"
- " smulwt r5, r5, r1 \n\t"
#endif
" ssat r2, #16, r2 \n\t"
" ssat r3, #16, r3 \n\t"
- " ssat r4, #16, r4 \n\t"
- " ssat r5, #16, r5 \n\t"
#ifdef WORDS_BIGENDIAN
" pkhbt r0, r3, r2, LSL #16 \n\t"
- " pkhbt r1, r5, r4, LSL #16 \n\t"
#else
" pkhbt r0, r2, r3, LSL #16 \n\t"
- " pkhbt r1, r4, r5, LSL #16 \n\t"
#endif
- " strd r0, [%0], #8 \n\t"
+ " str r0, [%0], #4 \n\t"
MOD_INC()
- " subs %3, %3, #1 \n\t"
- " bne 5b \n\t"
- "6: \n\t"
+ " subs %3, %3, #2 \n\t"
+
+ "3: \n\t" /* check for odd # of samples */
+ " cmp %3, #1 \n\t"
+ " bne 4f \n\t"
+
+ " ldr r0, [r6], #4 \n\t" /* r0 = volume */
+ " ldrh r2, [%0] \n\t" /* r2 = sample */
+
+ " smulwb r0, r0, r2 \n\t" /* r0 = (r0 * r2) >> 16 */
+ " ssat r0, #16, r0 \n\t" /* r0 = PA_CLAMP(r0, 0x7FFF) */
+
+ " strh r0, [%0], #2 \n\t" /* sample = r0 */
+
+ "4: \n\t"
: "+r" (samples), "+r" (volumes), "+r" (ve), "+r" (length)
- :
+ : "r" (volumes + ((rem / sizeof(*samples)) % channels))
: "r6", "r5", "r4", "r3", "r2", "r1", "r0", "cc"
);
}
@@ -145,6 +161,9 @@ void pa_volume_func_init_arm(pa_cpu_arm_flag_t flags) {
#if defined (__arm__) && defined (HAVE_ARMV6)
pa_log_info("Initialising ARM optimized volume functions.");
+ if (!_volume_ref)
+ _volume_ref = pa_get_volume_func(PA_SAMPLE_S16NE);
+
pa_set_volume_func(PA_SAMPLE_S16NE, (pa_do_volume_func_t) pa_volume_s16ne_arm);
#endif /* defined (__arm__) && defined (HAVE_ARMV6) */
}
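The init hunk boils down to a "cache the current implementation before overriding it" pattern. The sketch below illustrates that pattern with stand-in names; the dispatch slot and function names here are purely illustrative, and only pa_get_volume_func()/pa_set_volume_func() from the hunk above are the real PulseAudio helpers.

    #include <stdint.h>

    typedef void (*volume_func_t)(int16_t *samples, const int32_t *volumes,
                                  unsigned channels, unsigned length);

    static void volume_s16ne_generic(int16_t *samples, const int32_t *volumes,
                                     unsigned channels, unsigned length) {
        /* plays the role of the plain C reference implementation */
        (void) samples; (void) volumes; (void) channels; (void) length;
    }

    static volume_func_t current_s16ne_func = volume_s16ne_generic; /* dispatch slot */
    static volume_func_t volume_ref;                                /* cached fallback */

    static void volume_s16ne_arm(int16_t *samples, const int32_t *volumes,
                                 unsigned channels, unsigned length) {
        /* Delegate the unaligned head to the cached reference, then run the
         * word-aligned asm loop (elided in this sketch). */
        volume_ref(samples, volumes, channels, length);
    }

    static void volume_init_arm(void) {
        /* Cache whatever is currently registered *before* installing the ARM
         * version, and only once, so a repeated init never makes volume_ref
         * point at volume_s16ne_arm itself. */
        if (!volume_ref)
            volume_ref = current_s16ne_func;

        current_s16ne_func = volume_s16ne_arm;
    }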