author | Tony Breeds <tony@bakeyournoodle.com> | 2011-11-30 21:39:21 +0000
committer | Josh Boyer <jwboyer@gmail.com> | 2011-12-09 07:49:27 -0500
commit | e32a03290c72773c304ffeebffed0a63c0a672ae (patch)
tree | 526f104f9451e97b3b9f111bde7568b9db2e6c5b /arch
parent | ca899859f14e510854c5c8908d3ffdfcd34b16de (diff)
powerpc/boot: Add extended precision shifts to the boot wrapper.
The upcoming Currituck patches will need to do 64-bit shifts, which will
fail to link with an undefined symbol without this patch.
I looked at linking against libgcc, but we can't guarantee that libgcc
was compiled with soft-float. Also, reusing ../lib/div64.S or
../kernel/misc_32.S would break the build, as the .o files need to be
built with different flags for the bootwrapper than for the kernel. So for
now the easiest option is to just copy the code from
arch/powerpc/kernel/misc_32.S. I don't think this code changes too often. ;P
Signed-off-by: Tony Breeds <tony@bakeyournoodle.com>
Signed-off-by: Josh Boyer <jwboyer@gmail.com>
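
For context: on a 32-bit powerpc target, GCC lowers a 64-bit shift by a run-time amount into a call to the libgcc helpers __ashldi3, __ashrdi3 and __lshrdi3, and the boot wrapper is linked without libgcc. A minimal sketch of the kind of wrapper code that would hit the undefined-symbol failure (the function name is hypothetical, not from the patch):

```c
/* Hypothetical example: a 64-bit shift by a non-constant amount.
 * On a 32-bit powerpc build GCC typically lowers this to a call
 * to the libgcc helper __lshrdi3, so without the routines added
 * by this patch the boot wrapper fails to link.
 */
unsigned long long shift64_example(unsigned long long val, unsigned int count)
{
	return val >> count;	/* becomes a call to __lshrdi3 */
}
```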
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/boot/div64.S | 52
1 file changed, 52 insertions, 0 deletions
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index d271ab542673..bbcb8a4cc121 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -57,3 +57,55 @@ __div64_32:
 	stw	r8,4(r3)
 	mr	r3,r6		# return the remainder in r3
 	blr
+
+/*
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
+ *
+ * R3/R4 has 64 bit value
+ * R5 has shift count
+ * result in R3/R4
+ *
+ * ashrdi3: arithmetic right shift (sign propagation)
+ * lshrdi3: logical right shift
+ * ashldi3: left shift
+ */
+	.globl __ashrdi3
+__ashrdi3:
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
+	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
+	sraw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
+	blr
+
+	.globl __ashldi3
+__ashldi3:
+	subfic	r6,r5,32
+	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
+	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
+	or	r3,r3,r6	# MSW |= t1
+	slw	r4,r4,r5	# LSW = LSW << count
+	or	r3,r3,r7	# MSW |= t2
+	blr
+
+	.globl __lshrdi3
+__lshrdi3:
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	srw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
+	blr
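
All three routines rely on the same branchless trick: powerpc srw/slw take the shift amount modulo 64 and produce zero for amounts 32..63 (sraw produces the sign fill instead), so the term belonging to the "other" half of the 64-bit value vanishes without a compare or branch. Below is a C model of that behaviour, with hypothetical names and assuming arithmetic right shift on signed ints, covering the two right-shift routines; __ashldi3 is the mirror image of __lshrdi3 and is omitted:

```c
#include <assert.h>
#include <stdint.h>

/* Model powerpc shift semantics: the amount is taken modulo 64;
 * srw/slw give 0 for amounts 32..63, sraw gives the sign fill. */
static uint32_t srw(uint32_t x, uint32_t n)
{
	n &= 63;
	return n > 31 ? 0 : x >> n;
}

static uint32_t slw(uint32_t x, uint32_t n)
{
	n &= 63;
	return n > 31 ? 0 : x << n;
}

static uint32_t sraw(uint32_t x, uint32_t n)
{
	n &= 63;
	if (n > 31)
		return (int32_t)x < 0 ? 0xffffffffu : 0;
	return (uint32_t)((int32_t)x >> n);	/* assumes arithmetic >> */
}

/* C model of the __lshrdi3 sequence: msw:lsw is the 64-bit value,
 * count is 0..63.  Each statement mirrors one instruction. */
static uint64_t model_lshrdi3(uint32_t msw, uint32_t lsw, uint32_t count)
{
	uint32_t t1 = slw(msw, 32 - count);	/* 0 when count > 31 */
	uint32_t t2 = srw(msw, count + 32);	/* 0 when count < 32 */

	lsw = srw(lsw, count) | t1 | t2;
	msw = srw(msw, count);
	return ((uint64_t)msw << 32) | lsw;
}

/* __ashrdi3 needs one extra step: for count < 32, sraw(msw, count+32)
 * yields the sign fill, which the rlwinm/slw pair above discards. */
static uint64_t model_ashrdi3(uint32_t msw, uint32_t lsw, uint32_t count)
{
	uint32_t t1 = slw(msw, 32 - count);	/* 0 when count > 31 */
	uint32_t t2 = sraw(msw, count + 32);	/* sign fill when count < 32 */
	uint32_t t3 = count < 32 ? 32 : 0;	/* the rlwinm-extracted bit */

	t2 = slw(t2, t3);			/* drop the fill when count < 32 */
	lsw = srw(lsw, count) | t1 | t2;
	msw = sraw(msw, count);
	return ((uint64_t)msw << 32) | lsw;
}

int main(void)
{
	assert(model_lshrdi3(0x80000000u, 0, 33) == 0x40000000u);
	assert(model_ashrdi3(0xffffffffu, 0, 4) == 0xfffffffff0000000ull);
	return 0;
}
```

For example, model_lshrdi3(0x80000000, 0, 33) returns 0x40000000, matching 0x8000000000000000ULL >> 33, and the __ashrdi3 model propagates the sign through both words.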