Diffstat (limited to 'include/asm-parisc')
-rw-r--r--  include/asm-parisc/assembly.h        |  71
-rw-r--r--  include/asm-parisc/atomic.h          |  20
-rw-r--r--  include/asm-parisc/bitops.h          | 290
-rw-r--r--  include/asm-parisc/cacheflush.h      |  35
-rw-r--r--  include/asm-parisc/dma-mapping.h     |   8
-rw-r--r--  include/asm-parisc/errno.h           |   1
-rw-r--r--  include/asm-parisc/grfioctl.h        |   2
-rw-r--r--  include/asm-parisc/ide.h             |   1
-rw-r--r--  include/asm-parisc/irq.h             |   5
-rw-r--r--  include/asm-parisc/led.h             |   3
-rw-r--r--  include/asm-parisc/mmzone.h          |   6
-rw-r--r--  include/asm-parisc/parisc-device.h   |   7
-rw-r--r--  include/asm-parisc/pci.h             |   2
-rw-r--r--  include/asm-parisc/pgtable.h         |   5
-rw-r--r--  include/asm-parisc/processor.h       |  19
-rw-r--r--  include/asm-parisc/psw.h             |  51
-rw-r--r--  include/asm-parisc/ptrace.h          |   2
-rw-r--r--  include/asm-parisc/semaphore.h       |   3
-rw-r--r--  include/asm-parisc/smp.h             |   7
-rw-r--r--  include/asm-parisc/spinlock.h        |  24
-rw-r--r--  include/asm-parisc/spinlock_types.h  |   8
-rw-r--r--  include/asm-parisc/system.h          |  48
-rw-r--r--  include/asm-parisc/tlbflush.h        |  24
-rw-r--r--  include/asm-parisc/types.h           |   2
-rw-r--r--  include/asm-parisc/unistd.h          |  17
25 files changed, 376 insertions(+), 285 deletions(-)
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index 30b023411fef..3ce3440d1b0c 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -21,7 +21,9 @@
#ifndef _PARISC_ASSEMBLY_H
#define _PARISC_ASSEMBLY_H
-#ifdef __LP64__
+#define CALLEE_FLOAT_FRAME_SIZE 80
+
+#ifdef CONFIG_64BIT
#define LDREG ldd
#define STREG std
#define LDREGX ldd,s
@@ -30,8 +32,8 @@
#define SHRREG shrd
#define RP_OFFSET 16
#define FRAME_SIZE 128
-#define CALLEE_SAVE_FRAME_SIZE 144
-#else
+#define CALLEE_REG_FRAME_SIZE 144
+#else /* CONFIG_64BIT */
#define LDREG ldw
#define STREG stw
#define LDREGX ldwx,s
@@ -40,9 +42,11 @@
#define SHRREG shr
#define RP_OFFSET 20
#define FRAME_SIZE 64
-#define CALLEE_SAVE_FRAME_SIZE 128
+#define CALLEE_REG_FRAME_SIZE 128
#endif
+#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
+
#ifdef CONFIG_PA20
#define BL b,l
# ifdef CONFIG_64BIT
@@ -300,9 +304,35 @@
fldd,mb -8(\regs), %fr0
.endm
+ .macro callee_save_float
+ fstd,ma %fr12, 8(%r30)
+ fstd,ma %fr13, 8(%r30)
+ fstd,ma %fr14, 8(%r30)
+ fstd,ma %fr15, 8(%r30)
+ fstd,ma %fr16, 8(%r30)
+ fstd,ma %fr17, 8(%r30)
+ fstd,ma %fr18, 8(%r30)
+ fstd,ma %fr19, 8(%r30)
+ fstd,ma %fr20, 8(%r30)
+ fstd,ma %fr21, 8(%r30)
+ .endm
+
+ .macro callee_rest_float
+ fldd,mb -8(%r30), %fr21
+ fldd,mb -8(%r30), %fr20
+ fldd,mb -8(%r30), %fr19
+ fldd,mb -8(%r30), %fr18
+ fldd,mb -8(%r30), %fr17
+ fldd,mb -8(%r30), %fr16
+ fldd,mb -8(%r30), %fr15
+ fldd,mb -8(%r30), %fr14
+ fldd,mb -8(%r30), %fr13
+ fldd,mb -8(%r30), %fr12
+ .endm
+
#ifdef __LP64__
.macro callee_save
- std,ma %r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+ std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
mfctl %cr27, %r3
std %r4, -136(%r30)
std %r5, -128(%r30)
@@ -340,13 +370,13 @@
ldd -128(%r30), %r5
ldd -136(%r30), %r4
mtctl %r3, %cr27
- ldd,mb -CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+ ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#else /* ! __LP64__ */
.macro callee_save
- stw,ma %r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+ stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
mfctl %cr27, %r3
stw %r4, -124(%r30)
stw %r5, -120(%r30)
@@ -384,7 +414,7 @@
ldw -120(%r30), %r5
ldw -124(%r30), %r4
mtctl %r3, %cr27
- ldw,mb -CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+ ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#endif /* ! __LP64__ */
@@ -450,5 +480,30 @@
REST_CR (%cr22, PT_PSW (\regs))
.endm
+
+ /* First step to create a "relied upon translation"
+ * See PA 2.0 Arch. page F-4 and F-5.
+ *
+ * The ssm was originally necessary due to a "PCxT bug".
+ * But someone decided it needed to be added to the architecture
+ * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
+ * It's been carried forward into PA 2.0 Arch as well. :^(
+ *
+ * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
+ * rsm/ssm prevents the ifetch unit from speculatively fetching
+ * instructions past this line in the code stream.
+ * PA 2.0 processor will single step all insn in the same QUAD (4 insn).
+ */
+ .macro pcxt_ssm_bug
+ rsm PSW_SM_I,%r0
+ nop /* 1 */
+ nop /* 2 */
+ nop /* 3 */
+ nop /* 4 */
+ nop /* 5 */
+ nop /* 6 */
+ nop /* 7 */
+ .endm
+
#endif /* __ASSEMBLY__ */
#endif
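
The assembly.h hunks above split the old CALLEE_SAVE_FRAME_SIZE into a general-register part (CALLEE_REG_FRAME_SIZE) plus a new 80-byte float part: callee_save_float stores the ten callee-saved registers %fr12..%fr21 with 8-byte post-increment stores. A minimal standalone C check of that arithmetic (illustrative only, not kernel code):

    #include <assert.h>

    #define CALLEE_FLOAT_FRAME_SIZE 80   /* 10 fp regs x 8 bytes each */

    int main(void)
    {
            int reg_frame_64 = 144, reg_frame_32 = 128;

            assert(10 * 8 == CALLEE_FLOAT_FRAME_SIZE);
            /* Totals seen by users of callee_save + callee_save_float: */
            assert(reg_frame_64 + CALLEE_FLOAT_FRAME_SIZE == 224); /* 64-bit */
            assert(reg_frame_32 + CALLEE_FLOAT_FRAME_SIZE == 208); /* 32-bit */
            return 0;
    }
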
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 048a2c7fd0c0..983e9a2b6042 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -164,6 +164,26 @@ static __inline__ int atomic_read(const atomic_t *v)
}
/* exported interface */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))
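
The atomic.h hunk above adds the standard cmpxchg retry loop: re-read the counter whenever another CPU wins the race, but never add if the counter holds the forbidden value u. A userspace sketch of the same loop using C11 <stdatomic.h> (illustrative only; these names are not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static int add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            /* On failure, compare_exchange reloads c with the current
             * value, so we retry until we win or until *v hits u. */
            while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
                    ;
            return c != u;    /* non-zero iff the add happened */
    }

    int main(void)
    {
            atomic_int refs = 1;

            printf("%d\n", add_unless(&refs, 1, 0));  /* 1: took a ref */
            atomic_store(&refs, 0);
            printf("%d\n", add_unless(&refs, 1, 0));  /* 0: object dead */
            return 0;
    }
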
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index af7db694b22d..55b98c67fd82 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
#define _PARISC_BITOPS_H
#include <linux/compiler.h>
-#include <asm/spinlock.h>
+#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>
@@ -12,193 +12,157 @@
* to include/asm-i386/bitops.h or kerneldoc
*/
-#ifdef __LP64__
-# define SHIFT_PER_LONG 6
-#ifndef BITS_PER_LONG
-# define BITS_PER_LONG 64
-#endif
-#else
-# define SHIFT_PER_LONG 5
-#ifndef BITS_PER_LONG
-# define BITS_PER_LONG 32
-#endif
-#endif
-
-#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ * *_bit() want use of volatile.
+ * __*_bit() are "relaxed" and don't use spinlock or volatile.
+ */
+
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
*addr |= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- *addr |= mask;
+ *m |= 1UL << CHOP_SHIFTCOUNT(nr);
}
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
- *addr &= ~mask;
+ *addr &= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- *addr &= ~mask;
+ *m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
}
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
*addr ^= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
+ unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- *addr ^= mask;
+ *m ^= 1UL << CHOP_SHIFTCOUNT(nr);
}
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long oldbit;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr |= mask;
+ oldbit = *addr;
+ *addr = oldbit | mask;
_atomic_spin_unlock_irqrestore(addr, flags);
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long oldbit;
+ unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr |= mask;
+ oldbit = *addr;
+ *addr = oldbit | mask;
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long oldbit;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr &= ~mask;
+ oldbit = *addr;
+ *addr = oldbit & ~mask;
_atomic_spin_unlock_irqrestore(addr, flags);
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+ unsigned long oldbit;
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr &= ~mask;
+ oldbit = *addr;
+ *addr = oldbit & ~mask;
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long oldbit;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
_atomic_spin_lock_irqsave(addr, flags);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr ^= mask;
+ oldbit = *addr;
+ *addr = oldbit ^ mask;
_atomic_spin_unlock_irqrestore(addr, flags);
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
- unsigned long mask;
- unsigned long *addr = (unsigned long *) address;
- int oldbit;
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+ unsigned long oldbit;
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
- oldbit = (*addr & mask) ? 1 : 0;
- *addr ^= mask;
+ oldbit = *addr;
+ *addr = oldbit ^ mask;
- return oldbit;
+ return (oldbit & mask) ? 1 : 0;
}
static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
- unsigned long mask;
- const unsigned long *addr = (const unsigned long *)address;
-
- addr += (nr >> SHIFT_PER_LONG);
- mask = 1L << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
return !!(*addr & mask);
}
@@ -229,7 +193,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
unsigned long ret;
__asm__(
-#if BITS_PER_LONG > 32
+#ifdef __LP64__
" ldi 63,%1\n"
" extrd,u,*<> %0,63,32,%%r0\n"
" extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */
@@ -304,14 +268,7 @@ static __inline__ int fls(int x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-#define hweight64(x) \
-({ \
- unsigned long __x = (x); \
- unsigned int __w; \
- __w = generic_hweight32((unsigned int) __x); \
- __w += generic_hweight32((unsigned int) (__x>>32)); \
- __w; \
-})
+#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
@@ -324,7 +281,13 @@ static __inline__ int fls(int x)
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
-#ifndef __LP64__
+#ifdef __LP64__
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#else
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
@@ -334,14 +297,6 @@ static inline int sched_find_first_bit(const unsigned long *b)
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
-#else
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(((unsigned int)b[1])))
- return __ffs(b[1]) + 64;
- if (b[1] >> 32)
- return __ffs(b[1] >> 32) + 96;
- return __ffs(b[2]) + 128;
#endif
}
@@ -391,7 +346,7 @@ found_middle:
static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
- const unsigned long *p = addr + (offset >> 6);
+ const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
@@ -445,71 +400,90 @@ found_middle:
* test_and_{set,clear}_bit guarantee atomicity without
* disabling interrupts.
*/
-#ifdef __LP64__
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#else
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#endif
-#endif /* __KERNEL__ */
+/* '3' is bits per byte */
+#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
+#define ext2_test_bit(nr, addr) \
+ test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
- return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
+#define ext2_set_bit_atomic(l,nr,addr) \
+ test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) \
+ test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+
+#endif /* __KERNEL__ */
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
+ ext2_find_next_zero_bit((addr), (size), 0)
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
- unsigned long size, unsigned long offset)
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(unsigned long * x)
{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
+#ifdef __LP64__
+ return (unsigned long) __swab64p((u64 *) x);
+#else
+ return (unsigned long) __swab32p((u32 *) x);
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(unsigned long y)
+{
+#ifdef __LP64__
+ return (unsigned long) __swab64((u64) y);
+#else
+ return (unsigned long) __swab32((u32) y);
+#endif
+}
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
if (offset >= size)
return size;
size -= result;
- offset &= 31UL;
+ offset &= (BITS_PER_LONG - 1UL);
if (offset) {
- tmp = cpu_to_le32p(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
+ tmp = ext2_swabp(p++);
+ tmp |= (~0UL >> (BITS_PER_LONG - offset));
+ if (size < BITS_PER_LONG)
goto found_first;
- if (tmp != ~0U)
+ if (~tmp)
goto found_middle;
- size -= 32;
- result += 32;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
}
- while (size >= 32) {
- if ((tmp = cpu_to_le32p(p++)) != ~0U)
- goto found_middle;
- result += 32;
- size -= 32;
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
}
if (!size)
return result;
- tmp = cpu_to_le32p(p);
+ tmp = ext2_swabp(p);
found_first:
- tmp |= ~0U << size;
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. Skip ffz */
found_middle:
return result + ffz(tmp);
+
+found_middle_swap:
+ return result + ffz(ext2_swab(tmp));
}
+
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
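
The ext2 helpers above replace the hard-coded 0x38/0x18 with LE_BYTE_ADDR. XORing the bit number with it flips the byte index within a word while preserving the bit index within the byte, which is exactly the byte reversal a big-endian CPU needs to address a little-endian ext2 bitmap with word-wide bit ops. A standalone demonstration of the mapping (illustrative only):

    #include <stdio.h>

    /* (sizeof(long) - 1) << 3: 0x38 on 64-bit, 0x18 on 32-bit */
    #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

    int main(void)
    {
            /* Little-endian bitmap bit nr lives in byte nr/8 of the word;
             * the XOR re-addresses that byte from the other end, so the
             * word-wide ops touch the right memory byte on big-endian. */
            for (unsigned nr = 0; nr < 64; nr += 8)
                    printf("le bit %2u -> word-op bit %2lu\n",
                           nr, (unsigned long)(nr ^ LE_BYTE_ADDR));
            return 0;
    }
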
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index aa592d8c0e39..1bc3c83ee74b 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -100,30 +100,34 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
- unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
- pgd_t *pgd = pgd_offset(mm, addr);
+ pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
- pte_t *pte;
+ pte_t pte;
if(pgd_none(*pgd))
- return NULL;
+ return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
- return NULL;
+ return 0;
- pte = pte_offset_map(pmd, addr);
+ /* We cannot take the pte lock here: flush_cache_page is usually
+ * called with pte lock already held. Whereas flush_dcache_page
+ * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+ * the vma itself is secure, but the pte might come or go racily.
+ */
+ pte = *pte_offset_map(pmd, addr);
+ /* But pte_unmap() does nothing on this architecture */
- /* The PA flush mappings show up as pte_none, but they're
- * valid none the less */
- if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
- return NULL;
- return pte;
-}
-#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
+ /* Filter out coincidental file entries and swap entries */
+ if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
+ return 0;
+ return pte_pfn(pte) == pfn;
+}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
@@ -175,9 +179,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- if(likely(translation_exists(vma, vmaddr)))
+ if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
}
#endif
-
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 4db84f969e9e..74d4ac6f2151 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -9,8 +9,8 @@
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
- void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
- void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
+ void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+ void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
@@ -49,14 +49,14 @@ extern struct hppa_dma_ops *hppa_dma_ops;
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}
diff --git a/include/asm-parisc/errno.h b/include/asm-parisc/errno.h
index 08464c405471..e2f3ddc796be 100644
--- a/include/asm-parisc/errno.h
+++ b/include/asm-parisc/errno.h
@@ -114,6 +114,7 @@
#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
/* for robust mutexes */
#define EOWNERDEAD 254 /* Owner died */
diff --git a/include/asm-parisc/grfioctl.h b/include/asm-parisc/grfioctl.h
index d3cfc0168fb1..6a910311b56b 100644
--- a/include/asm-parisc/grfioctl.h
+++ b/include/asm-parisc/grfioctl.h
@@ -69,6 +69,8 @@
#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
+#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
+#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
/* structure for ioctl(GCDESCRIBE) */
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index 3243cf2cd227..b27bf7aeb256 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -22,7 +22,6 @@
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
-#define ide_check_region(from,extent) check_region((from), (extent))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
#define ide_release_region(from,extent) release_region((from), (extent))
/* Generic I/O and MEMIO string operations. */
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf22056..b0a30e2c9813 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
#define _ASM_PARISC_IRQ_H
#include <linux/config.h>
+#include <linux/cpumask.h>
#include <asm/types.h>
#define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/led.h b/include/asm-parisc/led.h
index 1ac8ab6c580d..efadfd543ec6 100644
--- a/include/asm-parisc/led.h
+++ b/include/asm-parisc/led.h
@@ -23,9 +23,6 @@
#define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */
-/* led tasklet struct */
-extern struct tasklet_struct led_tasklet;
-
/* register_led_driver() */
int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index 595d3dce120a..ae039f4fd711 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -27,12 +27,6 @@ extern struct node_map_data node_data[];
})
#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
#define pfn_to_page(pfn) \
({ \
unsigned long __pfn = (pfn); \
diff --git a/include/asm-parisc/parisc-device.h b/include/asm-parisc/parisc-device.h
index ef69ab4b17a9..1d247e32a608 100644
--- a/include/asm-parisc/parisc-device.h
+++ b/include/asm-parisc/parisc-device.h
@@ -1,7 +1,7 @@
#include <linux/device.h>
struct parisc_device {
- unsigned long hpa; /* Hard Physical Address */
+ struct resource hpa; /* Hard Physical Address */
struct parisc_device_id id;
struct parisc_driver *driver; /* Driver for this device */
char name[80]; /* The hardware description */
@@ -39,6 +39,11 @@ struct parisc_driver {
#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
#define parisc_parent(d) to_parisc_device(d->dev.parent)
+static inline char *parisc_pathname(struct parisc_device *d)
+{
+ return d->dev.bus_id;
+}
+
static inline void
parisc_set_drvdata(struct parisc_device *d, void *p)
{
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index d0b761f690b5..fa39d07d49e9 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -69,7 +69,7 @@ struct pci_hba_data {
#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
-#if CONFIG_64BIT
+#ifdef CONFIG_64BIT
#define PCI_F_EXTEND 0xffffffff00000000UL
#define PCI_IS_LMMIO(hba,a) pci_is_lmmio(hba,a)
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 820c6e712cd7..b4554711c3e7 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -12,6 +12,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/mm.h> /* for vm_area_struct */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/bitops.h>
@@ -418,7 +419,6 @@ extern void paging_init (void);
#define PG_dcache_dirty PG_arch_1
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
/* Encode and de-code a swap entry */
@@ -464,6 +464,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
extern spinlock_t pa_dbit_lock;
+struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
@@ -501,6 +502,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index a9dfadd05658..aae40e8c3aa8 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -122,8 +122,27 @@ struct thread_struct {
};
/* Thread struct flags. */
+#define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */
+#define PARISC_UAC_SIGBUS (1UL << 1)
#define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */
+#define PARISC_UAC_SHIFT 0
+#define PARISC_UAC_MASK (PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
+
+#define SET_UNALIGN_CTL(task,value) \
+ ({ \
+ (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
+ | (((value) << PARISC_UAC_SHIFT) & \
+ PARISC_UAC_MASK)); \
+ 0; \
+ })
+
+#define GET_UNALIGN_CTL(task,addr) \
+ ({ \
+ put_user(((task)->thread.flags & PARISC_UAC_MASK) \
+ >> PARISC_UAC_SHIFT, (int __user *) (addr)); \
+ })
+
#define INIT_THREAD { \
regs: { gr: { 0, }, \
fr: { 0, }, \
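
The new PARISC_UAC_* flags back the SET_UNALIGN_CTL/GET_UNALIGN_CTL hooks, which is what prctl(PR_SET_UNALIGN)/prctl(PR_GET_UNALIGN) call into. A userspace sketch of driving them (PR_UNALIGN_NOPRINT and friends come from <linux/prctl.h> via <sys/prctl.h>; illustrative only):

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            unsigned int ctl;

            /* Stop the kernel logging each fixed-up unaligned access. */
            if (prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0))
                    perror("PR_SET_UNALIGN");

            if (prctl(PR_GET_UNALIGN, &ctl, 0, 0, 0) == 0)
                    printf("unaligned-access control: %#x\n", ctl);
            return 0;
    }
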
diff --git a/include/asm-parisc/psw.h b/include/asm-parisc/psw.h
index 51323029f377..4334d6ca2add 100644
--- a/include/asm-parisc/psw.h
+++ b/include/asm-parisc/psw.h
@@ -1,4 +1,7 @@
#ifndef _PARISC_PSW_H
+
+#include <linux/config.h>
+
#define PSW_I 0x00000001
#define PSW_D 0x00000002
#define PSW_P 0x00000004
@@ -9,6 +12,16 @@
#define PSW_G 0x00000040 /* PA1.x only */
#define PSW_O 0x00000080 /* PA2.0 only */
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I PSW_I /* Enable External Interrupts */
+#define PSW_SM_D PSW_D
+#define PSW_SM_P PSW_P
+#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
+#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
+#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
+
+#define PSW_SM_QUIET PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I
+
#define PSW_CB 0x0000ff00
#define PSW_M 0x00010000
@@ -30,33 +43,21 @@
#define PSW_Z 0x40000000 /* PA1.x only */
#define PSW_Y 0x80000000 /* PA1.x only */
-#ifdef __LP64__
-#define PSW_HI_CB 0x000000ff /* PA2.0 only */
+#ifdef CONFIG_64BIT
+# define PSW_HI_CB 0x000000ff /* PA2.0 only */
#endif
-/* PSW bits to be used with ssm/rsm */
-#define PSW_SM_I 0x1
-#define PSW_SM_D 0x2
-#define PSW_SM_P 0x4
-#define PSW_SM_Q 0x8
-#define PSW_SM_R 0x10
-#define PSW_SM_F 0x20
-#define PSW_SM_G 0x40
-#define PSW_SM_O 0x80
-#define PSW_SM_E 0x100
-#define PSW_SM_W 0x200
-
-#ifdef __LP64__
-# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-# define KERNEL_PSW (PSW_W | PSW_C | PSW_Q | PSW_P | PSW_D)
-# define REAL_MODE_PSW (PSW_W | PSW_Q)
-# define USER_PSW_MASK (PSW_W | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
-# define USER_PSW_HI_MASK (PSW_HI_CB)
-#else
-# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-# define KERNEL_PSW (PSW_C | PSW_Q | PSW_P | PSW_D)
-# define REAL_MODE_PSW (PSW_Q)
-# define USER_PSW_MASK (PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#ifdef CONFIG_64BIT
+# define USER_PSW_HI_MASK PSW_HI_CB
+# define WIDE_PSW PSW_W
+#else
+# define WIDE_PSW 0
#endif
+/* Used when setting up for rfi */
+#define KERNEL_PSW (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
+#define REAL_MODE_PSW (WIDE_PSW | PSW_Q)
+#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
+
#endif
diff --git a/include/asm-parisc/ptrace.h b/include/asm-parisc/ptrace.h
index 3f428aa371a4..93f990e418f1 100644
--- a/include/asm-parisc/ptrace.h
+++ b/include/asm-parisc/ptrace.h
@@ -49,7 +49,7 @@ struct pt_regs {
#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
-#define profile_pc(regs) instruction_pointer(regs)
+unsigned long profile_pc(struct pt_regs *);
extern void show_regs(struct pt_regs *);
#endif
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index f78bb2e34538..c9ee41cd0707 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -49,9 +49,6 @@ struct semaphore {
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
-#define __MUTEX_INITIALIZER(name) \
- __SEMAPHORE_INITIALIZER(name,1)
-
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..dbdbd2e9fdf9 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
#endif /* !ASSEMBLY */
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 43eaa6e742e0..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -5,29 +5,31 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+ unsigned long flags)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
- while (*a == 0);
+ while (*a == 0)
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
mb();
}
@@ -65,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter++;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter--;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check
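
The new __raw_spin_lock_flags() above spins with interrupts re-enabled whenever the caller's saved PSW had PSW_SM_I set, so a contended lock no longer holds interrupts off for the whole wait. A standalone C11 sketch of that pattern (irq_on/irq_off stand in for local_irq_enable()/local_irq_disable(); note the real ldcw lock inverts the sense, 1 meaning unlocked — the C11 flag hides that; illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag lock = ATOMIC_FLAG_INIT;

    static void irq_on(void)  { /* models local_irq_enable() */ }
    static void irq_off(void) { /* models local_irq_disable() */ }

    static void spin_lock_flags(bool irqs_were_on)
    {
            while (atomic_flag_test_and_set_explicit(&lock,
                                                     memory_order_acquire))
                    if (irqs_were_on) {   /* kernel tests flags & PSW_SM_I */
                            irq_on();     /* let pending interrupts run */
                            irq_off();
                    }
    }

    int main(void)
    {
            spin_lock_flags(true);
            atomic_flag_clear(&lock);
            puts("ok");
            return 0;
    }
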
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
index 785bba822fbf..d6b479bdb886 100644
--- a/include/asm-parisc/spinlock_types.h
+++ b/include/asm-parisc/spinlock_types.h
@@ -6,11 +6,15 @@
#endif
typedef struct {
+#ifdef CONFIG_PA20
+ volatile unsigned int slock;
+# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#else
volatile unsigned int lock[4];
+# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+#endif
} raw_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-
typedef struct {
raw_spinlock_t lock;
volatile int counter;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 26ff844a21c1..f3928d3a80cb 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -138,13 +138,7 @@ static inline void set_eiem(unsigned long val)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
- unsigned __ret; \
- __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
- __ret; \
-})
-
+#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
@@ -152,37 +146,41 @@ static inline void set_eiem(unsigned long val)
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
+
#define __PA_LDCW_ALIGNMENT 16
#define __ldcw_align(a) ({ \
unsigned long __ret = (unsigned long) &(a)->lock[0]; \
__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
+#define LDCW "ldcw"
-#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-#endif
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+ I've attached a summary of the change, but basically, for PA 2.0, as
+ long as the ",CO" (coherent operation) completer is specified, then the
+ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+ they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ ldcd). */
-#define KERNEL_START (0x10100000 - 0x1000)
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW "ldcw,co"
-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
-extern spinlock_t pa_tlb_lock;
+#endif /*!CONFIG_PA20*/
-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+ unsigned __ret; \
+ __asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+ __ret; \
+})
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
#endif
+#define KERNEL_START (0x10100000 - 0x1000)
#define arch_align_stack(x) (x)
#endif
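
system.h now keys the ldcw story off CONFIG_PA20: PA 2.0 can use "ldcw,co" with natural 4-byte alignment, while older CPUs keep the 4-word lock array and round up to a 16-byte boundary at runtime. A standalone sketch of that pre-PA2.0 __ldcw_align() rounding (illustrative only, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    #define ALIGNMENT 16

    typedef struct { volatile unsigned int lock[4]; } fake_lock_t;

    static volatile unsigned int *ldcw_align(fake_lock_t *a)
    {
            unsigned long p = (unsigned long) &a->lock[0];

            /* Round up to the next 16-byte boundary. */
            p = (p + ALIGNMENT - 1) & ~(ALIGNMENT - 1UL);
            return (volatile unsigned int *) p;
    }

    int main(void)
    {
            fake_lock_t l;
            volatile unsigned int *w = ldcw_align(&l);

            /* The 4-word array guarantees the aligned word is inside it. */
            assert((unsigned long) w % ALIGNMENT == 0);
            assert(w >= &l.lock[0] && w <= &l.lock[3]);
            printf("lock word at %p\n", (void *) w);
            return 0;
    }
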
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index eb27b78930e8..c9ec39c6fc6c 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -7,6 +7,20 @@
#include <linux/mm.h>
#include <asm/mmu_context.h>
+
+/* This is for the serialisation of PxTLB broadcasts. At least on the
+ * N class systems, only one PxTLB inter processor broadcast can be
+ * active at any one time on the Merced bus. This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+ * it on all SMP systems not just the N class. We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
+extern spinlock_t pa_tlb_lock;
+
+#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
+#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
+
extern void flush_tlb_all(void);
/*
@@ -64,29 +78,25 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
{
unsigned long npages;
-
npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (npages >= 512) /* XXX arbitrary, should be tuned */
+ if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
-
mtsp(vma->vm_mm->context,1);
+ purge_tlb_start();
if (split_tlb) {
- purge_tlb_start();
while (npages--) {
pdtlb(start);
pitlb(start);
start += PAGE_SIZE;
}
- purge_tlb_end();
} else {
- purge_tlb_start();
while (npages--) {
pdtlb(start);
start += PAGE_SIZE;
}
- purge_tlb_end();
}
+ purge_tlb_end();
}
}
diff --git a/include/asm-parisc/types.h b/include/asm-parisc/types.h
index d21b9d0d63ea..34fdce361a5a 100644
--- a/include/asm-parisc/types.h
+++ b/include/asm-parisc/types.h
@@ -33,8 +33,10 @@ typedef unsigned long long __u64;
#ifdef __LP64__
#define BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
#else
#define BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
#endif
#ifndef __ASSEMBLY__
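
Moving SHIFT_PER_LONG next to BITS_PER_LONG keeps the pair in sync; the bitops.h word-index math (nr >> SHIFT_PER_LONG) relies on BITS_PER_LONG being exactly 1 << SHIFT_PER_LONG. A trivial standalone check of that invariant (illustrative only):

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
            int bits_per_long  = sizeof(unsigned long) * CHAR_BIT;
            int shift_per_long = (bits_per_long == 64) ? 6 : 5;

            /* Bit nr lives in word nr >> SHIFT_PER_LONG only if this holds. */
            assert(bits_per_long == 1 << shift_per_long);
            return 0;
    }
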
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index 6a9f0cadff58..80b7b98c70a1 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -687,8 +687,8 @@
#define __NR_shmget (__NR_Linux + 194)
#define __NR_shmctl (__NR_Linux + 195)
-#define __NR_getpmsg (__NR_Linux + 196) /* some people actually want streams */
-#define __NR_putpmsg (__NR_Linux + 197) /* some people actually want streams */
+#define __NR_getpmsg (__NR_Linux + 196) /* Somebody *wants* streams? */
+#define __NR_putpmsg (__NR_Linux + 197)
#define __NR_lstat64 (__NR_Linux + 198)
#define __NR_truncate64 (__NR_Linux + 199)
@@ -755,8 +755,14 @@
#define __NR_mbind (__NR_Linux + 260)
#define __NR_get_mempolicy (__NR_Linux + 261)
#define __NR_set_mempolicy (__NR_Linux + 262)
+#define __NR_vserver (__NR_Linux + 263)
+#define __NR_add_key (__NR_Linux + 264)
+#define __NR_request_key (__NR_Linux + 265)
+#define __NR_keyctl (__NR_Linux + 266)
+#define __NR_ioprio_set (__NR_Linux + 267)
+#define __NR_ioprio_get (__NR_Linux + 268)
-#define __NR_Linux_syscalls 263
+#define __NR_Linux_syscalls 269
#define HPUX_GATEWAY_ADDR 0xC0000004
#define LINUX_GATEWAY_ADDR 0x100
@@ -807,10 +813,10 @@
#define K_INLINE_SYSCALL(name, nr, args...) ({ \
long __sys_res; \
{ \
- register unsigned long __res asm("r28"); \
+ register unsigned long __res __asm__("r28"); \
K_LOAD_ARGS_##nr(args) \
/* FIXME: HACK stw/ldw r19 around syscall */ \
- asm volatile( \
+ __asm__ volatile( \
K_STW_ASM_PIC \
" ble 0x100(%%sr2, %%r0)\n" \
" ldi %1, %%r20\n" \
@@ -1005,7 +1011,6 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
struct pt_regs *regs);
int sys_vfork(struct pt_regs *regs);
int sys_pipe(int *fildes);
-long sys_ptrace(long request, pid_t pid, long addr, long data);
struct sigaction;
asmlinkage long sys_rt_sigaction(int sig,
const struct sigaction __user *act,