author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-generic

Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'include/asm-generic')

-rw-r--r--  include/asm-generic/4level-fixup.h        38
-rw-r--r--  include/asm-generic/bitops.h              81
-rw-r--r--  include/asm-generic/bug.h                 34
-rw-r--r--  include/asm-generic/cputime.h             66
-rw-r--r--  include/asm-generic/div64.h               58
-rw-r--r--  include/asm-generic/dma-mapping-broken.h  22
-rw-r--r--  include/asm-generic/dma-mapping.h         309
-rw-r--r--  include/asm-generic/errno-base.h          39
-rw-r--r--  include/asm-generic/errno.h               105
-rw-r--r--  include/asm-generic/hdreg.h               8
-rw-r--r--  include/asm-generic/ide_iops.h            38
-rw-r--r--  include/asm-generic/iomap.h               63
-rw-r--r--  include/asm-generic/ipc.h                 31
-rw-r--r--  include/asm-generic/local.h               118
-rw-r--r--  include/asm-generic/pci-dma-compat.h      107
-rw-r--r--  include/asm-generic/pci.h                 34
-rw-r--r--  include/asm-generic/percpu.h              42
-rw-r--r--  include/asm-generic/pgtable-nopmd.h       65
-rw-r--r--  include/asm-generic/pgtable-nopud.h       61
-rw-r--r--  include/asm-generic/pgtable.h             213
-rw-r--r--  include/asm-generic/resource.h            88
-rw-r--r--  include/asm-generic/rtc.h                 213
-rw-r--r--  include/asm-generic/sections.h            13
-rw-r--r--  include/asm-generic/siginfo.h             287
-rw-r--r--  include/asm-generic/statfs.h              51
-rw-r--r--  include/asm-generic/termios.h             69
-rw-r--r--  include/asm-generic/tlb.h                 160
-rw-r--r--  include/asm-generic/topology.h            48
-rw-r--r--  include/asm-generic/uaccess.h             26
-rw-r--r--  include/asm-generic/unaligned.h           121
-rw-r--r--  include/asm-generic/vmlinux.lds.h         90
-rw-r--r--  include/asm-generic/xor.h                 718

32 files changed, 3416 insertions, 0 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
new file mode 100644
index 00000000000..c20ec257ecc
--- /dev/null
+++ b/include/asm-generic/4level-fixup.h
@@ -0,0 +1,38 @@
+#ifndef _4LEVEL_FIXUP_H
+#define _4LEVEL_FIXUP_H
+
+#define __ARCH_HAS_4LEVEL_HACK
+#define __PAGETABLE_PUD_FOLDED
+
+#define PUD_SIZE PGDIR_SIZE
+#define PUD_MASK PGDIR_MASK
+#define PTRS_PER_PUD 1
+
+#define pud_t pgd_t
+
+#define pmd_alloc(mm, pud, address) \
+({ pmd_t *ret; \
+ if (pgd_none(*pud)) \
+ ret = __pmd_alloc(mm, pud, address); \
+ else \
+ ret = pmd_offset(pud, address); \
+ ret; \
+})
+
+#define pud_alloc(mm, pgd, address) (pgd)
+#define pud_offset(pgd, start) (pgd)
+#define pud_none(pud) 0
+#define pud_bad(pud) 0
+#define pud_present(pud) 1
+#define pud_ERROR(pud) do { } while (0)
+#define pud_clear(pud) pgd_clear(pud)
+
+#undef pud_free_tlb
+#define pud_free_tlb(tlb, x) do { } while (0)
+#define pud_free(x) do { } while (0)
+#define __pud_free_tlb(tlb, x) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
new file mode 100644
index 00000000000..ce31b739fd8
--- /dev/null
+++ b/include/asm-generic/bitops.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_GENERIC_BITOPS_H_
+#define _ASM_GENERIC_BITOPS_H_
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents. You should
+ * recode these in the native assembly language, if at all possible.
+ * To guarantee atomicity, these routines call cli() and sti() to
+ * disable interrupts while they operate. (You have to provide inline
+ * routines to cli() and sti().)
+ *
+ * Also note, these routines assume that you have 32 bit longs.
+ * You will have to change this if you are trying to port Linux to the
+ * Alpha architecture or to a Cray. :-)
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+extern __inline__ int set_bit(int nr,long * addr)
+{
+ int mask, retval;
+
+ addr += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ cli();
+ retval = (mask & *addr) != 0;
+ *addr |= mask;
+ sti();
+ return retval;
+}
+
+extern __inline__ int clear_bit(int nr, long * addr)
+{
+ int mask, retval;
+
+ addr += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ cli();
+ retval = (mask & *addr) != 0;
+ *addr &= ~mask;
+ sti();
+ return retval;
+}
+
+extern __inline__ int test_bit(int nr, const unsigned long * addr)
+{
+ int mask;
+
+ addr += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *addr) != 0);
+}
+
+/*
+ * fls: find last bit set.
+ */
+
+#define fls(x) generic_fls(x)
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+#define ffs(x) generic_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_GENERIC_BITOPS_H_ */
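[Annotation] The generic bitops above index the bitmap as 32-bit words: "addr += nr >> 5" selects the word holding bit nr, and "1 << (nr & 0x1f)" is the mask within that word. A standalone sketch of the same word/mask arithmetic, for illustration only (plain userspace C, without the cli()/sti() atomicity, and subject to the header's own caveat about 32-bit longs):

	/* Word/mask arithmetic used by the generic bitops, shown standalone. */
	#include <stdio.h>

	static int demo_test_bit(int nr, const unsigned long *addr)
	{
		addr += nr >> 5;                    /* word containing bit nr */
		return (*addr >> (nr & 0x1f)) & 1;  /* bit within that word */
	}

	int main(void)
	{
		unsigned long map[2] = { 0 };

		map[33 >> 5] |= 1UL << (33 & 0x1f); /* set bit 33 by hand */
		printf("bit 33: %d, bit 34: %d\n",
		       demo_test_bit(33, map), demo_test_bit(34, map));
		return 0;
	}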
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
new file mode 100644
index 00000000000..e5913c3b715
--- /dev/null
+++ b/include/asm-generic/bug.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/config.h>
+
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_PAGE_BUG
+#define PAGE_BUG(page) do { \
+ printk("page BUG for page at %p\n", page); \
+ BUG(); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) do { \
+ if (unlikely((condition)!=0)) { \
+ printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ dump_stack(); \
+ } \
+} while (0)
+#endif
+
+#endif
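[Annotation] A hedged usage sketch of these fallbacks (not part of the patch; demo_submit is invented): BUG_ON() halts on a condition that must never hold, while WARN_ON() - a statement, not an expression, in this generic form - logs a backtrace and lets execution continue:

	/* Hypothetical caller showing the intended division of labor. */
	static void demo_submit(void *dev, void *buf)
	{
		BUG_ON(dev == NULL);	/* invariant violated: halt with file:line */

		if (buf == NULL) {
			WARN_ON(1);	/* print "Badness ..." plus a stack dump */
			return;		/* then recover instead of panicking */
		}
		/* ... hand buf to the hardware queue ... */
	}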
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
new file mode 100644
index 00000000000..6f178563e33
--- /dev/null
+++ b/include/asm-generic/cputime.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_GENERIC_CPUTIME_H
+#define _ASM_GENERIC_CPUTIME_H
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+typedef unsigned long cputime_t;
+
+#define cputime_zero (0UL)
+#define cputime_max ((~0UL >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+#define cputime_to_jiffies(__ct) (__ct)
+#define jiffies_to_cputime(__hz) (__hz)
+
+typedef u64 cputime64_t;
+
+#define cputime64_zero (0ULL)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_to_jiffies64(__ct) (__ct)
+#define cputime_to_cputime64(__ct) ((u64) __ct)
+
+
+/*
+ * Convert cputime to milliseconds and back.
+ */
+#define cputime_to_msecs(__ct) jiffies_to_msecs(__ct)
+#define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs)
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif) ((jif) / HZ)
+#define secs_to_cputime(sec) ((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
+#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
+#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
+#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+
+#endif
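[Annotation] Since the generic cputime_t is simply jiffies, all of these conversions reduce to the jiffies helpers. A worked fragment, assuming HZ == 100 for the numbers:

	/* Illustrative only: generic cputime arithmetic is plain integer math. */
	static void demo_cputime(void)
	{
		cputime_t total = cputime_zero;

		total = cputime_add(total, jiffies_to_cputime(250));
		/* with HZ == 100:
		 *   cputime_to_secs(total)  == 250 / HZ == 2
		 *   cputime_to_msecs(total) == jiffies_to_msecs(250) == 2500
		 */
	}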
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
new file mode 100644
index 00000000000..8f4e3193342
--- /dev/null
+++ b/include/asm-generic/div64.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * uint32_t remainder = *n % base;
+ * *n = *n / base;
+ * return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+
+#elif BITS_PER_LONG == 32
+
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = __div64_32(&(n), __base); \
+ __rem; \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
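[Annotation] do_div() updates its 64-bit argument in place with the quotient and returns the 32-bit remainder; the "unnecessary pointer compare" enforces at compile time that n is exactly a uint64_t lvalue. A hedged usage sketch:

	/* n must be a 64-bit lvalue and is evaluated multiple times,
	 * so the argument must not carry side effects. */
	static uint32_t demo_div(void)
	{
		uint64_t ns = 1000000123ULL;
		uint32_t rem;

		rem = do_div(ns, 1000000);	/* ns is now 1000 (the quotient) */
		return rem;			/* 123 (the remainder) */
	}

	/* do_div(ns + 1, 10) would fail (not an lvalue), and passing a
	 * u32 fails the typeof pointer check - both rejected at compile time. */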
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
new file mode 100644
index 00000000000..fd9de9502df
--- /dev/null
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+/* This is used for archs that do not support DMA */
+
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ int flag)
+{
+ BUG();
+ return NULL;
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG();
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 00000000000..8cef663c5cd
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,309 @@
+/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
+ *
+ * Implements the generic device dma API via the existing pci_ one
+ * for unconverted architectures
+ */
+
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_PCI
+
+/* we implement the API below in terms of the existing PCI one,
+ * so include it */
+#include <linux/pci.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_dma_supported(to_pci_dev(dev), mask);
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned int __nocast flag)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
+ size, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
+ size, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+ return pci_dma_mapping_error(dma_addr);
+}
+
+
+#else
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG();
+ return 0;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned int __nocast flag)
+{
+ BUG();
+ return NULL;
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG();
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+#endif
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d) (1)
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* could define this in terms of the dma_cache ... operations,
+ * but if you get this on a platform, you should convert the platform
+ * to using the generic device DMA API */
+ BUG();
+}
+
+#endif
+
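[Annotation] A hedged sketch of how a driver consumes this API: map a buffer for streaming DMA, let the device fill it, then unmap - each call here forwarding to the pci_ equivalent on unconverted architectures. The demo_rx name is invented for illustration:

	/* Hypothetical driver fragment using the generic API above. */
	static int demo_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(handle))
			return -ENOMEM;

		/* ... point the hardware at 'handle', wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}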
diff --git a/include/asm-generic/errno-base.h b/include/asm-generic/errno-base.h
new file mode 100644
index 00000000000..65115978510
--- /dev/null
+++ b/include/asm-generic/errno-base.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
new file mode 100644
index 00000000000..4dd2384bc38
--- /dev/null
+++ b/include/asm-generic/errno.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+#endif
diff --git a/include/asm-generic/hdreg.h b/include/asm-generic/hdreg.h
new file mode 100644
index 00000000000..7051fba8bcf
--- /dev/null
+++ b/include/asm-generic/hdreg.h
@@ -0,0 +1,8 @@
+#warning <asm/hdreg.h> is obsolete, please do not use it
+
+#ifndef __ASM_GENERIC_HDREG_H
+#define __ASM_GENERIC_HDREG_H
+
+typedef unsigned long ide_ioreg_t;
+
+#endif /* __ASM_GENERIC_HDREG_H */
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
new file mode 100644
index 00000000000..1b91d068191
--- /dev/null
+++ b/include/asm-generic/ide_iops.h
@@ -0,0 +1,38 @@
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+ while (count--) {
+ writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
new file mode 100644
index 00000000000..4991543d44c
--- /dev/null
+++ b/include/asm-generic/iomap.h
@@ -0,0 +1,63 @@
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int fastcall ioread8(void __iomem *);
+extern unsigned int fastcall ioread16(void __iomem *);
+extern unsigned int fastcall ioread32(void __iomem *);
+
+extern void fastcall iowrite8(u8, void __iomem *);
+extern void fastcall iowrite16(u16, void __iomem *);
+extern void fastcall iowrite32(u32, void __iomem *);
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+#endif
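[Annotation] A usage sketch of the iomap cookies (the BAR number and register offset are invented for illustration): the same ioread32() works whether the BAR turned out to be MMIO or PIO, because the cookie encodes which it is:

	static u32 demo_read_status(struct pci_dev *pdev)
	{
		void __iomem *regs;
		u32 status;

		regs = pci_iomap(pdev, 0, 0);	/* map all of BAR 0 */
		if (!regs)
			return ~0;

		status = ioread32(regs + 0x10);	/* MMIO or PIO, caller can't tell */
		pci_iounmap(pdev, regs);
		return status;
	}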
diff --git a/include/asm-generic/ipc.h b/include/asm-generic/ipc.h
new file mode 100644
index 00000000000..a40407a165c
--- /dev/null
+++ b/include/asm-generic/ipc.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_GENERIC_IPC_H
+#define _ASM_GENERIC_IPC_H
+/*
+ * These are used to wrap system calls.
+ *
+ * See architecture code for ugly details..
+ */
+struct ipc_kludge {
+ struct msgbuf __user *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define SEMTIMEDOP 4
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+/* Used by the DIPC package, try and avoid reusing it */
+#define DIPC 25
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif /* _ASM_GENERIC_IPC_H */
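[Annotation] IPCCALL() packs an ABI version into the high 16 bits of the multiplexed sys_ipc() call number, leaving the operation in the low 16 bits. A worked decode, not part of the patch:

	unsigned int call    = IPCCALL(1, SEMOP); /* (1 << 16) | 1 == 0x10001 */
	unsigned int version = call >> 16;        /* 1: the ABI version */
	unsigned int op      = call & 0xffff;     /* 1: SEMOP */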
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 00000000000..16fc00360f7
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/config.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/types.h>
+
+/* An unsigned long type for operations which are atomic for a single
+ * CPU. Usually used in combination with per-cpu variables. */
+
+#if BITS_PER_LONG == 32
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct
+{
+ atomic_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_INIT(i) }
+
+#define local_read(l) ((unsigned long)atomic_read(&(l)->a))
+#define local_set(l,i) atomic_set((&(l)->a),(i))
+#define local_inc(l) atomic_inc(&(l)->a)
+#define local_dec(l) atomic_dec(&(l)->a)
+#define local_add(i,l) atomic_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_sub((i),(&(l)->a))
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) local_set((l), local_read(l) + 1)
+#define __local_dec(l) local_set((l), local_read(l) - 1)
+#define __local_add(i,l) local_set((l), local_read(l) + (i))
+#define __local_sub(i,l) local_set((l), local_read(l) - (i))
+
+#else /* ... can't use atomics. */
+/* Implement in terms of three variables.
+ Another option would be to use local_irq_save/restore. */
+
+typedef struct
+{
+ /* 0 = in hardirq, 1 = in softirq, 2 = usermode. */
+ unsigned long v[3];
+} local_t;
+
+#define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()])
+
+#define LOCAL_INIT(i) { { (i), 0, 0 } }
+
+static inline unsigned long local_read(local_t *l)
+{
+ return l->v[0] + l->v[1] + l->v[2];
+}
+
+static inline void local_set(local_t *l, unsigned long v)
+{
+ l->v[0] = v;
+ l->v[1] = l->v[2] = 0;
+}
+
+static inline void local_inc(local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l)++;
+ preempt_enable();
+}
+
+static inline void local_dec(local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l)--;
+ preempt_enable();
+}
+
+static inline void local_add(unsigned long v, local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l) += v;
+ preempt_enable();
+}
+
+static inline void local_sub(unsigned long v, local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l) -= v;
+ preempt_enable();
+}
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) ((l)->v[0]++)
+#define __local_dec(l) ((l)->v[0]--)
+#define __local_add(i,l) ((l)->v[0] += (i))
+#define __local_sub(i,l) ((l)->v[0] -= (i))
+
+#endif /* Non-atomic implementation */
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable (eg. mystruct.foo), not an address.
+ */
+#define cpu_local_read(v) local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
+#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Non-atomic increments, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well.
+ */
+#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
+#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
+#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
+#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+
+#endif /* _ASM_GENERIC_LOCAL_H */
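[Annotation] Typical use pairs a local_t with a per-cpu variable: each CPU bumps only its own counter without bus-locked operations, and a reader sums across CPUs. A hedged sketch assuming the per-cpu helpers (demo_* names invented):

	static DEFINE_PER_CPU(local_t, demo_events) = LOCAL_INIT(0);

	static void demo_event(void)
	{
		cpu_local_inc(demo_events);	/* cheap, CPU-local increment */
	}

	static unsigned long demo_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_possible(cpu))
				sum += local_read(&per_cpu(demo_events, cpu));
		return sum;
	}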
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
new file mode 100644
index 00000000000..25c10e96b2b
--- /dev/null
+++ b/include/asm-generic/pci-dma-compat.h
@@ -0,0 +1,107 @@
+/* include this file if the platform implements the dma_ DMA Mapping API
+ * and wants to provide the pci_ DMA Mapping API in terms of it */
+
+#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
+#define _ASM_GENERIC_PCI_DMA_COMPAT_H
+
+#include <linux/dma-mapping.h>
+
+/* note pci_set_dma_mask isn't here, since it's a public function
+ * exported from drivers/pci, use dma_supported instead */
+
+static inline int
+pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+ return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
+}
+
+static inline void *
+pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+}
+
+static inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+}
+
+static inline dma_addr_t
+pci_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+ size_t size, int direction)
+{
+ dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_mapping_error(dma_addr);
+}
+
+#endif
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
new file mode 100644
index 00000000000..9d4cc47bde3
--- /dev/null
+++ b/include/asm-generic/pci.h
@@ -0,0 +1,34 @@
+/*
+ * linux/include/asm-generic/pci.h
+ *
+ * Copyright (C) 2003 Russell King
+ */
+#ifndef _ASM_GENERIC_PCI_H
+#define _ASM_GENERIC_PCI_H
+
+/**
+ * pcibios_resource_to_bus - convert resource to PCI bus address
+ * @dev: device which owns this resource
+ * @region: converted bus-centric region (start,end)
+ * @res: resource to convert
+ *
+ * Convert a resource to a PCI device bus address or bus window.
+ */
+static inline void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ region->start = res->start;
+ region->end = res->end;
+}
+
+#define pcibios_scan_all_fns(a, b) 0
+
+#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? 15 : 14;
+}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+
+#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
new file mode 100644
index 00000000000..3b709b84934
--- /dev/null
+++ b/include/asm-generic/percpu.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+#include <linux/compiler.h>
+
+#define __GENERIC_PER_CPU
+#ifdef CONFIG_SMP
+
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+
+/* A macro to avoid #include hell... */
+#define percpu_modcopy(pcpudst, src, size) \
+do { \
+ unsigned int __i; \
+ for (__i = 0; __i < NR_CPUS; __i++) \
+ if (cpu_possible(__i)) \
+ memcpy((pcpudst)+__per_cpu_offset[__i], \
+ (src), (size)); \
+} while (0)
+#else /* ! SMP */
+
+#define DEFINE_PER_CPU(type, name) \
+ __typeof__(type) per_cpu__##name
+
+#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
+#define __get_cpu_var(var) per_cpu__##var
+
+#endif /* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+#endif /* _ASM_GENERIC_PERCPU_H_ */
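[Annotation] The macros name-mangle the variable (per_cpu__##name) and, on SMP, place the prototype copy in .data.percpu; per_cpu() then offsets the reference by the target CPU's __per_cpu_offset. A sketch of the declare/define/use pattern (demo_count is invented):

	/* in a header */
	DECLARE_PER_CPU(int, demo_count);

	/* in exactly one .c file */
	DEFINE_PER_CPU(int, demo_count) = 0;
	EXPORT_PER_CPU_SYMBOL(demo_count);

	/* at use sites - __get_cpu_var() needs the CPU pinned,
	 * e.g. under preempt_disable() */
	__get_cpu_var(demo_count)++;	/* this CPU's copy */
	per_cpu(demo_count, 3) = 0;	/* CPU 3's copy */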
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 00000000000..c8d53ba20e1
--- /dev/null
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,65 @@
+#ifndef _PGTABLE_NOPMD_H
+#define _PGTABLE_NOPMD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nopud.h>
+
+#define __PAGETABLE_PMD_FOLDED
+
+/*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+ * without casting.
+ */
+typedef struct { pud_t pud; } pmd_t;
+
+#define PMD_SHIFT PUD_SHIFT
+#define PTRS_PER_PMD 1
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
+static inline int pud_present(pud_t pud) { return 1; }
+static inline void pud_clear(pud_t *pud) { }
+#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
+
+#define pud_populate(mm, pmd, pte) do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
+{
+ return (pmd_t *)pud;
+}
+
+#define pmd_val(x) (pud_val((x).pud))
+#define __pmd(x) ((pmd_t) { __pud(x) } )
+
+#define pud_page(pud) (pmd_page((pmd_t){ pud }))
+#define pud_page_kernel(pud) (pmd_page_kernel((pmd_t){ pud }))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pud, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one(mm, address) NULL
+#define pmd_free(x) do { } while (0)
+#define __pmd_free_tlb(tlb, x) do { } while (0)
+
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PGTABLE_NOPMD_H */
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 00000000000..82e29f0ce46
--- /dev/null
+++ b/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,61 @@
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_PUD_FOLDED
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT PGDIR_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud) do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
+{
+ return (pud_t *)pgd;
+}
+
+#define pud_val(x) (pgd_val((x).pgd))
+#define __pud(x) ((pud_t) { __pgd(x) } )
+
+#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
+#define pgd_page_kernel(pgd) (pud_page_kernel((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(x) do { } while (0)
+#define __pud_free_tlb(tlb, x) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOPUD_H */
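[Annotation] The payoff of the folding is that four-level walking code compiles unchanged on architectures with fewer levels. A hedged fragment showing how one walker step degenerates when the pud is folded:

	/* Illustrative: on a folded configuration the "pud" is the pgd
	 * in disguise and the loop body runs exactly once. */
	static void demo_walk_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
	{
		pud_t *pud = pud_offset(pgd, addr);	/* folded: (pud_t *)pgd */
		unsigned long next;

		do {
			next = pud_addr_end(addr, end);	/* folded: next == end */
			if (pud_none(*pud))		/* folded: always 0 */
				continue;
			/* ... descend to the pmd level for [addr, next) ... */
		} while (pud++, addr = next, addr != end);
	}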
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
new file mode 100644
index 00000000000..a3b28710d56
--- /dev/null
+++ b/include/asm-generic/pgtable.h
@@ -0,0 +1,213 @@
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH
+/*
+ * Establish a new mapping:
+ * - flush the old one
+ * - update the page tables
+ * - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ *
+ * Note: the old pte is known to not be writable, so we don't need to
+ * worry about dirty bits etc getting lost.
+ */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte_atomic(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Largely same as above, but only sets the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+do { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ int r = 1; \
+ if (!pte_young(__pte)) \
+ r = 0; \
+ else \
+ set_pte_at((__vma)->vm_mm, (__address), \
+ (__ptep), pte_mkold(__pte)); \
+ r; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
+ if (__young) \
+ flush_tlb_page(__vma, __address); \
+ __young; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = *__ptep; \
+ int r = 1; \
+ if (!pte_dirty(__pte)) \
+ r = 0; \
+ else \
+ set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
+ pte_mkclean(__pte)); \
+ r; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
+({ \
+ int __dirty; \
+ __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
+ if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __dirty; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ pte_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep) \
+({ \
+ pte_t __pte; \
+ __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
+ flush_tlb_page(__vma, __address); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+#define lazy_mmu_prot_update(pte) do { } while (0)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary, or
+ * the end address of the range if that comes earlier. Although end might
+ * wrap to 0 only in clear_page_range, __boundary may wrap to 0 throughout.
+ */
+
+#ifndef pgd_addr_end
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
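[Annotation] The p?d_addr_end() and p?d_none_or_clear_bad() helpers exist precisely for range walkers like those in mm/. A hedged sketch of the top-level loop, modeled on that pattern (demo_walk_range is invented; pgd_offset comes from the arch headers):

	static void demo_walk_range(struct mm_struct *mm,
				    unsigned long addr, unsigned long end)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		unsigned long next;

		do {
			next = pgd_addr_end(addr, end);	/* boundary or range end */
			if (pgd_none_or_clear_bad(pgd))	/* skip empty, reset bad */
				continue;
			/* ... recurse into pud/pmd/pte for [addr, next) ... */
		} while (pgd++, addr = next, addr != end);
	}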
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
new file mode 100644
index 00000000000..b1fcda9eac2
--- /dev/null
+++ b/include/asm-generic/resource.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_GENERIC_RESOURCE_H
+#define _ASM_GENERIC_RESOURCE_H
+
+/*
+ * Resource limit IDs
+ *
+ * ( Compatibility detail: there are architectures that have
+ * a different rlimit ID order in the 5-9 range and want
+ * to keep that order for binary compatibility. The reasons
+ * are historic and all new rlimits are identical across all
+ * arches. If an arch has such special order for some rlimits
+ * then it defines them prior including asm-generic/resource.h. )
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+
+#ifndef RLIMIT_RSS
+# define RLIMIT_RSS 5 /* max resident set size */
+#endif
+
+#ifndef RLIMIT_NPROC
+# define RLIMIT_NPROC 6 /* max number of processes */
+#endif
+
+#ifndef RLIMIT_NOFILE
+# define RLIMIT_NOFILE 7 /* max number of open files */
+#endif
+
+#ifndef RLIMIT_MEMLOCK
+# define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#endif
+
+#ifndef RLIMIT_AS
+# define RLIMIT_AS 9 /* address space limit */
+#endif
+
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
+#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
+#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
+
+#define RLIM_NLIMITS 13
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ *
+ * Some architectures override this (for compatibility reasons):
+ */
+#ifndef RLIM_INFINITY
+# define RLIM_INFINITY (~0UL)
+#endif
+
+/*
+ * RLIMIT_STACK default maximum - some architectures override it:
+ */
+#ifndef _STK_LIM_MAX
+# define _STK_LIM_MAX RLIM_INFINITY
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * boot-time rlimit defaults for the init task:
+ */
+#define INIT_RLIMITS \
+{ \
+ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \
+ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
+ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_NPROC] = { 0, 0 }, \
+ [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \
+ [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
+ [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_SIGPENDING] = { 0, 0 }, \
+ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
new file mode 100644
index 00000000000..cef08db34ad
--- /dev/null
+++ b/include/asm-generic/rtc.h
@@ -0,0 +1,213 @@
+/*
+ * include/asm-generic/rtc.h
+ *
+ * Author: Tom Rini <trini@mvista.com>
+ *
+ * Based on:
+ * drivers/char/rtc.c
+ *
+ * Please read the COPYING file for all license details.
+ */
+
+#ifndef __ASM_RTC_H__
+#define __ASM_RTC_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#define RTC_PIE 0x40 /* periodic interrupt enable */
+#define RTC_AIE 0x20 /* alarm interrupt enable */
+#define RTC_UIE 0x10 /* update-finished interrupt enable */
+
+/* some dummy definitions */
+#define RTC_BATT_BAD 0x100 /* battery bad */
+#define RTC_SQWE 0x08 /* enable square-wave output */
+#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
+
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char rtc_is_updating(void)
+{
+ unsigned char uip;
+
+ spin_lock_irq(&rtc_lock);
+ uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irq(&rtc_lock);
+ return uip;
+}
+
+static inline unsigned int get_rtc_time(struct rtc_time *time)
+{
+ unsigned long uip_watchdog = jiffies;
+ unsigned char ctrl;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+
+ /*
+ * Read the RTC once any update in progress is done. The update
+ * can take just over 2ms. We wait 10 to 20ms. There is no need
+ * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+ * If you need to know *exactly* when a second has started, enable
+ * periodic update complete interrupts, (via ioctl) and then
+ * immediately read /dev/rtc which will block until you get the IRQ.
+ * Once the read clears, read the RTC time (again via ioctl). Easy.
+ */
+
+ if (rtc_is_updating() != 0)
+ while (jiffies - uip_watchdog < 2*HZ/100) {
+ barrier();
+ cpu_relax();
+ }
+
+ /*
+ * Only the values that we read from the RTC are set. We leave
+ * tm_wday, tm_yday and tm_isdst untouched. Even though the
+ * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
+ * by the RTC when initially set to a non-zero value.
+ */
+ spin_lock_irq(&rtc_lock);
+ time->tm_sec = CMOS_READ(RTC_SECONDS);
+ time->tm_min = CMOS_READ(RTC_MINUTES);
+ time->tm_hour = CMOS_READ(RTC_HOURS);
+ time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ time->tm_mon = CMOS_READ(RTC_MONTH);
+ time->tm_year = CMOS_READ(RTC_YEAR);
+#ifdef CONFIG_MACH_DECSTATION
+ real_year = CMOS_READ(RTC_DEC_YEAR);
+#endif
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ BCD_TO_BIN(time->tm_sec);
+ BCD_TO_BIN(time->tm_min);
+ BCD_TO_BIN(time->tm_hour);
+ BCD_TO_BIN(time->tm_mday);
+ BCD_TO_BIN(time->tm_mon);
+ BCD_TO_BIN(time->tm_year);
+ }
+
+#ifdef CONFIG_MACH_DECSTATION
+ time->tm_year += real_year - 72;
+#endif
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time;
+ */
+ if (time->tm_year <= 69)
+ time->tm_year += 100;
+
+ time->tm_mon--;
+
+ return RTC_24H;
+}
+
+/* Set the current date and time in the real time clock. */
+static inline int set_rtc_time(struct rtc_time *time)
+{
+ unsigned char mon, day, hrs, min, sec;
+ unsigned char save_control, save_freq_select;
+ unsigned int yrs;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_yrs, leap_yr;
+#endif
+
+ yrs = time->tm_year;
+ mon = time->tm_mon + 1; /* tm_mon starts at zero */
+ day = time->tm_mday;
+ hrs = time->tm_hour;
+ min = time->tm_min;
+ sec = time->tm_sec;
+
+ if (yrs > 255) /* They are unsigned */
+ return -EINVAL;
+
+ spin_lock_irq(&rtc_lock);
+#ifdef CONFIG_MACH_DECSTATION
+ real_yrs = yrs;
+ leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
+ !((yrs + 1900) % 400));
+ yrs = 72;
+
+ /*
+ * We want to keep the year set to 73 until March
+ * for non-leap years, so that Feb, 29th is handled
+ * correctly.
+ */
+ if (!leap_yr && mon < 3) {
+ real_yrs--;
+ yrs = 73;
+ }
+#endif
+	/*
+	 * These limits and adjustments are independent of whether the
+	 * chip is in binary mode.
+	 */
+ if (yrs > 169) {
+ spin_unlock_irq(&rtc_lock);
+ return -EINVAL;
+ }
+
+ if (yrs >= 100)
+ yrs -= 100;
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(sec);
+ BIN_TO_BCD(min);
+ BIN_TO_BCD(hrs);
+ BIN_TO_BCD(day);
+ BIN_TO_BCD(mon);
+ BIN_TO_BCD(yrs);
+ }
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_MACH_DECSTATION
+ CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+#endif
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ spin_unlock_irq(&rtc_lock);
+
+ return 0;
+}
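
For reference, a minimal stand-alone sketch of the BCD encoding that the BIN_TO_BCD/BCD_TO_BIN macros (provided elsewhere in-tree) implement; set_rtc_time() relies on it whenever the chip is not in binary mode. The helper names are hypothetical:

static inline unsigned char example_bin2bcd(unsigned char bin)
{
	return ((bin / 10) << 4) | (bin % 10);	/* 59 -> 0x59 */
}

static inline unsigned char example_bcd2bin(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);	/* 0x59 -> 59 */
}
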
+
+static inline unsigned int get_rtc_ss(void)
+{
+ struct rtc_time h;
+
+ get_rtc_time(&h);
+ return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_RTC_H__ */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
new file mode 100644
index 00000000000..976ac29598b
--- /dev/null
+++ b/include/asm-generic/sections.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_GENERIC_SECTIONS_H_
+#define _ASM_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char _end[];
+
+#endif /* _ASM_GENERIC_SECTIONS_H_ */
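
Since the section boundaries are exported as addresses, sizes fall out of plain pointer arithmetic; a hypothetical helper as illustration:

static inline unsigned long example_kernel_text_size(void)
{
	return _etext - _stext;	/* bytes of kernel .text */
}
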
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
new file mode 100644
index 00000000000..9cac8e8dde5
--- /dev/null
+++ b/include/asm-generic/siginfo.h
@@ -0,0 +1,287 @@
+#ifndef _ASM_GENERIC_SIGINFO_H
+#define _ASM_GENERIC_SIGINFO_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef union sigval {
+ int sival_int;
+ void __user *sival_ptr;
+} sigval_t;
+
+/*
+ * This is the size (including padding) of the part of the
+ * struct siginfo that is before the union.
+ */
+#ifndef __ARCH_SI_PREAMBLE_SIZE
+#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
+#endif
+
+#define SI_MAX_SIZE 128
+#ifndef SI_PAD_SIZE
+#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
+#endif
+
+#ifndef __ARCH_SI_UID_T
+#define __ARCH_SI_UID_T uid_t
+#endif
+
+/*
+ * The default "si_band" type is "long", as specified by POSIX.
+ * However, some architectures want to override this to "int"
+ * for historical compatibility reasons, so we allow that.
+ */
+#ifndef __ARCH_SI_BAND_T
+#define __ARCH_SI_BAND_T long
+#endif
+
+#ifndef HAVE_ARCH_SIGINFO_T
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+		sigval_t _sigval;	/* same as _rt._sigval below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+#endif
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_tid _sifields._timer._tid
+#define si_overrun _sifields._timer._overrun
+#define si_sys_private _sifields._timer._sys_private
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#ifdef __ARCH_SI_TRAPNO
+#define si_trapno _sifields._sigfault._trapno
+#endif
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+
+#ifdef __KERNEL__
+#define __SI_MASK 0xffff0000u
+#define __SI_KILL (0 << 16)
+#define __SI_TIMER (1 << 16)
+#define __SI_POLL (2 << 16)
+#define __SI_FAULT (3 << 16)
+#define __SI_CHLD (4 << 16)
+#define __SI_RT (5 << 16)
+#define __SI_MESGQ (6 << 16)
+#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
+#else
+#define __SI_KILL 0
+#define __SI_TIMER 0
+#define __SI_POLL 0
+#define __SI_FAULT 0
+#define __SI_CHLD 0
+#define __SI_RT 0
+#define __SI_MESGQ 0
+#define __SI_CODE(T,N) (N)
+#endif
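
To make the split concrete, a hypothetical helper illustrating how the userspace-visible part of a kernel-generated fault code relates to the in-kernel encoding (the real copy-out path lives in kernel/signal.c):

static inline int example_user_si_code(int si_code)
{
	/*
	 * e.g. SEGV_MAPERR == __SI_CODE(__SI_FAULT, 1) == (3 << 16) | 1,
	 * of which userspace only ever sees the low 16 bits, i.e. 1.
	 */
	return si_code & ~__SI_MASK;
}
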
+
+/*
+ * si_code values
+ * Digital reserves positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
+#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+#define SI_TKILL -6 /* sent by tkill system call */
+#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+
+#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
+#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
+#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
+#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
+#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
+#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
+#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
+#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
+#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
+#define NSIGILL 8
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
+#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
+#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
+#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
+#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
+#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
+#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
+#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
+#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
+#define BUS_ADRERR	(__SI_FAULT|2)	/* non-existent physical address */
+#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
+#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
+#define NSIGTRAP 2
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
+#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
+#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
+#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
+#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
+#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN (__SI_POLL|1) /* data input available */
+#define POLL_OUT (__SI_POLL|2) /* output buffers available */
+#define POLL_MSG (__SI_POLL|3) /* input message available */
+#define POLL_ERR (__SI_POLL|4) /* i/o error */
+#define POLL_PRI (__SI_POLL|5) /* high priority input available */
+#define POLL_HUP (__SI_POLL|6) /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+#define SIGEV_THREAD_ID 4 /* deliver to thread */
+
+#define SIGEV_MAX_SIZE 64
+#ifndef SIGEV_PAD_SIZE
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+#endif
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+ int _tid;
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+#define sigev_notify_thread_id _sigev_un._tid
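
A userspace-flavoured sketch (assumptions: the POSIX timer API, i.e. timer_create() from <time.h>, plus <signal.h> and <string.h>) of directing a timer's notification at one specific thread via the fields above; example_thread_timer() is hypothetical:

static int example_thread_timer(pid_t tid, timer_t *out)
{
	struct sigevent sev;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_THREAD_ID;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_notify_thread_id = tid;	/* macro defined above */

	return timer_create(CLOCK_MONOTONIC, &sev, out);
}
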
+
+#ifdef __KERNEL__
+
+struct siginfo;
+void do_schedule_next_timer(struct siginfo *info);
+
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <linux/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+		/* _sigchld is currently the largest known union member */
+ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
+extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
new file mode 100644
index 00000000000..1d01043e797
--- /dev/null
+++ b/include/asm-generic/statfs.h
@@ -0,0 +1,51 @@
+#ifndef _GENERIC_STATFS_H
+#define _GENERIC_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+struct statfs {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u32 f_blocks;
+ __u32 f_bfree;
+ __u32 f_bavail;
+ __u32 f_files;
+ __u32 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+};
+
+struct statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+};
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+};
+
+#endif
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
new file mode 100644
index 00000000000..1e58ca39592
--- /dev/null
+++ b/include/asm-generic/termios.h
@@ -0,0 +1,69 @@
+/* termios.h: generic termios/termio user copying/translation
+ */
+
+#ifndef _ASM_GENERIC_TERMIOS_H
+#define _ASM_GENERIC_TERMIOS_H
+
+#include <asm/uaccess.h>
+
+#ifndef __ARCH_TERMIO_GETPUT
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct termios *termios,
+ struct termio __user *termio)
+{
+ unsigned short tmp;
+
+ if (get_user(tmp, &termio->c_iflag) < 0)
+ goto fault;
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
+
+ if (get_user(tmp, &termio->c_oflag) < 0)
+ goto fault;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
+
+ if (get_user(tmp, &termio->c_cflag) < 0)
+ goto fault;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
+
+ if (get_user(tmp, &termio->c_lflag) < 0)
+ goto fault;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
+
+ if (get_user(termios->c_line, &termio->c_line) < 0)
+ goto fault;
+
+ if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
+ goto fault;
+
+ return 0;
+
+ fault:
+ return -EFAULT;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct termios *termios)
+{
+ if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
+ put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
+ put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
+ put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
+ put_user(termios->c_line, &termio->c_line) < 0 ||
+ copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
+ return -EFAULT;
+
+ return 0;
+}
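
A hypothetical driver-side sketch of where these translators are used, e.g. servicing a legacy TCGETA ioctl; example_tcgeta() is not part of this header:

static int example_tcgeta(struct termios *kernel_state,
			  struct termio __user *arg)
{
	/* returns 0 or -EFAULT, suitable for passing straight back */
	return kernel_termios_to_user_termio(arg, kernel_state);
}
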
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __ARCH_TERMIO_GETPUT */
+
+#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
new file mode 100644
index 00000000000..faff403e106
--- /dev/null
+++ b/include/asm-generic/tlb.h
@@ -0,0 +1,160 @@
+/* asm-generic/tlb.h
+ *
+ * Generic TLB shootdown code
+ *
+ * Copyright 2001 Red Hat, Inc.
+ * Based on code from mm/memory.c Copyright Linus Torvalds and others.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC__TLB_H
+#define _ASM_GENERIC__TLB_H
+
+#include <linux/config.h>
+#include <linux/swap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much..
+ */
+#ifdef CONFIG_SMP
+ #define FREE_PTE_NR 506
+ #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
+#else
+ #define FREE_PTE_NR 1
+ #define tlb_fast_mode(tlb) 1
+#endif
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page. This structure
+ * can be per-CPU or per-MM as the page table lock is held for the duration of
+ * TLB shootdown.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int nr; /* set to ~0U means fast mode */
+	unsigned int need_flush;	/* Really unmapped some ptes? */
+ unsigned int fullmm; /* non-zero means full mm flush */
+ unsigned long freed;
+ struct page * pages[FREE_PTE_NR];
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/* tlb_gather_mmu
+ * Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
+
+ tlb->mm = mm;
+
+ /* Use fast mode if only one CPU is online */
+ tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+
+ tlb->fullmm = full_mm_flush;
+ tlb->freed = 0;
+
+ return tlb;
+}
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (!tlb->need_flush)
+ return;
+ tlb->need_flush = 0;
+ tlb_flush(tlb);
+ if (!tlb_fast_mode(tlb)) {
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ }
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required. The page table lock is still held at this point.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ int freed = tlb->freed;
+ struct mm_struct *mm = tlb->mm;
+ int rss = get_mm_counter(mm, rss);
+
+ if (rss < freed)
+ freed = rss;
+ add_mm_counter(mm, rss, -freed);
+ tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+}
+
+static inline unsigned int
+tlb_is_full_mm(struct mmu_gather *tlb)
+{
+ return tlb->fullmm;
+}
+
+/* tlb_remove_page
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ * handling the additional races in SMP caused by other CPUs caching valid
+ * mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ return;
+ }
+ tlb->pages[tlb->nr++] = page;
+ if (tlb->nr >= FREE_PTE_NR)
+ tlb_flush_mmu(tlb, 0, 0);
+}
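
A sketch of the full gather/remove/finish cycle the mm core drives (hypothetical caller; assumes the page table lock is held for the duration, as the comments above require):

static void example_shootdown(struct mm_struct *mm, struct page *page,
			      unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	tlb_remove_page(tlb, page);	/* queued, or freed at once in fast mode */
	tlb_finish_mmu(tlb, start, end);	/* flush TLBs, free queued pages */
}
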
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that PTEs were really unmapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define pte_free_tlb(tlb, ptep) \
+ do { \
+ tlb->need_flush = 1; \
+ __pte_free_tlb(tlb, ptep); \
+ } while (0)
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+#define pud_free_tlb(tlb, pudp) \
+ do { \
+ tlb->need_flush = 1; \
+ __pud_free_tlb(tlb, pudp); \
+ } while (0)
+#endif
+
+#define pmd_free_tlb(tlb, pmdp) \
+ do { \
+ tlb->need_flush = 1; \
+ __pmd_free_tlb(tlb, pmdp); \
+ } while (0)
+
+#define tlb_migrate_finish(mm) do {} while (0)
+
+#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
new file mode 100644
index 00000000000..ec96e8b0f19
--- /dev/null
+++ b/include/asm-generic/topology.h
@@ -0,0 +1,48 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+/* Other architectures wishing to use this simple topology API should fill
+ in the below functions as appropriate in their own <asm/topology.h> file. */
+#ifndef cpu_to_node
+#define cpu_to_node(cpu) (0)
+#endif
+#ifndef parent_node
+#define parent_node(node) (0)
+#endif
+#ifndef node_to_cpumask
+#define node_to_cpumask(node) (cpu_online_map)
+#endif
+#ifndef node_to_first_cpu
+#define node_to_first_cpu(node) (0)
+#endif
+#ifndef pcibus_to_cpumask
+#define pcibus_to_cpumask(bus) (cpu_online_map)
+#endif
+
+#endif /* _ASM_GENERIC_TOPOLOGY_H */
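
Illustration of the intended override pattern: an architecture's <asm/topology.h> defines its real mappings first, then includes this file so anything left undefined picks up the flat defaults. cpu_to_node_map is a hypothetical per-arch table:

#define cpu_to_node(cpu)	(cpu_to_node_map[(cpu)])
#include <asm-generic/topology.h>
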
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
new file mode 100644
index 00000000000..549cb3a1640
--- /dev/null
+++ b/include/asm-generic/uaccess.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_GENERIC_UACCESS_H_
+#define _ASM_GENERIC_UACCESS_H_
+
+/*
+ * This macro should be used instead of __get_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __get_user_unaligned(x, ptr) \
+({ \
+	__typeof__ (*(ptr)) __x; \
+	int __gu_err = __copy_from_user(&__x, (ptr), sizeof(*(ptr))) \
+		? -EFAULT : 0; \
+	(x) = __x; \
+	__gu_err; \
+})
+
+
+/*
+ * This macro should be used instead of __put_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __put_user_unaligned(x, ptr) \
+({ \
+ __typeof__ (*(ptr)) __x = (x); \
+ __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
+})
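
Usage sketch (hypothetical caller): both helpers evaluate to 0 or -EFAULT, so they can be checked the same way as __get_user()/__put_user():

static inline int example_fetch_u32(__u32 *dst, const __u32 __user *src)
{
	return __get_user_unaligned(*dst, src);
}
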
+
+#endif /* _ASM_GENERIC_UACCESS_H_ */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 00000000000..c856a43e3b4
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,121 @@
+#ifndef _ASM_GENERIC_UNALIGNED_H_
+#define _ASM_GENERIC_UNALIGNED_H_
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents.
+ *
+ * This is based almost entirely upon Richard Henderson's
+ * asm-alpha/unaligned.h implementation. Some comments were
+ * taken from David Mosberger's asm-ia64/unaligned.h header.
+ */
+
+#include <linux/types.h>
+
+/*
+ * The main single-value unaligned transfer routines.
+ */
+#define get_unaligned(ptr) \
+ ((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
+#define put_unaligned(x,ptr) \
+ __put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
+
+/*
+ * This function doesn't actually exist. The idea is that when
+ * someone uses the macros below with an unsupported size (datatype),
+ * the linker will alert us to the problem via an unresolved reference
+ * error.
+ */
+extern void bad_unaligned_access_length(void) __attribute__((noreturn));
+
+struct __una_u64 { __u64 x __attribute__((packed)); };
+struct __una_u32 { __u32 x __attribute__((packed)); };
+struct __una_u16 { __u16 x __attribute__((packed)); };
+
+/*
+ * Elemental unaligned loads
+ */
+
+static inline unsigned long __uldq(const __u64 *addr)
+{
+ const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
+ return ptr->x;
+}
+
+static inline unsigned long __uldl(const __u32 *addr)
+{
+ const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
+ return ptr->x;
+}
+
+static inline unsigned long __uldw(const __u16 *addr)
+{
+ const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
+ return ptr->x;
+}
+
+/*
+ * Elemental unaligned stores
+ */
+
+static inline void __ustq(__u64 val, __u64 *addr)
+{
+ struct __una_u64 *ptr = (struct __una_u64 *) addr;
+ ptr->x = val;
+}
+
+static inline void __ustl(__u32 val, __u32 *addr)
+{
+ struct __una_u32 *ptr = (struct __una_u32 *) addr;
+ ptr->x = val;
+}
+
+static inline void __ustw(__u16 val, __u16 *addr)
+{
+ struct __una_u16 *ptr = (struct __una_u16 *) addr;
+ ptr->x = val;
+}
+
+static inline unsigned long __get_unaligned(const void *ptr, size_t size)
+{
+ unsigned long val;
+ switch (size) {
+ case 1:
+ val = *(const __u8 *)ptr;
+ break;
+ case 2:
+ val = __uldw((const __u16 *)ptr);
+ break;
+ case 4:
+ val = __uldl((const __u32 *)ptr);
+ break;
+ case 8:
+ val = __uldq((const __u64 *)ptr);
+ break;
+ default:
+ bad_unaligned_access_length();
+	}
+ return val;
+}
+
+static inline void __put_unaligned(unsigned long val, void *ptr, size_t size)
+{
+ switch (size) {
+ case 1:
+ *(__u8 *)ptr = val;
+ break;
+ case 2:
+ __ustw(val, (__u16 *)ptr);
+ break;
+ case 4:
+ __ustl(val, (__u32 *)ptr);
+ break;
+ case 8:
+ __ustq(val, (__u64 *)ptr);
+ break;
+ default:
+ bad_unaligned_access_length();
+	}
+}
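
Usage sketch: pulling a 32-bit field out of a byte stream at an arbitrary offset; example_parse() is hypothetical:

static inline __u32 example_parse(const unsigned char *buf)
{
	/* buf + 1 is deliberately misaligned for a __u32 load */
	return get_unaligned((const __u32 *)(buf + 1));
}
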
+
+#endif /* _ASM_GENERIC_UNALIGNED_H_ */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
new file mode 100644
index 00000000000..99cef06a364
--- /dev/null
+++ b/include/asm-generic/vmlinux.lds.h
@@ -0,0 +1,90 @@
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+#ifndef VMLINUX_SYMBOL
+#define VMLINUX_SYMBOL(_sym_) _sym_
+#endif
+
+#define RODATA \
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ *(.rodata) *(.rodata.*) \
+ *(__vermagic) /* Kernel version magic */ \
+ } \
+ \
+ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
+ *(.rodata1) \
+ } \
+ \
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+ *(.pci_fixup_early) \
+ VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+ *(.pci_fixup_header) \
+ VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+ *(.pci_fixup_final) \
+ VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+ *(.pci_fixup_enable) \
+ VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab) = .; \
+ *(__ksymtab) \
+ VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+ *(__ksymtab_gpl) \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab) = .; \
+ *(__kcrctab) \
+ VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+ *(__kcrctab_gpl) \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+ *(__ksymtab_strings) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___param) = .; \
+ *(__param) \
+ VMLINUX_SYMBOL(__stop___param) = .; \
+ }
+
+#define SECURITY_INIT \
+ .security_initcall.init : { \
+ VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ *(.security_initcall.init) \
+ VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ }
+
+#define SCHED_TEXT \
+ VMLINUX_SYMBOL(__sched_text_start) = .; \
+ *(.sched.text) \
+ VMLINUX_SYMBOL(__sched_text_end) = .;
+
+#define LOCK_TEXT \
+ VMLINUX_SYMBOL(__lock_text_start) = .; \
+ *(.spinlock.text) \
+ VMLINUX_SYMBOL(__lock_text_end) = .;
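
A sketch of how an architecture's vmlinux.lds.S might consume these macros (hypothetical fragment; real scripts carry far more sections and symbols):

#include <asm-generic/vmlinux.lds.h>

SECTIONS
{
	.text : {
		*(.text)
		SCHED_TEXT
		LOCK_TEXT
	}
	RODATA
	SECURITY_INIT
}
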
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
new file mode 100644
index 00000000000..aaab875e1a3
--- /dev/null
+++ b/include/asm-generic/xor.h
@@ -0,0 +1,718 @@
+/*
+ * include/asm-generic/xor.h
+ *
+ * Generic optimized RAID-5 checksumming functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/processor.h>
+
+static void
+xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
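+		/*
+		 * One block was held back from the loop count, so the
+		 * final block is handled via the goto after the loop,
+		 * which re-enters at once_more below without issuing
+		 * prefetches past the end of the buffers.
+		 */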
+ once_more:
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static struct xor_block_template xor_block_8regs = {
+ .name = "8regs",
+ .do_2 = xor_8regs_2,
+ .do_3 = xor_8regs_3,
+ .do_4 = xor_8regs_4,
+ .do_5 = xor_8regs_5,
+};
+
+static struct xor_block_template xor_block_32regs = {
+ .name = "32regs",
+ .do_2 = xor_32regs_2,
+ .do_3 = xor_32regs_3,
+ .do_4 = xor_32regs_4,
+ .do_5 = xor_32regs_5,
+};
+
+static struct xor_block_template xor_block_8regs_p = {
+ .name = "8regs_prefetch",
+ .do_2 = xor_8regs_p_2,
+ .do_3 = xor_8regs_p_3,
+ .do_4 = xor_8regs_p_4,
+ .do_5 = xor_8regs_p_5,
+};
+
+static struct xor_block_template xor_block_32regs_p = {
+ .name = "32regs_prefetch",
+ .do_2 = xor_32regs_p_2,
+ .do_3 = xor_32regs_p_3,
+ .do_4 = xor_32regs_p_4,
+ .do_5 = xor_32regs_p_5,
+};
+
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ } while (0)
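
Finally, a usage sketch (hypothetical caller): the templates are plain function tables, so XORing one 4 KiB source into a destination reduces to a single do_2 call. Lengths must be a multiple of 8 * sizeof(long), as the unrolled loops above assume:

static void example_xor_page(unsigned long *dst, unsigned long *src)
{
	xor_block_8regs.do_2(4096, dst, src);	/* dst[i] ^= src[i] */
}
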