Diffstat (limited to 'arch/sh/include')
-rw-r--r--  arch/sh/include/asm/adc.h              2
-rw-r--r--  arch/sh/include/asm/addrspace.h        3
-rw-r--r--  arch/sh/include/asm/bitops.h           4
-rw-r--r--  arch/sh/include/asm/cache.h            2
-rw-r--r--  arch/sh/include/asm/cacheflush.h       3
-rw-r--r--  arch/sh/include/asm/dma.h              2
-rw-r--r--  arch/sh/include/asm/elf.h              2
-rw-r--r--  arch/sh/include/asm/fpu.h              5
-rw-r--r--  arch/sh/include/asm/freq.h             2
-rw-r--r--  arch/sh/include/asm/futex.h            3
-rw-r--r--  arch/sh/include/asm/io.h             120
-rw-r--r--  arch/sh/include/asm/kdebug.h           3
-rw-r--r--  arch/sh/include/asm/mmu_context.h      2
-rw-r--r--  arch/sh/include/asm/mmzone.h           3
-rw-r--r--  arch/sh/include/asm/pci.h              4
-rw-r--r--  arch/sh/include/asm/processor_32.h     2
-rw-r--r--  arch/sh/include/asm/segment.h          3
-rw-r--r--  arch/sh/include/asm/smc37c93x.h        4
-rw-r--r--  arch/sh/include/asm/sparsemem.h        7
-rw-r--r--  arch/sh/include/asm/stacktrace.h       2
-rw-r--r--  arch/sh/include/asm/string_32.h       30
-rw-r--r--  arch/sh/include/asm/syscall_32.h       5
-rw-r--r--  arch/sh/include/asm/syscalls_32.h      3
-rw-r--r--  arch/sh/include/asm/thread_info.h      5
-rw-r--r--  arch/sh/include/asm/uaccess_32.h      53
-rw-r--r--  arch/sh/include/asm/watchdog.h         2
26 files changed, 74 insertions(+), 202 deletions(-)
diff --git a/arch/sh/include/asm/adc.h b/arch/sh/include/asm/adc.h
index 99ec66849559..feccfe639e38 100644
--- a/arch/sh/include/asm/adc.h
+++ b/arch/sh/include/asm/adc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ADC_H
#define __ASM_ADC_H
-#ifdef __KERNEL__
/*
* Copyright (C) 2004 Andriy Skulysh
*/
@@ -10,5 +9,4 @@
int adc_single(unsigned int channel);
-#endif /* __KERNEL__ */
#endif /* __ASM_ADC_H */
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 34bfbcddcce0..468fba333e89 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -7,8 +7,6 @@
#ifndef __ASM_SH_ADDRSPACE_H
#define __ASM_SH_ADDRSPACE_H
-#ifdef __KERNEL__
-
#include <cpu/addrspace.h>
/* If this CPU supports segmentation, hook up the helpers */
@@ -62,5 +60,4 @@
#define P3_ADDR_MAX P4SEG
#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 445dd14c448a..450b5854d7c6 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H
-#ifdef __KERNEL__
-
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
@@ -71,6 +69,4 @@ static inline unsigned long __ffs(unsigned long word)
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#endif /* __KERNEL__ */
-
#endif /* __ASM_SH_BITOPS_H */
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 2408ac4873aa..a293343456af 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -8,7 +8,6 @@
*/
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
-#ifdef __KERNEL__
#include <linux/init.h>
#include <cpu/cache.h>
@@ -44,5 +43,4 @@ struct cache_info {
unsigned long flags;
};
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index fe7400079b97..4486a865ff62 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
/*
@@ -109,5 +107,4 @@ static inline void *sh_cacheop_vaddr(void *vaddr)
return vaddr;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 4d5a21a891c0..17d23ae98c77 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -6,7 +6,6 @@
*/
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
-#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/wait.h>
@@ -144,5 +143,4 @@ extern int isa_dma_bridge_buggy;
#define isa_dma_bridge_buggy (0)
#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_DMA_H */
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index 7661fb5d548a..2862d6d1cb64 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -90,7 +90,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#endif
#define ELF_ARCH EM_SH
-#ifdef __KERNEL__
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -209,5 +208,4 @@ do { \
NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \
} while (0)
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_ELF_H */
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 43cfaf929aa7..04584be8986c 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -37,11 +37,6 @@ struct user_regset;
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern int init_fpu(struct task_struct *);
-extern int fpregs_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf);
-
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
diff --git a/arch/sh/include/asm/freq.h b/arch/sh/include/asm/freq.h
index 18133bf83738..87c23621b7ae 100644
--- a/arch/sh/include/asm/freq.h
+++ b/arch/sh/include/asm/freq.h
@@ -6,9 +6,7 @@
*/
#ifndef __ASM_SH_FREQ_H
#define __ASM_SH_FREQ_H
-#ifdef __KERNEL__
#include <cpu/freq.h>
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_FREQ_H */
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index b39cda09fb95..b70f3fce6ed7 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_FUTEX_H
#define __ASM_SH_FUTEX_H
-#ifdef __KERNEL__
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -71,5 +69,4 @@ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
return ret;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_FUTEX_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 26f0f9b4658b..6d5c6463bc07 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -17,13 +17,12 @@
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
+#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm-generic/iomap.h>
-#ifdef __KERNEL__
#define __IO_PREFIX generic
#include <asm/io_generic.h>
-#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>
@@ -243,125 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr,
#define phys_to_virt(address) (__va(address))
#endif
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
#ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
-void iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
- return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
- phys_addr_t last_addr = offset + size - 1;
-
- /*
- * For P1 and P2 space this is trivial, as everything is already
- * mapped. Uncached access for P1 addresses are done through P2.
- * In the P3 case or for addresses outside of the 29-bit space,
- * mapping must be done by the PMB or by using page tables.
- */
- if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
- u64 flags = pgprot_val(prot);
-
- /*
- * Anything using the legacy PTEA space attributes needs
- * to be kicked down to page table mappings.
- */
- if (unlikely(flags & _PAGE_PCC_MASK))
- return NULL;
- if (unlikely(flags & _PAGE_CACHABLE))
- return (void __iomem *)P1SEGADDR(offset);
-
- return (void __iomem *)P2SEGADDR(offset);
- }
-
- /* P4 above the store queues are always mapped. */
- if (unlikely(offset >= P3_ADDR_MAX))
- return (void __iomem *)P4SEGADDR(offset);
-#endif
-
- return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
- void __iomem *ret;
-
- ret = __ioremap_trapped(offset, size);
- if (ret)
- return ret;
-
- ret = __ioremap_29bit(offset, size, prot);
- if (ret)
- return ret;
-
- return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
-static inline void iounmap(void __iomem *addr) {}
-#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
- return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+ return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+ __builtin_return_address(0));
}
static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
- return __ioremap_mode(offset, size, PAGE_KERNEL);
+ return __ioremap_caller(offset, size, PAGE_KERNEL,
+ __builtin_return_address(0));
}
#define ioremap_cache ioremap_cache
#ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
{
- return __ioremap_mode(offset, size, __pgprot(flags));
+ return __ioremap_caller(offset, size, __pgprot(flags),
+ __builtin_return_address(0));
}
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
-#ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
-extern int iounmap_fixed(void __iomem *);
-extern void ioremap_fixed_init(void);
-#else
-static inline void __iomem *
-ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
-{
- BUG();
- return NULL;
-}
-
-static inline void ioremap_fixed_init(void) { }
-static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
-#endif
+#else /* CONFIG_MMU */
+#define iounmap(addr) do { } while (0)
+#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
#define ioremap_uc ioremap
@@ -380,6 +292,4 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
-#endif /* __KERNEL__ */
-
#endif /* __ASM_SH_IO_H */
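Note: with this change both ioremap() and ioremap_cache() collapse into direct __ioremap_caller() calls, and the no-MMU stubs become plain macros. A minimal caller sketch, assuming a hypothetical device window (DEV_PHYS_BASE and the 0x100 size are made-up values, not from this patch):

static void __iomem *map_example(void)
{
	void __iomem *regs;

	/* on CONFIG_MMU this now resolves through __ioremap_caller() */
	regs = ioremap(DEV_PHYS_BASE, 0x100);	/* DEV_PHYS_BASE is an assumed constant */
	if (!regs)
		return NULL;

	writeb(0x01, regs);	/* token access through the uncached mapping */
	return regs;		/* the caller is expected to iounmap(regs) later */
}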
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 960545306afa..de8693fabb1d 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -12,8 +12,7 @@ enum die_val {
};
/* arch/sh/kernel/dumpstack.c */
-extern void printk_address(unsigned long address, int reliable,
- const char *loglvl);
+extern void printk_address(unsigned long address, int reliable);
extern void dump_mem(const char *str, const char *loglvl,
unsigned long bottom, unsigned long top);
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 48e67d544d53..f664e51e8a15 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
-#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
@@ -177,5 +176,4 @@ static inline void disable_mmu(void)
#define disable_mmu() do { } while (0)
#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index cbaee1d1b673..6552a088dc97 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
-#ifdef __KERNEL__
-
#ifdef CONFIG_NEED_MULTIPLE_NODES
#include <linux/numa.h>
@@ -44,5 +42,4 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
/* arch/sh/mm/init.c */
void __init allocate_pgdat(unsigned int nid);
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMZONE_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 10a36b1cf2ea..ad22e88c6657 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_PCI_H
#define __ASM_SH_PCI_H
-#ifdef __KERNEL__
-
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
@@ -96,6 +94,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_PCI_H */
-
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index d44409413418..aa92cc933889 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -8,7 +8,6 @@
#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H
-#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/linkage.h>
@@ -203,5 +202,4 @@ static inline void prefetchw(const void *x)
}
#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_32_H */
diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h
index 33d1d28057cb..02e54a3335d6 100644
--- a/arch/sh/include/asm/segment.h
+++ b/arch/sh/include/asm/segment.h
@@ -24,8 +24,7 @@ typedef struct {
#define USER_DS KERNEL_DS
#endif
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
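The new uaccess_kernel() answers the same question generic code used to ask via segment_eq(get_fs(), KERNEL_DS). A minimal sketch of the usual set_fs() bracket it is checked against (hypothetical helper, not part of this patch):

static void with_kernel_ds_example(void)
{
	mm_segment_t old_fs = get_fs();		/* save the current addr_limit */

	set_fs(KERNEL_DS);
	WARN_ON(!uaccess_kernel());		/* true while addr_limit == KERNEL_DS */
	set_fs(old_fs);				/* restore the saved limit */
}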
diff --git a/arch/sh/include/asm/smc37c93x.h b/arch/sh/include/asm/smc37c93x.h
index f054c30a171a..891f2f8f2fd0 100644
--- a/arch/sh/include/asm/smc37c93x.h
+++ b/arch/sh/include/asm/smc37c93x.h
@@ -112,8 +112,8 @@ typedef struct uart_reg {
#define FCR_RFRES 0x0200 /* Receiver FIFO reset */
#define FCR_TFRES 0x0400 /* Transmitter FIFO reset */
#define FCR_DMA 0x0800 /* DMA mode select */
-#define FCR_RTL 0x4000 /* Receiver triger (LSB) */
-#define FCR_RTM 0x8000 /* Receiver triger (MSB) */
+#define FCR_RTL 0x4000 /* Receiver trigger (LSB) */
+#define FCR_RTM 0x8000 /* Receiver trigger (MSB) */
/* Line Control Register */
diff --git a/arch/sh/include/asm/sparsemem.h b/arch/sh/include/asm/sparsemem.h
index 4eb899751e45..4703cbe23844 100644
--- a/arch/sh/include/asm/sparsemem.h
+++ b/arch/sh/include/asm/sparsemem.h
@@ -2,16 +2,11 @@
#ifndef __ASM_SH_SPARSEMEM_H
#define __ASM_SH_SPARSEMEM_H
-#ifdef __KERNEL__
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
- * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
- * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ * MAX_PHYSMEM_BITS 2^N: how much physical address space we have
*/
#define SECTION_SIZE_BITS 26
-#define MAX_PHYSADDR_BITS 32
#define MAX_PHYSMEM_BITS 32
-#endif
-
#endif /* __ASM_SH_SPARSEMEM_H */
diff --git a/arch/sh/include/asm/stacktrace.h b/arch/sh/include/asm/stacktrace.h
index 50c173c0b9f5..4f98cdc64ec5 100644
--- a/arch/sh/include/asm/stacktrace.h
+++ b/arch/sh/include/asm/stacktrace.h
@@ -12,8 +12,6 @@
struct stacktrace_ops {
void (*address)(void *data, unsigned long address, int reliable);
- /* On negative return stop dumping */
- int (*stack)(void *data, char *name);
};
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/sh/include/asm/string_32.h b/arch/sh/include/asm/string_32.h
index 3558b1d7123e..a276b193d3b4 100644
--- a/arch/sh/include/asm/string_32.h
+++ b/arch/sh/include/asm/string_32.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_STRING_H
#define __ASM_SH_STRING_H
-#ifdef __KERNEL__
-
/*
* Copyright (C) 1999 Niibe Yutaka
* But consider these trivial functions to be public domain.
@@ -28,32 +26,6 @@ static inline char *strcpy(char *__dest, const char *__src)
return __xdest;
}
-#define __HAVE_ARCH_STRNCPY
-static inline char *strncpy(char *__dest, const char *__src, size_t __n)
-{
- register char *__xdest = __dest;
- unsigned long __dummy;
-
- if (__n == 0)
- return __xdest;
-
- __asm__ __volatile__(
- "1:\n"
- "mov.b @%1+, %2\n\t"
- "mov.b %2, @%0\n\t"
- "cmp/eq #0, %2\n\t"
- "bt/s 2f\n\t"
- " cmp/eq %5,%1\n\t"
- "bf/s 1b\n\t"
- " add #1, %0\n"
- "2:"
- : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
- : "0" (__dest), "1" (__src), "r" (__src+__n)
- : "memory", "t");
-
- return __xdest;
-}
-
#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char *__cs, const char *__ct)
{
@@ -127,6 +99,4 @@ extern void *memchr(const void *__s, int __c, size_t __n);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *);
-#endif /* __KERNEL__ */
-
#endif /* __ASM_SH_STRING_H */
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 0b5b8e75edac..cb51a7528384 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -40,10 +40,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- if (error)
- regs->regs[0] = -error;
- else
- regs->regs[0] = val;
+ regs->regs[0] = (long) error ?: val;
}
static inline void syscall_get_arguments(struct task_struct *task,
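The GNU "?:" shorthand above reuses its first operand whenever it is non-zero; an open-coded equivalent (hypothetical name, shown only for illustration) would read:

static inline void syscall_set_return_value_expanded(struct pt_regs *regs,
						     int error, long val)
{
	long err = (long) error;	/* expected to already be a negative errno, or 0 */

	regs->regs[0] = err ? err : val;
}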
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
index 9f9faf63b48c..5c555b864fe0 100644
--- a/arch/sh/include/asm/syscalls_32.h
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_SYSCALLS_32_H
#define __ASM_SH_SYSCALLS_32_H
-#ifdef __KERNEL__
-
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
@@ -26,5 +24,4 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
unsigned long thread_info_flags);
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_32_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 6404be69d5fa..243ea5150aa0 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -10,8 +10,6 @@
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
-#ifdef __KERNEL__
-
#include <asm/page.h>
/*
@@ -170,7 +168,4 @@ static inline unsigned int get_thread_fault_code(void)
}
#endif /* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
#endif /* __ASM_SH_THREAD_INFO_H */
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index 624cf55acc27..5d7ddc092afd 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -26,6 +26,9 @@ do { \
case 4: \
__get_user_asm(x, ptr, retval, "l"); \
break; \
+ case 8: \
+ __get_user_u64(x, ptr, retval); \
+ break; \
default: \
__get_user_unknown(); \
break; \
@@ -66,6 +69,56 @@ do { \
extern void __get_user_unknown(void);
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+ "1:\n\t" \
+ "mov.l %2,%R1\n\t" \
+ "mov.l %T2,%S1\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\n\t" \
+ "mov #0,%R1\n\t" \
+ "mov #0,%S1\n\t" \
+ "mov.l 4f, %0\n\t" \
+ "jmp @%0\n\t" \
+ " mov %3, %0\n\t" \
+ ".balign 4\n" \
+ "4: .long 2b\n\t" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".long 1b, 3b\n\t" \
+ ".long 1b + 2, 3b\n\t" \
+ ".previous" \
+ :"=&r" (err), "=&r" (x) \
+ :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#else
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+ "1:\n\t" \
+ "mov.l %2,%S1\n\t" \
+ "mov.l %T2,%R1\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\n\t" \
+ "mov #0,%S1\n\t" \
+ "mov #0,%R1\n\t" \
+ "mov.l 4f, %0\n\t" \
+ "jmp @%0\n\t" \
+ " mov %3, %0\n\t" \
+ ".balign 4\n" \
+ "4: .long 2b\n\t" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".long 1b, 3b\n\t" \
+ ".long 1b + 2, 3b\n\t" \
+ ".previous" \
+ :"=&r" (err), "=&r" (x) \
+ :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#endif
+
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
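With the new case 8 in __get_user_size() and the __get_user_u64() helpers above, an 8-byte get_user() no longer falls through to __get_user_unknown(). A minimal sketch of such a call (hypothetical helper and names, assuming the pointer comes from userspace):

static int read_user_u64_example(u64 __user *uptr, u64 *out)
{
	/* expands through __get_user_size(), hitting the new 8-byte case */
	return get_user(*out, uptr);	/* 0 on success, -EFAULT on a faulting access */
}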
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h
index cecd0fc507f9..b9ca4c99f046 100644
--- a/arch/sh/include/asm/watchdog.h
+++ b/arch/sh/include/asm/watchdog.h
@@ -8,7 +8,6 @@
*/
#ifndef __ASM_SH_WATCHDOG_H
#define __ASM_SH_WATCHDOG_H
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/io.h>
@@ -157,5 +156,4 @@ static inline void sh_wdt_write_csr(__u8 val)
__raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
}
#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_WATCHDOG_H */