author     Linus Torvalds <torvalds@linux-foundation.org>   2013-07-03 11:09:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-07-03 11:09:27 -0700
commit     76d3f4c27d3c2c85e5cfe731537b6929145bf652
tree       80d6dd04ad832122f69edaad710252d771e39c1b   /arch/arc
parent     c1101cbc7db316dcdc94d344727fd372622d0ce7
parent     baadb8fd0c62540f2ffb2d0f12b8a47c7975562b
Merge tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull first batch of ARC changes from Vineet Gupta:
"There's a second bunch to follow next week - which depends on commits
on other trees (irq/net). I'd have preferred the accompanying ARC
change via respective trees, but it didn't workout somehow.
Highlights of changes:
- Continuation of ARC MM changes from 3.10 including:
    zero page optimization
    setting pagecache pages dirty by default
    non-executable stack by default
    reducing dcache flushes for aliasing VIPT config
- Long overdue rework of the pt_regs machinery - removing the unused word
  gutters and adding the ECR register to the baseline (helps clean up a lot
  of low level code)
- Support for ARC gcc 4.8
- A few other preventive fixes, cosmetics, usage of Kconfig helpers ...
The diffstat is larger than normal primarily because of the arcregs.h
header split as well as the beautification of macros in entry.h"
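
The pt_regs rework called out above replaces the synthesized orig_r8 "event" word with the hardware Exception Cause Register (ECR), decoded into byte-wide fields. A minimal C sketch of the resulting layout, condensed from the asm/ptrace.h and asm/arcregs.h hunks further down (little-endian case only; the struct name pt_regs_tail is invented here purely for illustration):

/* Tail of the reworked struct pt_regs (little-endian layout shown) */
struct pt_regs_tail {
	long orig_r0;
	union {
		struct {
			unsigned long ecr_param:8, ecr_cause:8,
				      ecr_vec:8, state:8;	/* state: syscall-restart flag */
		};
		unsigned long event;	/* whole word, for a single value to switch on */
	};
	long user_r25;	/* user-mode r25 now saved here instead of thread_struct */
};

#define ECR_V_TRAP	0x25	/* TRAP vector: syscalls and breakpoints */

/* Classification now keys off the real vector/param instead of orig_r8 flags */
#define in_syscall(regs)	((regs)->ecr_vec == ECR_V_TRAP && !(regs)->ecr_param)
#define in_brkpt_trap(regs)	((regs)->ecr_vec == ECR_V_TRAP && (regs)->ecr_param)

Interrupt entry paths store the dummy values event_IRQ1/event_IRQ2 into the same slot, so the event member still distinguishes IRQ contexts from traps and exceptions.
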
* tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (32 commits)
ARC: warn on improper stack unwind FDE entries
arc: delete __cpuinit usage from all arc files
ARC: [tlb-miss] Fix bug with CONFIG_ARC_DBG_TLB_MISS_COUNT
ARC: [tlb-miss] Extraneous PTE bit testing/setting
ARC: Adjustments for gcc 4.8
ARC: Setup Vector Table Base in early boot
ARC: Remove explicit passing around of ECR
ARC: pt_regs update #5: Use real ECR for pt_regs->event vs. synth values
ARC: stop using pt_regs->orig_r8
ARC: pt_regs update #4: r25 saved/restored unconditionally
ARC: K/U SP saved from one location in stack switching macro
ARC: Entry Handler tweaks: Simplify branch for in-kernel preemption
ARC: Entry Handler tweaks: Avoid hardcoded LIMMS for ECR values
ARC: Increase readability of entry handlers
ARC: pt_regs update #3: Remove unused gutter at start of callee_regs
ARC: pt_regs update #2: Remove unused gutter at start of pt_regs
ARC: pt_regs update #1: Align pt_regs end with end of kernel stack page
ARC: pt_regs update #0: remove kernel stack canary
ARC: [mm] Remove @write argument to do_page_fault()
ARC: [mm] Make stack/heap Non-executable by default
...
Diffstat (limited to 'arch/arc')
48 files changed, 670 insertions, 876 deletions
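
Several hunks below also swap #ifdef blocks for the IS_ENABLED() Kconfig helper mentioned in the pull message (e.g. cache_is_vipt_aliasing() in asm/cacheflush.h and the level_mask setup in kernel/irq.c). A small before/after sketch, with the _old suffix added here only for comparison:

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Before: the preprocessor selects one branch; the other is never parsed */
static inline int cache_is_vipt_aliasing_old(void)
{
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
	return 1;
#else
	return 0;
#endif
}

/* After: IS_ENABLED() expands to 1 or 0, so both sides stay visible to the
 * compiler and the helper composes directly into expressions, e.g.
 *	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
 */
static inline int cache_is_vipt_aliasing(void)
{
	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
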
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 5917099470ea..4a0e54fc01b2 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -184,6 +184,7 @@ config ARC_CACHE_PAGES config ARC_CACHE_VIPT_ALIASING bool "Support VIPT Aliasing D$" + depends on ARC_HAS_DCACHE default n endif #ARC_CACHE @@ -361,13 +362,6 @@ config ARC_MISALIGN_ACCESS Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide potential bugs in code -config ARC_STACK_NONEXEC - bool "Make stack non-executable" - default n - help - To disable the execute permissions of stack/heap of processes - which are enabled by default. - config HZ int "Timer Frequency" default 100 diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 183397fd289e..8c0b1aa56f7e 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -9,25 +9,27 @@ UTS_MACHINE := arc ifeq ($(CROSS_COMPILE),) -CROSS_COMPILE := arc-elf32- +CROSS_COMPILE := arc-linux-uclibc- endif KBUILD_DEFCONFIG := fpga_defconfig cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ -LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h - ifdef CONFIG_ARC_CURR_IN_REG # For a global register defintion, make sure it gets passed to every file # We had a customer reported bug where some code built in kernel was NOT using # any kernel headers, and missing the r25 global register -# Can't do unconditionally (like above) because of recursive include issues +# Can't do unconditionally because of recursive include issues # due to <linux/thread_info.h> LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h endif -atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y) +upto_gcc42 := $(call cc-ifversion, -le, 0402, y) +upto_gcc44 := $(call cc-ifversion, -le, 0404, y) +atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) +atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y) + cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock @@ -35,6 +37,11 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables +# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok +ifeq ($(atleast_gcc48),y) +cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2 +endif + ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE # Generic build system uses -O2, we want -O3 cflags-y += -O3 @@ -48,11 +55,10 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB -# STAR 9000518362: +# STAR 9000518362: (fixed with binutils shipping with gcc 4.8) # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept -# --build-id w/o "-marclinux". -# Default arc-elf32-ld is OK -ldflags-y += -marclinux +# --build-id w/o "-marclinux". Default arc-elf32-ld is OK +ldflags-$(upto_gcc44) += -marclinux ARC_LIBGCC := -mA7 cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16 @@ -66,8 +72,8 @@ ifndef CONFIG_ARC_HAS_HW_MPY # With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments, # e.g. increased cost of MPY. 
With gcc 4.2.1 this had to be explicitly hinted - ARC_LIBGCC := -marc600 - ifneq ($(atleast_gcc44),y) + ifeq ($(upto_gcc42),y) + ARC_LIBGCC := -marc600 cflags-y += -multcost=30 endif endif diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig index 95350be6ef6f..c109af320274 100644 --- a/arch/arc/configs/fpga_defconfig +++ b/arch/arc/configs/fpga_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-elf32-" +CONFIG_CROSS_COMPILE="arc-linux-uclibc-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 446c96c24eff..451af30914f6 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-elf32-" +CONFIG_CROSS_COMPILE="arc-linux-uclibc-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 4fa5cd9f2202..6be6492442d6 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -1,4 +1,4 @@ -CONFIG_CROSS_COMPILE="arc-elf32-" +CONFIG_CROSS_COMPILE="arc-linux-uclibc-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="tb10x" CONFIG_SYSVIPC=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 1b907c465666..355cb470c2a4 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -20,7 +20,6 @@ #define ARC_REG_PERIBASE_BCR 0x69 #define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */ #define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */ -#define ARC_REG_MMU_BCR 0x6f #define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ #define ARC_REG_TIMERS_BCR 0x75 #define ARC_REG_ICCM_BCR 0x78 @@ -34,22 +33,12 @@ #define ARC_REG_D_UNCACH_BCR 0x6A /* status32 Bits Positions */ -#define STATUS_H_BIT 0 /* CPU Halted */ -#define STATUS_E1_BIT 1 /* Int 1 enable */ -#define STATUS_E2_BIT 2 /* Int 2 enable */ -#define STATUS_A1_BIT 3 /* Int 1 active */ -#define STATUS_A2_BIT 4 /* Int 2 active */ #define STATUS_AE_BIT 5 /* Exception active */ #define STATUS_DE_BIT 6 /* PC is in delay slot */ #define STATUS_U_BIT 7 /* User/Kernel mode */ #define STATUS_L_BIT 12 /* Loop inhibit */ /* These masks correspond to the status word(STATUS_32) bits */ -#define STATUS_H_MASK (1<<STATUS_H_BIT) -#define STATUS_E1_MASK (1<<STATUS_E1_BIT) -#define STATUS_E2_MASK (1<<STATUS_E2_BIT) -#define STATUS_A1_MASK (1<<STATUS_A1_BIT) -#define STATUS_A2_MASK (1<<STATUS_A2_BIT) #define STATUS_AE_MASK (1<<STATUS_AE_BIT) #define STATUS_DE_MASK (1<<STATUS_DE_BIT) #define STATUS_U_MASK (1<<STATUS_U_BIT) @@ -71,6 +60,7 @@ #define ECR_V_ITLB_MISS 0x21 #define ECR_V_DTLB_MISS 0x22 #define ECR_V_PROTV 0x23 +#define ECR_V_TRAP 0x25 /* Protection Violation Exception Cause Code Values */ #define ECR_C_PROTV_INST_FETCH 0x00 @@ -79,94 +69,23 @@ #define ECR_C_PROTV_XCHG 0x03 #define ECR_C_PROTV_MISALIG_DATA 0x04 +#define ECR_C_BIT_PROTV_MISALIG_DATA 10 + +/* Machine Check Cause Code Values */ +#define ECR_C_MCHK_DUP_TLB 0x01 + /* DTLB Miss Exception Cause Code Values */ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 +/* Dummy ECR values for Interrupts */ +#define event_IRQ1 0x0031abcd +#define event_IRQ2 0x0032abcd /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_INTR_VEC_BASE 0x25 -#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */ -#define AUX_IRQ_HINT 0x201 /* For 
generating Soft Interrupts */ -#define AUX_IRQ_LV12 0x43 /* interrupt level register */ - -#define AUX_IENABLE 0x40c -#define AUX_ITRIGGER 0x40d -#define AUX_IPULSE 0x415 - -/* Timer related Aux registers */ -#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ -#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ -#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */ -#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */ -#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ -#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ - -#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ -#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ - -/* MMU Management regs */ -#define ARC_REG_TLBPD0 0x405 -#define ARC_REG_TLBPD1 0x406 -#define ARC_REG_TLBINDEX 0x407 -#define ARC_REG_TLBCOMMAND 0x408 -#define ARC_REG_PID 0x409 -#define ARC_REG_SCRATCH_DATA0 0x418 - -/* Bits in MMU PID register */ -#define MMU_ENABLE (1 << 31) /* Enable MMU for process */ - -/* Error code if probe fails */ -#define TLB_LKUP_ERR 0x80000000 - -/* TLB Commands */ -#define TLBWrite 0x1 -#define TLBRead 0x2 -#define TLBGetIndex 0x3 -#define TLBProbe 0x4 - -#if (CONFIG_ARC_MMU_VER >= 2) -#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ -#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ -#else -#undef TLBWriteNI /* These cmds don't exist on older MMU */ -#undef TLBIVUTLB -#endif -/* Instruction cache related Auxiliary registers */ -#define ARC_REG_IC_BCR 0x77 /* Build Config reg */ -#define ARC_REG_IC_IVIC 0x10 -#define ARC_REG_IC_CTRL 0x11 -#define ARC_REG_IC_IVIL 0x19 -#if (CONFIG_ARC_MMU_VER > 2) -#define ARC_REG_IC_PTAG 0x1E -#endif - -/* Bit val in IC_CTRL */ -#define IC_CTRL_CACHE_DISABLE 0x1 - -/* Data cache related Auxiliary registers */ -#define ARC_REG_DC_BCR 0x72 -#define ARC_REG_DC_IVDC 0x47 -#define ARC_REG_DC_CTRL 0x48 -#define ARC_REG_DC_IVDL 0x4A -#define ARC_REG_DC_FLSH 0x4B -#define ARC_REG_DC_FLDL 0x4C -#if (CONFIG_ARC_MMU_VER > 2) -#define ARC_REG_DC_PTAG 0x5C -#endif - -/* Bit val in DC_CTRL */ -#define DC_CTRL_INV_MODE_FLUSH 0x40 -#define DC_CTRL_FLUSH_STATUS 0x100 - -/* MMU Management regs */ -#define ARC_REG_PID 0x409 -#define ARC_REG_SCRATCH_DATA0 0x418 - -/* Bits in MMU PID register */ -#define MMU_ENABLE (1 << 31) /* Enable MMU for process */ /* * Floating Pt Registers @@ -293,24 +212,6 @@ struct bcr_identity { #endif }; -struct bcr_mmu_1_2 { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8; -#else - unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8; -#endif -}; - -struct bcr_mmu_3 { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, - u_itlb:4, u_dtlb:4; -#else - unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, - ways:4, ver:8; -#endif -}; - #define EXTN_SWAP_VALID 0x1 #define EXTN_NORM_VALID 0x2 #define EXTN_MINMAX_VALID 0x2 @@ -343,14 +244,6 @@ struct bcr_extn_xymem { #endif }; -struct bcr_cache { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; -#else - unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; -#endif -}; - struct bcr_perip { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int start:8, pad2:8, sz:8, pad:8; @@ -403,7 +296,7 @@ struct cpuinfo_arc_mmu { }; struct cpuinfo_arc_cache { - unsigned int has_aliasing, sz, line_len, assoc, ver; + unsigned int sz, line_len, assoc, ver; }; struct cpuinfo_arc_ccm { diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h index 2ad8f9b1c54b..5b18e94c6678 
100644 --- a/arch/arc/include/asm/bug.h +++ b/arch/arc/include/asm/bug.h @@ -18,9 +18,8 @@ struct task_struct; void show_regs(struct pt_regs *regs); void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs); void show_kernel_fault_diag(const char *str, struct pt_regs *regs, - unsigned long address, unsigned long cause_reg); -void die(const char *str, struct pt_regs *regs, unsigned long address, - unsigned long cause_reg); + unsigned long address); +void die(const char *str, struct pt_regs *regs, unsigned long address); #define BUG() do { \ dump_stack(); \ diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index d5555fe4742a..5802849a6cae 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -18,21 +18,19 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define ARC_ICACHE_WAYS 2 -#define ARC_DCACHE_WAYS 4 - -/* Helpers */ +/* For a rare case where customers have differently config I/D */ #define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES #define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES #define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1)) #define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1)) -#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN -#error "Need to fix some code as I/D cache lines not same" -#else -#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK)) -#endif +/* + * ARC700 doesn't cache any access in top 256M. + * Ideal for wiring memory mapped peripherals as we don't need to do + * explicit uncached accesses (LD.di/ST.di) hence more portable drivers + */ +#define ARC_UNCACHED_ADDR_SPACE 0xc0000000 #ifndef __ASSEMBLY__ @@ -57,16 +55,10 @@ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES -/* - * ARC700 doesn't cache any access in top 256M. - * Ideal for wiring memory mapped peripherals as we don't need to do - * explicit uncached accesses (LD.di/ST.di) hence more portable drivers - */ -#define ARC_UNCACHED_ADDR_SPACE 0xc0000000 - extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void __init read_decode_cache_bcr(void); -#endif + +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_CACHE_H */ diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h index ef62682e8d95..6abc4972bc93 100644 --- a/arch/arc/include/asm/cacheflush.h +++ b/arch/arc/include/asm/cacheflush.h @@ -81,16 +81,19 @@ void flush_anon_page(struct vm_area_struct *vma, #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ /* + * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default + * This works around some PIO based drivers which don't call flush_dcache_page + * to record that they dirtied the dcache + */ +#define PG_dc_clean PG_arch_1 + +/* * Simple wrapper over config option * Bootup code ensures that hardware matches kernel configuration */ static inline int cache_is_vipt_aliasing(void) { -#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING - return 1; -#else - return 0; -#endif + return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); } #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1) diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h deleted file mode 100644 index 6097bb439cc5..000000000000 --- a/arch/arc/include/asm/defines.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __ARC_ASM_DEFINES_H__ -#define __ARC_ASM_DEFINES_H__ - -#if defined(CONFIG_ARC_MMU_V1) -#define CONFIG_ARC_MMU_VER 1 -#elif defined(CONFIG_ARC_MMU_V2) -#define CONFIG_ARC_MMU_VER 2 -#elif defined(CONFIG_ARC_MMU_V3) -#define CONFIG_ARC_MMU_VER 3 -#endif - -#ifdef CONFIG_ARC_HAS_LLSC -#define __CONFIG_ARC_HAS_LLSC_VAL 1 -#else -#define __CONFIG_ARC_HAS_LLSC_VAL 0 -#endif - -#ifdef CONFIG_ARC_HAS_SWAPE -#define __CONFIG_ARC_HAS_SWAPE_VAL 1 -#else -#define __CONFIG_ARC_HAS_SWAPE_VAL 0 -#endif - -#ifdef CONFIG_ARC_HAS_RTSC -#define __CONFIG_ARC_HAS_RTSC_VAL 1 -#else -#define __CONFIG_ARC_HAS_RTSC_VAL 0 -#endif - -#ifdef CONFIG_ARC_MMU_SASID -#define __CONFIG_ARC_MMU_SASID_VAL 1 -#else -#define __CONFIG_ARC_MMU_SASID_VAL 0 -#endif - -#ifdef CONFIG_ARC_HAS_ICACHE -#define __CONFIG_ARC_HAS_ICACHE 1 -#else -#define __CONFIG_ARC_HAS_ICACHE 0 -#endif - -#ifdef CONFIG_ARC_HAS_DCACHE -#define __CONFIG_ARC_HAS_DCACHE 1 -#else -#define __CONFIG_ARC_HAS_DCACHE 0 -#endif - -#endif /* __ARC_ASM_DEFINES_H__ */ diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index eb2ae53187d9..8943c028d4bb 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -50,194 +50,177 @@ * Eff Addr for load = [reg2] */ +.macro PUSH reg + st.a \reg, [sp, -4] +.endm + +.macro PUSHAX aux + lr r9, [\aux] + PUSH r9 +.endm + +.macro POP reg + ld.ab \reg, [sp, 4] +.endm + +.macro POPAX aux + POP r9 + sr r9, [\aux] +.endm + /*-------------------------------------------------------------- - * Save caller saved registers (scratch registers) ( r0 - r12 ) - * Registers are pushed / popped in the order defined in struct ptregs - * in asm/ptrace.h + * Helpers to save/restore Scratch Regs: + * used by Interrupt/Exception Prologue/Epilogue *-------------------------------------------------------------*/ -.macro SAVE_CALLER_SAVED - st.a r0, [sp, -4] - st.a r1, [sp, -4] - st.a r2, [sp, -4] - st.a r3, [sp, -4] - st.a r4, [sp, -4] - st.a r5, [sp, -4] - st.a r6, [sp, -4] - st.a r7, [sp, -4] - st.a r8, [sp, -4] - st.a r9, [sp, -4] - st.a r10, [sp, -4] - st.a r11, [sp, -4] - st.a r12, [sp, -4] +.macro SAVE_R0_TO_R12 + PUSH r0 + PUSH r1 + PUSH r2 + PUSH r3 + PUSH r4 + PUSH r5 + PUSH r6 + PUSH r7 + PUSH r8 + PUSH r9 + PUSH r10 + PUSH r11 + PUSH r12 +.endm + +.macro RESTORE_R12_TO_R0 + POP r12 + POP r11 + POP r10 + POP r9 + POP r8 + POP r7 + POP r6 + POP r5 + POP r4 + POP r3 + POP r2 + POP r1 + POP r0 + +#ifdef CONFIG_ARC_CURR_IN_REG + ld r25, [sp, 12] +#endif .endm /*-------------------------------------------------------------- - * Restore caller saved registers (scratch registers) + * Helpers to save/restore callee-saved regs: + * used by several macros below *-------------------------------------------------------------*/ -.macro RESTORE_CALLER_SAVED - ld.ab r12, [sp, 4] - ld.ab r11, [sp, 4] - ld.ab r10, [sp, 4] - ld.ab r9, [sp, 4] - ld.ab r8, [sp, 4] - ld.ab r7, [sp, 4] - ld.ab r6, [sp, 4] - ld.ab r5, [sp, 4] - ld.ab r4, [sp, 4] - ld.ab r3, [sp, 4] - ld.ab r2, [sp, 4] - ld.ab r1, [sp, 4] - ld.ab r0, [sp, 4] +.macro SAVE_R13_TO_R24 + PUSH r13 + PUSH r14 + PUSH r15 + PUSH r16 + PUSH r17 + PUSH r18 + PUSH r19 + PUSH r20 + PUSH r21 + PUSH r22 + PUSH r23 + PUSH r24 +.endm + +.macro RESTORE_R24_TO_R13 + POP r24 + POP r23 + POP r22 + POP r21 + POP r20 + POP r19 + POP r18 + POP r17 + POP r16 + POP r15 + POP r14 + POP r13 .endm +#define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4 /*-------------------------------------------------------------- - * Save callee saved registers (non 
scratch registers) ( r13 - r25 ) - * on kernel stack. - * User mode callee regs need to be saved in case of - * -fork and friends for replicating from parent to child - * -before going into do_signal( ) for ptrace/core-dump - * Special case handling is required for r25 in case it is used by kernel - * for caching task ptr. Low level exception/ISR save user mode r25 - * into task->thread.user_r25. So it needs to be retrieved from there and - * saved into kernel stack with rest of callee reg-file + * Collect User Mode callee regs as struct callee_regs - needed by + * fork/do_signal/unaligned-access-emulation. + * (By default only scratch regs are saved on entry to kernel) + * + * Special handling for r25 if used for caching Task Pointer. + * It would have been saved in task->thread.user_r25 already, but to keep + * the interface same it is copied into regular r25 placeholder in + * struct callee_regs. *-------------------------------------------------------------*/ .macro SAVE_CALLEE_SAVED_USER - st.a r13, [sp, -4] - st.a r14, [sp, -4] - st.a r15, [sp, -4] - st.a r16, [sp, -4] - st.a r17, [sp, -4] - st.a r18, [sp, -4] - st.a r19, [sp, -4] - st.a r20, [sp, -4] - st.a r21, [sp, -4] - st.a r22, [sp, -4] - st.a r23, [sp, -4] - st.a r24, [sp, -4] + + SAVE_R13_TO_R24 #ifdef CONFIG_ARC_CURR_IN_REG ; Retrieve orig r25 and save it on stack - ld r12, [r25, TASK_THREAD + THREAD_USER_R25] + ld.as r12, [sp, OFF_USER_R25_FROM_R24] st.a r12, [sp, -4] #else - st.a r25, [sp, -4] + PUSH r25 #endif - /* move up by 1 word to "create" callee_regs->"stack_place_holder" */ - sub sp, sp, 4 .endm /*-------------------------------------------------------------- - * Save callee saved registers (non scratch registers) ( r13 - r25 ) - * kernel mode callee regs needed to be saved in case of context switch - * If r25 is used for caching task pointer then that need not be saved - * as it can be re-created from current task global + * Save kernel Mode callee regs at the time of Contect Switch. + * + * Special handling for r25 if used for caching Task Pointer. + * Kernel simply skips saving it since it will be loaded with + * incoming task pointer anyways *-------------------------------------------------------------*/ .macro SAVE_CALLEE_SAVED_KERNEL - st.a r13, [sp, -4] - st.a r14, [sp, -4] - st.a r15, [sp, -4] - st.a r16, [sp, -4] - st.a r17, [sp, -4] - st.a r18, [sp, -4] - st.a r19, [sp, -4] - st.a r20, [sp, -4] - st.a r21, [sp, -4] - st.a r22, [sp, -4] - st.a r23, [sp, -4] - st.a r24, [sp, -4] + + SAVE_R13_TO_R24 + #ifdef CONFIG_ARC_CURR_IN_REG - sub sp, sp, 8 -#else - st.a r25, [sp, -4] sub sp, sp, 4 +#else + PUSH r25 #endif .endm /*-------------------------------------------------------------- - * RESTORE_CALLEE_SAVED_KERNEL: - * Loads callee (non scratch) Reg File by popping from Kernel mode stack. - * This is reverse of SAVE_CALLEE_SAVED, - * - * NOTE: - * Ideally this shd only be called in switch_to for loading - * switched-IN task's CALLEE Reg File. - * For all other cases RESTORE_CALLEE_SAVED_FAST must be used - * which simply pops the stack w/o touching regs. 
+ * Opposite of SAVE_CALLEE_SAVED_KERNEL *-------------------------------------------------------------*/ .macro RESTORE_CALLEE_SAVED_KERNEL - #ifdef CONFIG_ARC_CURR_IN_REG - add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */ + add sp, sp, 4 /* skip usual r25 placeholder */ #else - add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */ - ld.ab r25, [sp, 4] + POP r25 #endif - - ld.ab r24, [sp, 4] - ld.ab r23, [sp, 4] - ld.ab r22, [sp, 4] - ld.ab r21, [sp, 4] - ld.ab r20, [sp, 4] - ld.ab r19, [sp, 4] - ld.ab r18, [sp, 4] - ld.ab r17, [sp, 4] - ld.ab r16, [sp, 4] - ld.ab r15, [sp, 4] - ld.ab r14, [sp, 4] - ld.ab r13, [sp, 4] - + RESTORE_R24_TO_R13 .endm /*-------------------------------------------------------------- - * RESTORE_CALLEE_SAVED_USER: - * This is called after do_signal where tracer might have changed callee regs - * thus we need to restore the reg file. - * Special case handling is required for r25 in case it is used by kernel - * for caching task ptr. Ptrace would have modified on-kernel-stack value of - * r25, which needs to be shoved back into task->thread.user_r25 where from - * Low level exception/ISR return code will retrieve to populate with rest of - * callee reg-file. + * Opposite of SAVE_CALLEE_SAVED_USER + * + * ptrace tracer or unaligned-access fixup might have changed a user mode + * callee reg which is saved back to usual r25 storage location *-------------------------------------------------------------*/ .macro RESTORE_CALLEE_SAVED_USER - add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */ - #ifdef CONFIG_ARC_CURR_IN_REG ld.ab r12, [sp, 4] - st r12, [r25, TASK_THREAD + THREAD_USER_R25] + st.as r12, [sp, OFF_USER_R25_FROM_R24] #else - ld.ab r25, [sp, 4] + POP r25 #endif - - ld.ab r24, [sp, 4] - ld.ab r23, [sp, 4] - ld.ab r22, [sp, 4] - ld.ab r21, [sp, 4] - ld.ab r20, [sp, 4] - ld.ab r19, [sp, 4] - ld.ab r18, [sp, 4] - ld.ab r17, [sp, 4] - ld.ab r16, [sp, 4] - ld.ab r15, [sp, 4] - ld.ab r14, [sp, 4] - ld.ab r13, [sp, 4] + RESTORE_R24_TO_R13 .endm /*-------------------------------------------------------------- * Super FAST Restore callee saved regs by simply re-adjusting SP *-------------------------------------------------------------*/ .macro DISCARD_CALLEE_SAVED_USER - add sp, sp, 14 * 4 -.endm - -/*-------------------------------------------------------------- - * Restore User mode r25 saved in task_struct->thread.user_r25 - *-------------------------------------------------------------*/ -.macro RESTORE_USER_R25 - ld r25, [r25, TASK_THREAD + THREAD_USER_R25] + add sp, sp, SZ_CALLEE_REGS .endm /*------------------------------------------------------------- @@ -252,7 +235,7 @@ ld \out, [\tsk, TASK_THREAD_INFO] /* Go to end of page where stack begins (grows upwards) */ - add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */ + add2 \out, \out, (THREAD_SIZE)/4 .endm @@ -305,33 +288,28 @@ * safe-keeping not really needed, but it keeps the epilogue code * (SP restore) simpler/uniform. 
*/ - b.d 77f - - st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8 + b.d 66f + mov r9, sp 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */ GET_CURR_TASK_ON_CPU r9 -#ifdef CONFIG_ARC_CURR_IN_REG - - /* If current task pointer cached in r25, time to - * -safekeep USER r25 in task->thread_struct->user_r25 - * -load r25 with current task ptr - */ - st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4] - mov r25, r9 -#endif - /* With current tsk in r9, get it's kernel mode stack base */ GET_TSK_STACK_BASE r9, r9 -#ifdef PT_REGS_CANARY - st 0xabcdabcd, [r9, 0] +66: +#ifdef CONFIG_ARC_CURR_IN_REG + /* + * Treat r25 as scratch reg, save it on stack first + * Load it with current task pointer + */ + st r25, [r9, -4] + GET_CURR_TASK_ON_CPU r25 #endif /* Save Pre Intr/Exception User SP on kernel stack */ - st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8 + st.a sp, [r9, -16] ; Make room for orig_r0, ECR, user_r25 /* CAUTION: * SP should be set at the very end when we are done with everything @@ -342,7 +320,7 @@ /* set SP to point to kernel mode stack */ mov sp, r9 -77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */ + /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */ .endm @@ -369,7 +347,7 @@ * @reg [OUT] &thread_info of "current" */ .macro GET_CURR_THR_INFO_FROM_SP reg - and \reg, sp, ~(THREAD_SIZE - 1) + bic \reg, sp, (THREAD_SIZE - 1) .endm /* @@ -413,62 +391,25 @@ * Note that syscalls are implemented via TRAP which is also a exception * from CPU's point of view *-------------------------------------------------------------*/ -.macro SAVE_ALL_EXCEPTION marker +.macro SAVE_ALL_SYS - st \marker, [sp, 8] /* orig_r8 */ + lr r9, [ecr] + st r9, [sp, 8] /* ECR */ st r0, [sp, 4] /* orig_r0, needed only for sys calls */ /* Restore r9 used to code the early prologue */ EXCPN_PROLOG_RESTORE_REG r9 - SAVE_CALLER_SAVED - st.a r26, [sp, -4] /* gp */ - st.a fp, [sp, -4] - st.a blink, [sp, -4] - lr r9, [eret] - st.a r9, [sp, -4] - lr r9, [erstatus] - st.a r9, [sp, -4] - st.a lp_count, [sp, -4] - lr r9, [lp_end] - st.a r9, [sp, -4] - lr r9, [lp_start] - st.a r9, [sp, -4] - lr r9, [erbta] - st.a r9, [sp, -4] - -#ifdef PT_REGS_CANARY - mov r9, 0xdeadbeef - st r9, [sp, -4] -#endif - - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ - sub sp, sp, 4 -.endm - -/*-------------------------------------------------------------- - * Save scratch regs for exceptions - *-------------------------------------------------------------*/ -.macro SAVE_ALL_SYS - SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN -.endm - -/*-------------------------------------------------------------- - * Save scratch regs for sys calls - *-------------------------------------------------------------*/ -.macro SAVE_ALL_TRAP - /* - * Setup pt_regs->orig_r8. - * Encode syscall number (r8) in upper short word of event type (r9) - * N.B. #1: This is already endian safe (see ptrace.h) - * #2: Only r9 can be used as scratch as it is already clobbered - * and it's contents are no longer needed by the latter part - * of exception prologue - */ - lsl r9, r8, 16 - or r9, r9, orig_r8_IS_SCALL - - SAVE_ALL_EXCEPTION r9 + SAVE_R0_TO_R12 + PUSH gp + PUSH fp + PUSH blink + PUSHAX eret + PUSHAX erstatus + PUSH lp_count + PUSHAX lp_end + PUSHAX lp_start + PUSHAX erbta .endm /*-------------------------------------------------------------- @@ -483,28 +424,22 @@ * by hardware and that is not good. 
*-------------------------------------------------------------*/ .macro RESTORE_ALL_SYS + POPAX erbta + POPAX lp_start + POPAX lp_end + + POP r9 + mov lp_count, r9 ;LD to lp_count is not allowed - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ - - ld.ab r9, [sp, 4] - sr r9, [erbta] - ld.ab r9, [sp, 4] - sr r9, [lp_start] - ld.ab r9, [sp, 4] - sr r9, [lp_end] - ld.ab r9, [sp, 4] - mov lp_count, r9 - ld.ab r9, [sp, 4] - sr r9, [erstatus] - ld.ab r9, [sp, 4] - sr r9, [eret] - ld.ab blink, [sp, 4] - ld.ab fp, [sp, 4] - ld.ab r26, [sp, 4] /* gp */ - RESTORE_CALLER_SAVED + POPAX erstatus + POPAX eret + POP blink + POP fp + POP gp + RESTORE_R12_TO_R0 ld sp, [sp] /* restore original sp */ - /* orig_r0 and orig_r8 skipped automatically */ + /* orig_r0, ECR, user_r25 skipped automatically */ .endm @@ -513,9 +448,7 @@ *-------------------------------------------------------------*/ .macro SAVE_ALL_INT1 - /* restore original r9 , saved in int1_saved_reg - * It will be saved on stack in macro: SAVE_CALLER_SAVED - */ + /* restore original r9 to be saved as part of reg-file */ #ifdef CONFIG_SMP lr r9, [ARC_REG_SCRATCH_DATA0] #else @@ -523,29 +456,19 @@ #endif /* now we are ready to save the remaining context :) */ - st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */ + st event_IRQ1, [sp, 8] /* Dummy ECR */ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */ - SAVE_CALLER_SAVED - st.a r26, [sp, -4] /* gp */ - st.a fp, [sp, -4] - st.a blink, [sp, -4] - st.a ilink1, [sp, -4] - lr r9, [status32_l1] - st.a r9, [sp, -4] - st.a lp_count, [sp, -4] - lr r9, [lp_end] - st.a r9, [sp, -4] - lr r9, [lp_start] - st.a r9, [sp, -4] - lr r9, [bta_l1] - st.a r9, [sp, -4] - -#ifdef PT_REGS_CANARY - mov r9, 0xdeadbee1 - st r9, [sp, -4] -#endif - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ - sub sp, sp, 4 + + SAVE_R0_TO_R12 + PUSH gp + PUSH fp + PUSH blink + PUSH ilink1 + PUSHAX status32_l1 + PUSH lp_count + PUSHAX lp_end + PUSHAX lp_start + PUSHAX bta_l1 .endm .macro SAVE_ALL_INT2 @@ -558,30 +481,19 @@ ld r9, [@int2_saved_reg] /* now we are ready to save the remaining context :) */ - st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */ + st event_IRQ2, [sp, 8] /* Dummy ECR */ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */ - SAVE_CALLER_SAVED - st.a r26, [sp, -4] /* gp */ - st.a fp, [sp, -4] - st.a blink, [sp, -4] - st.a ilink2, [sp, -4] - lr r9, [status32_l2] - st.a r9, [sp, -4] - st.a lp_count, [sp, -4] - lr r9, [lp_end] - st.a r9, [sp, -4] - lr r9, [lp_start] - st.a r9, [sp, -4] - lr r9, [bta_l2] - st.a r9, [sp, -4] - -#ifdef PT_REGS_CANARY - mov r9, 0xdeadbee2 - st r9, [sp, -4] -#endif - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ - sub sp, sp, 4 + SAVE_R0_TO_R12 + PUSH gp + PUSH fp + PUSH blink + PUSH ilink2 + PUSHAX status32_l2 + PUSH lp_count + PUSHAX lp_end + PUSHAX lp_start + PUSHAX bta_l2 .endm /*-------------------------------------------------------------- @@ -595,52 +507,41 @@ *-------------------------------------------------------------*/ .macro RESTORE_ALL_INT1 - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ - - ld.ab r9, [sp, 4] /* Actual reg file */ - sr r9, [bta_l1] - ld.ab r9, [sp, 4] - sr r9, [lp_start] - ld.ab r9, [sp, 4] - sr r9, [lp_end] - ld.ab r9, [sp, 4] - mov lp_count, r9 - ld.ab r9, [sp, 4] - sr r9, [status32_l1] - ld.ab r9, [sp, 4] - mov ilink1, r9 - ld.ab blink, [sp, 4] - ld.ab fp, [sp, 4] - ld.ab r26, [sp, 4] /* gp */ - RESTORE_CALLER_SAVED + POPAX bta_l1 + POPAX lp_start + POPAX lp_end + + POP r9 + mov lp_count, r9 ;LD to lp_count is not 
allowed + + POPAX status32_l1 + POP ilink1 + POP blink + POP fp + POP gp + RESTORE_R12_TO_R0 ld sp, [sp] /* restore original sp */ - /* orig_r0 and orig_r8 skipped automatically */ + /* orig_r0, ECR, user_r25 skipped automatically */ .endm .macro RESTORE_ALL_INT2 - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ - - ld.ab r9, [sp, 4] - sr r9, [bta_l2] - ld.ab r9, [sp, 4] - sr r9, [lp_start] - ld.ab r9, [sp, 4] - sr r9, [lp_end] - ld.ab r9, [sp, 4] - mov lp_count, r9 - ld.ab r9, [sp, 4] - sr r9, [status32_l2] - ld.ab r9, [sp, 4] - mov ilink2, r9 - ld.ab blink, [sp, 4] - ld.ab fp, [sp, 4] - ld.ab r26, [sp, 4] /* gp */ - RESTORE_CALLER_SAVED + POPAX bta_l2 + POPAX lp_start + POPAX lp_end - ld sp, [sp] /* restore original sp */ - /* orig_r0 and orig_r8 skipped automatically */ + POP r9 + mov lp_count, r9 ;LD to lp_count is not allowed + POPAX status32_l2 + POP ilink2 + POP blink + POP fp + POP gp + RESTORE_R12_TO_R0 + + ld sp, [sp] /* restore original sp */ + /* orig_r0, ECR, user_r25 skipped automatically */ .endm diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index 57898a17eb82..c0a72105ee0b 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h @@ -21,6 +21,6 @@ extern void __init arc_init_IRQ(void); extern int __init get_hw_config_num_irq(void); -void __cpuinit arc_local_timer_setup(unsigned int cpu); +void arc_local_timer_setup(unsigned int cpu); #endif diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index eac071668201..d99f79bcf865 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -19,6 +19,26 @@ #include <asm/arcregs.h> +/* status32 Reg bits related to Interrupt Handling */ +#define STATUS_E1_BIT 1 /* Int 1 enable */ +#define STATUS_E2_BIT 2 /* Int 2 enable */ +#define STATUS_A1_BIT 3 /* Int 1 active */ +#define STATUS_A2_BIT 4 /* Int 2 active */ + +#define STATUS_E1_MASK (1<<STATUS_E1_BIT) +#define STATUS_E2_MASK (1<<STATUS_E2_BIT) +#define STATUS_A1_MASK (1<<STATUS_A1_BIT) +#define STATUS_A2_MASK (1<<STATUS_A2_BIT) + +/* Other Interrupt Handling related Aux regs */ +#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */ +#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ +#define AUX_IRQ_LV12 0x43 /* interrupt level register */ + +#define AUX_IENABLE 0x40c +#define AUX_ITRIGGER 0x40d +#define AUX_IPULSE 0x415 + #ifndef __ASSEMBLY__ /****************************************************************** diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index 4930957ca3d3..b65fca7ffeb5 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h @@ -31,7 +31,7 @@ static inline void arch_kgdb_breakpoint(void) __asm__ __volatile__ ("trap_s 0x4\n"); } -extern void kgdb_trap(struct pt_regs *regs, int param); +extern void kgdb_trap(struct pt_regs *regs); enum arc700_linux_regnums { _R0 = 0, @@ -53,7 +53,7 @@ enum arc700_linux_regnums { }; #else -#define kgdb_trap(regs, param) +#define kgdb_trap(regs) #endif #endif /* __ARC_KGDB_H__ */ diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 4d9c211fce70..944dbedb38b5 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -50,11 +50,9 @@ struct kprobe_ctlblk { int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); void kretprobe_trampoline(void); -void trap_is_kprobe(unsigned long cause, unsigned long address, - struct pt_regs *regs); +void trap_is_kprobe(unsigned long address, struct pt_regs *regs); #else 
-static void trap_is_kprobe(unsigned long cause, unsigned long address, - struct pt_regs *regs) +static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) { } #endif diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h index 56b02320f1a9..7c03fe61759c 100644 --- a/arch/arc/include/asm/mmu.h +++ b/arch/arc/include/asm/mmu.h @@ -9,6 +9,40 @@ #ifndef _ASM_ARC_MMU_H #define _ASM_ARC_MMU_H +#if defined(CONFIG_ARC_MMU_V1) +#define CONFIG_ARC_MMU_VER 1 +#elif defined(CONFIG_ARC_MMU_V2) +#define CONFIG_ARC_MMU_VER 2 +#elif defined(CONFIG_ARC_MMU_V3) +#define CONFIG_ARC_MMU_VER 3 +#endif + +/* MMU Management regs */ +#define ARC_REG_MMU_BCR 0x06f +#define ARC_REG_TLBPD0 0x405 +#define ARC_REG_TLBPD1 0x406 +#define ARC_REG_TLBINDEX 0x407 +#define ARC_REG_TLBCOMMAND 0x408 +#define ARC_REG_PID 0x409 +#define ARC_REG_SCRATCH_DATA0 0x418 + +/* Bits in MMU PID register */ +#define MMU_ENABLE (1 << 31) /* Enable MMU for process */ + +/* Error code if probe fails */ +#define TLB_LKUP_ERR 0x80000000 + +/* TLB Commands */ +#define TLBWrite 0x1 +#define TLBRead 0x2 +#define TLBGetIndex 0x3 +#define TLBProbe 0x4 + +#if (CONFIG_ARC_MMU_VER >= 2) +#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ +#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ +#endif + #ifndef __ASSEMBLY__ typedef struct { @@ -18,6 +52,16 @@ typedef struct { #endif } mm_context_t; +#ifdef CONFIG_ARC_DBG_TLB_PARANOIA +void tlb_paranoid_check(unsigned int pid_sw, unsigned long address); +#else +#define tlb_paranoid_check(a, b) #endif +void arc_mmu_init(void); +extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); +void __init read_decode_mmu_bcr(void); + +#endif /* !__ASSEMBLY__ */ + #endif diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index ab84bf131fe1..9c8aa41e45c2 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -96,13 +96,8 @@ typedef unsigned long pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -/* Default Permissions for page, used in mmap.c */ -#ifdef CONFIG_ARC_STACK_NONEXEC +/* Default Permissions for stack/heaps pages (Non Executable) */ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) -#else -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#endif #define WANT_PAGE_VIRTUAL 1 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index c110ac87d22b..4749a0eee1cf 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -135,6 +135,12 @@ /* ioremap */ #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) +/* Masks for actual TLB "PD"s */ +#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) +#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ + _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ + _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) + /************************************************************************** * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) * diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 5f26b2c1cba0..15334ab66b56 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -19,6 +19,7 @@ #ifndef __ASSEMBLY__ #include <asm/arcregs.h> /* for STATUS_E1_MASK et all */ +#include <asm/ptrace.h> /* Arch specific stuff which needs to be saved per task. 
* However these items are not so important so as to earn a place in @@ -28,10 +29,6 @@ struct thread_struct { unsigned long ksp; /* kernel mode stack pointer */ unsigned long callee_reg; /* pointer to callee regs */ unsigned long fault_address; /* dbls as brkpt holder as well */ - unsigned long cause_code; /* Exception Cause Code (ECR) */ -#ifdef CONFIG_ARC_CURR_IN_REG - unsigned long user_r25; -#endif #ifdef CONFIG_ARC_FPU_SAVE_RESTORE struct arc_fpu fpu; #endif @@ -50,7 +47,7 @@ struct task_struct; unsigned long thread_saved_pc(struct task_struct *t); #define task_pt_regs(p) \ - ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1) + ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) /* Free all resources held by a thread. */ #define release_thread(thread) do { } while (0) @@ -75,11 +72,15 @@ unsigned long thread_saved_pc(struct task_struct *t); /* * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. - * These can't be derived from pt_regs as that would give correp user-mode val + * Look in process.c for details of kernel stack layout */ #define KSTK_ESP(tsk) (tsk->thread.ksp) -#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4))) -#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4))) + +#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ + sizeof(struct callee_regs) + off))) + +#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) +#define KSTK_FP(tsk) KSTK_REG(tsk, 0) /* * Do necessary setup to start up a newly executed thread. diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 6179de7e07c2..c9938e7a7dbd 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -17,12 +17,6 @@ /* THE pt_regs: Defines how regs are saved during entry into kernel */ struct pt_regs { - /* - * 1 word gutter after reg-file has been saved - * Technically not needed, Since SP always points to a "full" location - * (vs. "empty"). But pt_regs is shared with tools.... - */ - long res; /* Real registers */ long bta; /* bta_l1, bta_l2, erbta */ @@ -50,22 +44,32 @@ struct pt_regs { long sp; /* user/kernel sp depending on where we came from */ long orig_r0; - /*to distinguish bet excp, syscall, irq */ + /* + * To distinguish bet excp, syscall, irq + * For traps and exceptions, Exception Cause Register. 
+ * ECR: <00> <VV> <CC> <PP> + * Last word used by Linux for extra state mgmt (syscall-restart) + * For interrupts, use artificial ECR values to note current prio-level + */ union { + struct { #ifdef CONFIG_CPU_BIG_ENDIAN - /* so that assembly code is same for LE/BE */ - unsigned long orig_r8:16, event:16; + unsigned long state:8, ecr_vec:8, + ecr_cause:8, ecr_param:8; #else - unsigned long event:16, orig_r8:16; + unsigned long ecr_param:8, ecr_cause:8, + ecr_vec:8, state:8; #endif - long orig_r8_word; + }; + unsigned long event; }; + + long user_r25; }; /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { - long res; /* Again this is not needed */ long r25; long r24; long r23; @@ -99,18 +103,20 @@ struct callee_regs { /* return 1 if PC in delay slot */ #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) -#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL) -#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT) +#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param) +#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param) + +#define STATE_SCALL_RESTARTED 0x01 -#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED) -#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED) +#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED) +#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED) #define current_pt_regs() \ ({ \ /* open-coded current_thread_info() */ \ register unsigned long sp asm ("sp"); \ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ - (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \ + (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \ }) static inline long regs_return_value(struct pt_regs *regs) @@ -120,11 +126,4 @@ static inline long regs_return_value(struct pt_regs *regs) #endif /* !__ASSEMBLY__ */ -#define orig_r8_IS_SCALL 0x0001 -#define orig_r8_IS_SCALL_RESTARTED 0x0002 -#define orig_r8_IS_BRKPT 0x0004 -#define orig_r8_IS_EXCPN 0x0008 -#define orig_r8_IS_IRQ1 0x0010 -#define orig_r8_IS_IRQ2 0x0020 - #endif /* __ASM_PTRACE_H */ diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h index 33ab3048e9b2..29de09804306 100644 --- a/arch/arc/include/asm/syscall.h +++ b/arch/arc/include/asm/syscall.h @@ -18,7 +18,7 @@ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { if (user_mode(regs) && in_syscall(regs)) - return regs->orig_r8; + return regs->r8; else return -1; } @@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? 
*/ - regs->r8 = regs->orig_r8; + regs->r0 = regs->orig_r0; } static inline long diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h index a5ff961b1efc..8a1ec96012ae 100644 --- a/arch/arc/include/asm/tlb-mmu1.h +++ b/arch/arc/include/asm/tlb-mmu1.h @@ -9,9 +9,9 @@ #ifndef __ASM_TLB_MMU_V1_H__ #define __ASM_TLB_MMU_V1_H__ -#if defined(__ASSEMBLY__) && defined(CONFIG_ARC_MMU_VER == 1) +#include <asm/mmu.h> -#include <asm/tlb.h> +#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1) .macro TLB_WRITE_HEURISTICS diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h index cb0c708ca665..a9db5f62aaf3 100644 --- a/arch/arc/include/asm/tlb.h +++ b/arch/arc/include/asm/tlb.h @@ -9,18 +9,6 @@ #ifndef _ASM_ARC_TLB_H #define _ASM_ARC_TLB_H -#ifdef __KERNEL__ - -#include <asm/pgtable.h> - -/* Masks for actual TLB "PD"s */ -#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) -#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ - _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ - _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) - -#ifndef __ASSEMBLY__ - #define tlb_flush(tlb) \ do { \ if (tlb->fullmm) \ @@ -56,18 +44,4 @@ do { \ #include <linux/pagemap.h> #include <asm-generic/tlb.h> -#ifdef CONFIG_ARC_DBG_TLB_PARANOIA -void tlb_paranoid_check(unsigned int pid_sw, unsigned long address); -#else -#define tlb_paranoid_check(a, b) -#endif - -void arc_mmu_init(void); -extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); -void __init read_decode_mmu_bcr(void); - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - #endif /* _ASM_ARC_TLB_H */ diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h index 5dbe63f17b66..60702f3751d2 100644 --- a/arch/arc/include/asm/unaligned.h +++ b/arch/arc/include/asm/unaligned.h @@ -16,11 +16,11 @@ #ifdef CONFIG_ARC_MISALIGN_ACCESS int misaligned_fixup(unsigned long address, struct pt_regs *regs, - unsigned long cause, struct callee_regs *cregs); + struct callee_regs *cregs); #else static inline int misaligned_fixup(unsigned long address, struct pt_regs *regs, - unsigned long cause, struct callee_regs *cregs) + struct callee_regs *cregs) { return 0; } diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 30333cec0fef..2618cc13ba75 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -20,28 +20,31 @@ * * This is to decouple pt_regs from user-space ABI, to be able to change it * w/o affecting the ABI. - * Although the layout (initial padding) is similar to pt_regs to have some - * optimizations when copying pt_regs to/from user_regs_struct. + * + * The intermediate pad,pad2 are relics of initial layout based on pt_regs + * for optimizations when copying pt_regs to/from user_regs_struct. + * We no longer need them, but can't be changed as they are part of ABI now. * * Also, sigcontext only care about the scratch regs as that is what we really - * save/restore for signal handling. + * save/restore for signal handling. However gdb also uses the same struct + * hence callee regs need to be in there too. 
*/ struct user_regs_struct { + long pad; struct { - long pad; long bta, lp_start, lp_end, lp_count; long status32, ret, blink, fp, gp; long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; long sp; } scratch; + long pad2; struct { - long pad; long r25, r24, r23, r22, r21, r20; long r19, r18, r17, r16, r15, r14, r13; } callee; long efa; /* break pt addr, for break points in delay slots */ - long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */ + long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 7dcda7025241..6c3aa0edb9b5 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -24,9 +24,6 @@ int main(void) DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg)); -#ifdef CONFIG_ARC_CURR_IN_REG - DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25)); -#endif DEFINE(THREAD_FAULT_ADDR, offsetof(struct thread_struct, fault_address)); @@ -49,7 +46,7 @@ int main(void) BLANK(); DEFINE(PT_status32, offsetof(struct pt_regs, status32)); - DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word)); + DEFINE(PT_event, offsetof(struct pt_regs, event)); DEFINE(PT_sp, offsetof(struct pt_regs, sp)); DEFINE(PT_r0, offsetof(struct pt_regs, r0)); DEFINE(PT_r1, offsetof(struct pt_regs, r1)); @@ -60,5 +57,7 @@ int main(void) DEFINE(PT_r6, offsetof(struct pt_regs, r6)); DEFINE(PT_r7, offsetof(struct pt_regs, r7)); + DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); + DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); return 0; } diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index 60844dac6132..34410eb1a308 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c @@ -23,10 +23,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) unsigned int tmp; unsigned int prev = (unsigned int)prev_task; unsigned int next = (unsigned int)next_task; - int num_words_to_skip = 1; -#ifdef CONFIG_ARC_CURR_IN_REG - num_words_to_skip++; -#endif __asm__ __volatile__( /* FP/BLINK save generated by gcc (standard function prologue */ @@ -44,8 +40,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) "st.a r24, [sp, -4] \n\t" #ifndef CONFIG_ARC_CURR_IN_REG "st.a r25, [sp, -4] \n\t" +#else + "sub sp, sp, 4 \n\t" /* usual r25 placeholder */ #endif - "sub sp, sp, %4 \n\t" /* create gutter at top */ /* set ksp of outgoing task in tsk->thread.ksp */ "st.as sp, [%3, %1] \n\t" @@ -76,10 +73,10 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) /* start loading it's CALLEE reg file */ - "add sp, sp, %4 \n\t" /* skip gutter at top */ - #ifndef CONFIG_ARC_CURR_IN_REG "ld.ab r25, [sp, 4] \n\t" +#else + "add sp, sp, 4 \n\t" #endif "ld.ab r24, [sp, 4] \n\t" "ld.ab r23, [sp, 4] \n\t" @@ -100,8 +97,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) /* FP/BLINK restore generated by gcc (standard func epilogue */ : "=r"(tmp) - : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev), - "n"(num_words_to_skip * 4) + : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev) : "blink" ); diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 0c6d664d4a83..1d7165156e17 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -142,7 +142,7 @@ VECTOR reserved ; Reserved Exceptions .endr #include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ -#include 
<asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ +#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */ #include <asm/errno.h> #include <asm/arcregs.h> #include <asm/irqflags.h> @@ -274,10 +274,8 @@ ARC_ENTRY instr_service SWITCH_TO_KERNEL_STK SAVE_ALL_SYS - lr r0, [ecr] - lr r1, [efa] - - mov r2, sp + lr r0, [efa] + mov r1, sp FAKE_RET_FROM_EXCPN r9 @@ -298,9 +296,8 @@ ARC_ENTRY mem_service SWITCH_TO_KERNEL_STK SAVE_ALL_SYS - lr r0, [ecr] - lr r1, [efa] - mov r2, sp + lr r0, [efa] + mov r1, sp bl do_memory_error b ret_from_exception ARC_EXIT mem_service @@ -317,11 +314,14 @@ ARC_ENTRY EV_MachineCheck SWITCH_TO_KERNEL_STK SAVE_ALL_SYS - lr r0, [ecr] - lr r1, [efa] - mov r2, sp + lr r2, [ecr] + lr r0, [efa] + mov r1, sp + + lsr r3, r2, 8 + bmsk r3, r3, 7 + brne r3, ECR_C_MCHK_DUP_TLB, 1f - brne r0, 0x200100, 1f bl do_tlb_overlap_fault b ret_from_exception @@ -355,8 +355,8 @@ ARC_ENTRY EV_TLBProtV ; ecr and efa were not saved in case an Intr sneaks in ; after fake rtie ; - lr r3, [ecr] - lr r4, [efa] + lr r2, [ecr] + lr r1, [efa] ; Faulting Data address ; --------(4) Return from CPU Exception Mode --------- ; Fake a rtie, but rtie to next label @@ -368,31 +368,25 @@ ARC_ENTRY EV_TLBProtV ;------ (5) Type of Protection Violation? ---------- ; ; ProtV Hardware Exception is triggered for Access Faults of 2 types - ; -Access Violaton (WRITE to READ ONLY Page) - for linux COW - ; -Unaligned Access (READ/WRITE on odd boundary) + ; -Access Violaton : 00_23_(00|01|02|03)_00 + ; x r w r+w + ; -Unaligned Access : 00_23_04_00 ; - cmp r3, 0x230400 ; Misaligned data access ? - beq 4f + bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f ;========= (6a) Access Violation Processing ======== - cmp r3, 0x230100 - mov r1, 0x0 ; if LD exception ? write = 0 - mov.ne r1, 0x1 ; else write = 1 - - mov r2, r4 ; faulting address mov r0, sp ; pt_regs bl do_page_fault b ret_from_exception ;========== (6b) Non aligned access ============ 4: - mov r0, r3 ; cause code - mov r1, r4 ; faulting address - mov r2, sp ; pt_regs + mov r0, r1 + mov r1, sp ; pt_regs #ifdef CONFIG_ARC_MISALIGN_ACCESS SAVE_CALLEE_SAVED_USER - mov r3, sp ; callee_regs + mov r2, sp ; callee_regs bl do_misaligned_access @@ -419,9 +413,8 @@ ARC_ENTRY EV_PrivilegeV SWITCH_TO_KERNEL_STK SAVE_ALL_SYS - lr r0, [ecr] - lr r1, [efa] - mov r2, sp + lr r0, [efa] + mov r1, sp FAKE_RET_FROM_EXCPN r9 @@ -440,9 +433,8 @@ ARC_ENTRY EV_Extension SWITCH_TO_KERNEL_STK SAVE_ALL_SYS - lr r0, [ecr] - lr r1, [efa] - mov r2, sp + lr r0, [efa] + mov r1, sp bl do_extension_fault b ret_from_exception ARC_EXIT EV_Extension @@ -498,11 +490,8 @@ tracesys_exit: trap_with_param: ; stop_pc info by gdb needs this info - stw orig_r8_IS_BRKPT, [sp, PT_orig_r8] - - mov r0, r12 - lr r1, [efa] - mov r2, sp + lr r0, [efa] + mov r1, sp ; Now that we have read EFA, its safe to do "fake" rtie ; and get out of CPU exception mode @@ -544,11 +533,11 @@ ARC_ENTRY EV_Trap lr r9, [erstatus] SWITCH_TO_KERNEL_STK - SAVE_ALL_TRAP + SAVE_ALL_SYS ;------- (4) What caused the Trap -------------- lr r12, [ecr] - and.f 0, r12, ECR_PARAM_MASK + bmsk.f 0, r12, 7 bnz trap_with_param ; ======= (5a) Trap is due to System Call ======== @@ -589,11 +578,7 @@ ARC_ENTRY ret_from_exception ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 ld r8, [sp, PT_status32] ; returning to User/Kernel Mode -#ifdef CONFIG_PREEMPT bbit0 r8, STATUS_U_BIT, resume_kernel_mode -#else - bbit0 r8, STATUS_U_BIT, restore_regs -#endif ; Before returning to User mode check-for-and-complete any pending work ; such as 
rescheduling/signal-delivery etc. @@ -653,10 +638,10 @@ resume_user_mode_begin: b resume_user_mode_begin ; unconditionally back to U mode ret chks ; for single exit point from this block -#ifdef CONFIG_PREEMPT - resume_kernel_mode: +#ifdef CONFIG_PREEMPT + ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] @@ -687,17 +672,6 @@ restore_regs : ; XXX can this be optimised out IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy -#ifdef CONFIG_ARC_CURR_IN_REG - ; Restore User R25 - ; Earlier this used to be only for returning to user mode - ; However with 2 levels of IRQ this can also happen even if - ; in kernel mode - ld r9, [sp, PT_sp] - brhs r9, VMALLOC_START, 8f - RESTORE_USER_R25 -8: -#endif - ; Restore REG File. In case multiple Events outstanding, ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None ; Note that we use realtime STATUS32 (not pt_regs->status32) to @@ -714,28 +688,33 @@ not_exception: #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS + ; Level 2 interrupt return Path - from hardware standpoint bbit0 r10, STATUS_A2_BIT, not_level2_interrupt ;------------------------------------------------------------------ + ; However the context returning might not have taken L2 intr itself + ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret + ; Special considerations needed for the context which took L2 intr + + ld r9, [sp, PT_event] ; Ensure this is L2 intr context + brne r9, event_IRQ2, 149f + + ;------------------------------------------------------------------ ; if L2 IRQ interrupted a L1 ISR, we'd disbaled preemption earlier ; so that sched doesnt move to new task, causing L1 to be delayed ; undeterministically. Now that we've achieved that, lets reset ; things to what they were, before returning from L2 context ;---------------------------------------------------------------- - ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is - brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path - ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal - ; A1 is set in status32_l2 ; decrement thread_info->preempt_count (re-enable preemption) GET_CURR_THR_INFO_FROM_SP r10 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] ; paranoid check, given A1 was active when A2 happened, preempt count - ; must not be 0 beccause we would have incremented it. + ; must not be 0 because we would have incremented it. ; If this does happen we simply HALT as it means a BUG !!! cmp r9, 0 bnz 2f diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 006dec3fc353..2a913f85a747 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -27,6 +27,8 @@ stext: ; Don't clobber r0-r4 yet. It might have bootloader provided info ;------------------------------------------------------------------- + sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] + #ifdef CONFIG_SMP ; Only Boot (Master) proceeds. 
Others wait in platform dependent way ; IDENTITY Reg [ 3 2 1 0 ] diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 8115fa531575..305b3f866aa7 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c @@ -28,25 +28,17 @@ * -Disable all IRQs (on CPU side) * -Optionally, setup the High priority Interrupts as Level 2 IRQs */ -void __cpuinit arc_init_IRQ(void) +void arc_init_IRQ(void) { int level_mask = 0; - write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds); - /* Disable all IRQs: enable them as devices request */ write_aux_reg(AUX_IENABLE, 0); /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ -#ifdef CONFIG_ARC_IRQ3_LV2 - level_mask |= (1 << 3); -#endif -#ifdef CONFIG_ARC_IRQ5_LV2 - level_mask |= (1 << 5); -#endif -#ifdef CONFIG_ARC_IRQ6_LV2 - level_mask |= (1 << 6); -#endif + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; if (level_mask) { pr_info("Level-2 interrupts bitset %x\n", level_mask); diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index 52bdc83c1495..a7698fb14818 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -169,7 +169,7 @@ int kgdb_arch_init(void) return 0; } -void kgdb_trap(struct pt_regs *regs, int param) +void kgdb_trap(struct pt_regs *regs) { /* trap_s 3 is used for breakpoints that overwrite existing * instructions, while trap_s 4 is used for compiled breakpoints. @@ -181,7 +181,7 @@ void kgdb_trap(struct pt_regs *regs, int param) * with trap_s 4 (compiled) breakpoints, continuation needs to * start after the breakpoint. */ - if (param == 3) + if (regs->ecr_param == 3) instruction_pointer(regs) -= BREAK_INSTR_SIZE; kgdb_handle_exception(1, SIGTRAP, 0, regs); diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 5a7b80e2d883..72f97822784a 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -517,8 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) return 0; } -void trap_is_kprobe(unsigned long cause, unsigned long address, - struct pt_regs *regs) +void trap_is_kprobe(unsigned long address, struct pt_regs *regs) { - notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP); + notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP); } diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index cad66851e0c4..07a3a968fe49 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -55,10 +55,8 @@ asmlinkage void ret_from_fork(void); * | ... 
| * | unused | * | | - * ------------------ <==== top of Stack (thread.ksp) - * | UNUSED 1 word| * ------------------ - * | r25 | + * | r25 | <==== top of Stack (thread.ksp) * ~ ~ * | --to-- | (CALLEE Regs of user mode) * | r13 | @@ -76,7 +74,10 @@ asmlinkage void ret_from_fork(void); * | --to-- | (scratch Regs of user mode) * | r0 | * ------------------ - * | UNUSED 1 word| + * | SP | + * | orig_r0 | + * | event/ECR | + * | user_r25 | * ------------------ <===== END of PAGE */ int copy_thread(unsigned long clone_flags, diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index c6a81c58d0f3..333238564b67 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -40,7 +40,15 @@ static int genregs_get(struct task_struct *target, offsetof(struct user_regs_struct, LOC), \ offsetof(struct user_regs_struct, LOC) + 4); +#define REG_O_ZERO(LOC) \ + if (!ret) \ + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \ + offsetof(struct user_regs_struct, LOC), \ + offsetof(struct user_regs_struct, LOC) + 4); + + REG_O_ZERO(pad); REG_O_CHUNK(scratch, callee, ptregs); + REG_O_ZERO(pad2); REG_O_CHUNK(callee, efa, cregs); REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address); @@ -88,8 +96,10 @@ static int genregs_set(struct task_struct *target, offsetof(struct user_regs_struct, LOC), \ offsetof(struct user_regs_struct, LOC) + 4); - /* TBD: disallow updates to STATUS32, orig_r8 etc*/ - REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */ + REG_IGNORE_ONE(pad); + /* TBD: disallow updates to STATUS32 etc*/ + REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */ + REG_IGNORE_ONE(pad2); REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ REG_IGNORE_ONE(efa); /* efa update invalid */ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */ diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index b2b3731dd1e9..6b083454d039 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -31,14 +31,14 @@ int running_on_hw = 1; /* vs. on ISS */ char __initdata command_line[COMMAND_LINE_SIZE]; -struct machine_desc *machine_desc __cpuinitdata; +struct machine_desc *machine_desc; struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; -void __cpuinit read_arc_build_cfg_regs(void) +void read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -182,7 +182,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) FIX_PTR(cpu); #define IS_AVAIL1(var, str) ((var) ? str : "") #define IS_AVAIL2(var, str) ((var == 0x2) ? str : "") -#define IS_USED(var) ((var) ? "(in-use)" : "(not used)") +#define IS_USED(cfg) (IS_ENABLED(cfg) ? 
"(in-use)" : "(not used)") n += scnprintf(buf + n, len - n, "Extn [700-Base]\t: %s %s %s %s %s %s\n", @@ -202,9 +202,9 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) if (cpu->core.family == 0x34) { n += scnprintf(buf + n, len - n, "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", - IS_USED(__CONFIG_ARC_HAS_LLSC_VAL), - IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL), - IS_USED(__CONFIG_ARC_HAS_RTSC_VAL)); + IS_USED(CONFIG_ARC_HAS_LLSC), + IS_USED(CONFIG_ARC_HAS_SWAPE), + IS_USED(CONFIG_ARC_HAS_RTSC)); } n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s", @@ -237,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) return buf; } -void __cpuinit arc_chk_ccms(void) +void arc_chk_ccms(void) { #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -272,7 +272,7 @@ void __cpuinit arc_chk_ccms(void) * hardware has dedicated regs which need to be saved/restored on ctx-sw * (Single Precision uses core regs), thus kernel is kind of oblivious to it */ -void __cpuinit arc_chk_fpu(void) +void arc_chk_fpu(void) { struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -293,7 +293,7 @@ void __cpuinit arc_chk_fpu(void) * such as only for boot CPU etc */ -void __cpuinit setup_processor(void) +void setup_processor(void) { char str[512]; int cpu_id = smp_processor_id(); diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 5c7fd603d216..bca3052c956d 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -117,7 +117,7 @@ const char *arc_platform_smp_cpuinfo(void) * Called from asm stub in head.S * "current"/R25 already setup by low level boot code */ -void __cpuinit start_kernel_secondary(void) +void start_kernel_secondary(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); @@ -154,7 +154,7 @@ void __cpuinit start_kernel_secondary(void) * * Essential requirements being where to run from (PC) and stack (SP) */ -int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) +int __cpu_up(unsigned int cpu, struct task_struct *idle) { unsigned long wait_till; diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index ca0207b9d5b6..f8b7d880304d 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -79,7 +79,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk, * assembly code */ frame_info->regs.r27 = 0; - frame_info->regs.r28 += 64; + frame_info->regs.r28 += 60; frame_info->call_frame = 0; } else { diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 09f4309aa2c0..0e51e69cf30d 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -44,13 +44,24 @@ #include <asm/clk.h> #include <asm/mach_desc.h> +/* Timer related Aux registers */ +#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ +#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ +#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */ +#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */ +#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ +#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ + +#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ +#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ + #define ARC_TIMER_MAX 0xFFFFFFFF /********** Clock Source Device *********/ #ifdef CONFIG_ARC_HAS_RTSC -int __cpuinit arc_counter_setup(void) +int arc_counter_setup(void) { /* RTSC insn taps into cpu clk, needs no setup */ @@ -105,7 +116,7 @@ static bool 
is_usable_as_clocksource(void) /* * set 32bit TIMER1 to keep counting monotonically and wraparound */ -int __cpuinit arc_counter_setup(void) +int arc_counter_setup(void) { write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); write_aux_reg(ARC_REG_TIMER1_CNT, 0); @@ -212,7 +223,7 @@ static struct irqaction arc_timer_irq = { * Setup the local event timer for @cpu * N.B. weak so that some exotic ARC SoCs can completely override it */ -void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu) +void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu) { struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index 0471d9c9dd54..e21692d2fdab 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -28,10 +28,9 @@ void __init trap_init(void) return; } -void die(const char *str, struct pt_regs *regs, unsigned long address, - unsigned long cause_reg) +void die(const char *str, struct pt_regs *regs, unsigned long address) { - show_kernel_fault_diag(str, regs, address, cause_reg); + show_kernel_fault_diag(str, regs, address); /* DEAD END */ __asm__("flag 1"); @@ -42,14 +41,13 @@ void die(const char *str, struct pt_regs *regs, unsigned long address, * -for user faults enqueues requested signal * -for kernel, chk if due to copy_(to|from)_user, otherwise die() */ -static noinline int handle_exception(unsigned long cause, char *str, - struct pt_regs *regs, siginfo_t *info) +static noinline int +handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info) { if (user_mode(regs)) { struct task_struct *tsk = current; tsk->thread.fault_address = (__force unsigned int)info->si_addr; - tsk->thread.cause_code = cause; force_sig_info(info->si_signo, info, tsk); @@ -58,14 +56,14 @@ static noinline int handle_exception(unsigned long cause, char *str, if (fixup_exception(regs)) return 0; - die(str, regs, (unsigned long)info->si_addr, cause); + die(str, regs, (unsigned long)info->si_addr); } return 1; } #define DO_ERROR_INFO(signr, str, name, sicode) \ -int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ +int name(unsigned long address, struct pt_regs *regs) \ { \ siginfo_t info = { \ .si_signo = signr, \ @@ -73,7 +71,7 @@ int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ .si_code = sicode, \ .si_addr = (void __user *)address, \ }; \ - return handle_exception(cause, str, regs, &info);\ + return handle_exception(str, regs, &info);\ } /* @@ -90,11 +88,11 @@ DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) /* * Entry Point for Misaligned Data access Exception, for emulating in software */ -int do_misaligned_access(unsigned long cause, unsigned long address, - struct pt_regs *regs, struct callee_regs *cregs) +int do_misaligned_access(unsigned long address, struct pt_regs *regs, + struct callee_regs *cregs) { - if (misaligned_fixup(address, regs, cause, cregs) != 0) - return do_misaligned_error(cause, address, regs); + if (misaligned_fixup(address, regs, cregs) != 0) + return do_misaligned_error(address, regs); return 0; } @@ -104,10 +102,9 @@ int do_misaligned_access(unsigned long cause, unsigned long address, * Entry point for miscll errors such as Nested Exceptions * -Duplicate TLB entry is handled seperately though */ -void do_machine_check_fault(unsigned long cause, unsigned long address, - struct pt_regs *regs) +void do_machine_check_fault(unsigned long address, struct pt_regs *regs) { - die("Machine Check 
Exception", regs, address, cause); + die("Machine Check Exception", regs, address); } @@ -120,23 +117,22 @@ void do_machine_check_fault(unsigned long cause, unsigned long address, * -1 used for software breakpointing (gdb) * -2 used by kprobes */ -void do_non_swi_trap(unsigned long cause, unsigned long address, - struct pt_regs *regs) +void do_non_swi_trap(unsigned long address, struct pt_regs *regs) { - unsigned int param = cause & 0xff; + unsigned int param = regs->ecr_param; switch (param) { case 1: - trap_is_brkpt(cause, address, regs); + trap_is_brkpt(address, regs); break; case 2: - trap_is_kprobe(param, address, regs); + trap_is_kprobe(address, regs); break; case 3: case 4: - kgdb_trap(regs, param); + kgdb_trap(regs); break; default: @@ -149,14 +145,14 @@ void do_non_swi_trap(unsigned long cause, unsigned long address, * -For a corner case, ARC kprobes implementation resorts to using * this exception, hence the check */ -void do_insterror_or_kprobe(unsigned long cause, - unsigned long address, - struct pt_regs *regs) +void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs) { + int rc; + /* Check if this exception is caused by kprobes */ - if (notify_die(DIE_IERR, "kprobe_ierr", regs, address, - cause, SIGILL) == NOTIFY_STOP) + rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL); + if (rc == NOTIFY_STOP) return; - insterror_is_error(cause, address, regs); + insterror_is_error(address, regs); } diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index a03528ecd276..73a7450ee622 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -117,23 +117,22 @@ static void show_faulting_vma(unsigned long address, char *buf) static void show_ecr_verbose(struct pt_regs *regs) { - unsigned int vec, cause_code, cause_reg; + unsigned int vec, cause_code; unsigned long address; - cause_reg = current->thread.cause_code; - pr_info("\n[ECR ]: 0x%08x => ", cause_reg); + pr_info("\n[ECR ]: 0x%08lx => ", regs->event); /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; - vec = cause_reg >> 16; - cause_code = (cause_reg >> 8) & 0xFF; + vec = regs->ecr_vec; + cause_code = regs->ecr_cause; /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { - pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", - (cause_code == 0x01) ? "Read From" : - ((cause_code == 0x02) ? "Write to" : "EX"), + pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n", + (cause_code == 0x01) ? "Read" : + ((cause_code == 0x02) ? "Write" : "EX"), address, regs->ret); } else if (vec == ECR_V_ITLB_MISS) { pr_cont("Insn could not be fetched\n"); @@ -144,14 +143,12 @@ static void show_ecr_verbose(struct pt_regs *regs) } else if (vec == ECR_V_PROTV) { if (cause_code == ECR_C_PROTV_INST_FETCH) pr_cont("Execute from Non-exec Page\n"); - else if (cause_code == ECR_C_PROTV_LOAD) - pr_cont("Read from Non-readable Page\n"); - else if (cause_code == ECR_C_PROTV_STORE) - pr_cont("Write to Non-writable Page\n"); - else if (cause_code == ECR_C_PROTV_XCHG) - pr_cont("Data exchange protection violation\n"); else if (cause_code == ECR_C_PROTV_MISALIG_DATA) pr_cont("Misaligned r/w from 0x%08lx\n", address); + else + pr_cont("%s access not allowed on page\n", + (cause_code == 0x01) ? "Read" : + ((cause_code == 0x02) ? 
"Write" : "EX")); } else if (vec == ECR_V_INSN_ERR) { pr_cont("Illegal Insn\n"); } else { @@ -176,8 +173,7 @@ void show_regs(struct pt_regs *regs) print_task_path_n_nm(tsk, buf); show_regs_print_info(KERN_INFO); - if (current->thread.cause_code) - show_ecr_verbose(regs); + show_ecr_verbose(regs); pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", current->thread.fault_address, @@ -213,10 +209,9 @@ void show_regs(struct pt_regs *regs) } void show_kernel_fault_diag(const char *str, struct pt_regs *regs, - unsigned long address, unsigned long cause_reg) + unsigned long address) { current->thread.fault_address = address; - current->thread.cause_code = cause_reg; /* Caller and Callee regs */ show_regs(regs); diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 4cd81633febd..c0f832f595d3 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -187,7 +187,7 @@ fault: state->fault = 1; * Returns 0 if successfully handled, 1 if some error happened */ int misaligned_fixup(unsigned long address, struct pt_regs *regs, - unsigned long cause, struct callee_regs *cregs) + struct callee_regs *cregs) { struct disasm_state state; char buf[TASK_COMM_LEN]; diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index a8d02223da44..e550b117ec4f 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -289,6 +289,8 @@ static void __init setup_unwind_table(struct unwind_table *table, * instead of the initial loc addr * return; */ + WARN(1, "unwinder: FDE->initial_location NULL %p\n", + (const u8 *)(fde + 1) + *fde); } ++n; } diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index d3c92f52d444..2555f5886af6 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -125,6 +125,11 @@ SECTIONS *(.debug_frame) __end_unwind = .; } + /* + * gcc 4.8 generates this for -fasynchonous-unwind-tables, + * while we still use the .debug_frame based unwinder + */ + /DISCARD/ : { *(.eh_frame) } #else /DISCARD/ : { *(.debug_frame) } #endif @@ -142,15 +147,18 @@ SECTIONS *(.arcextmap.*) } +#ifndef CONFIG_DEBUG_INFO /* open-coded because we need .debug_frame seperately for unwinding */ - .debug_aranges 0 : { *(.debug_aranges) } - .debug_pubnames 0 : { *(.debug_pubnames) } - .debug_info 0 : { *(.debug_info) } - .debug_abbrev 0 : { *(.debug_abbrev) } - .debug_line 0 : { *(.debug_line) } - .debug_str 0 : { *(.debug_str) } - .debug_loc 0 : { *(.debug_loc) } - .debug_macinfo 0 : { *(.debug_macinfo) } + /DISCARD/ : { *(.debug_aranges) } + /DISCARD/ : { *(.debug_pubnames) } + /DISCARD/ : { *(.debug_info) } + /DISCARD/ : { *(.debug_abbrev) } + /DISCARD/ : { *(.debug_line) } + /DISCARD/ : { *(.debug_str) } + /DISCARD/ : { *(.debug_loc) } + /DISCARD/ : { *(.debug_macinfo) } + /DISCARD/ : { *(.debug_ranges) } +#endif #ifdef CONFIG_ARC_HAS_DCCM . 
= CONFIG_ARC_DCCM_BASE; diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index aedce1905441..f415d851b765 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -73,6 +73,33 @@ #include <asm/cachectl.h> #include <asm/setup.h> +/* Instruction cache related Auxiliary registers */ +#define ARC_REG_IC_BCR 0x77 /* Build Config reg */ +#define ARC_REG_IC_IVIC 0x10 +#define ARC_REG_IC_CTRL 0x11 +#define ARC_REG_IC_IVIL 0x19 +#if (CONFIG_ARC_MMU_VER > 2) +#define ARC_REG_IC_PTAG 0x1E +#endif + +/* Bit val in IC_CTRL */ +#define IC_CTRL_CACHE_DISABLE 0x1 + +/* Data cache related Auxiliary registers */ +#define ARC_REG_DC_BCR 0x72 /* Build Config reg */ +#define ARC_REG_DC_IVDC 0x47 +#define ARC_REG_DC_CTRL 0x48 +#define ARC_REG_DC_IVDL 0x4A +#define ARC_REG_DC_FLSH 0x4B +#define ARC_REG_DC_FLDL 0x4C +#if (CONFIG_ARC_MMU_VER > 2) +#define ARC_REG_DC_PTAG 0x5C +#endif + +/* Bit val in DC_CTRL */ +#define DC_CTRL_INV_MODE_FLUSH 0x40 +#define DC_CTRL_FLUSH_STATUS 0x100 + char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; @@ -89,8 +116,10 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) enb ? "" : "DISABLED (kernel-build)"); \ } - PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache"); - PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache"); + PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE), + "I-Cache"); + PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE), + "D-Cache"); return buf; } @@ -100,17 +129,23 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) * the cpuinfo structure for later use. * No Validation done here, simply read/convert the BCRs */ -void __cpuinit read_decode_cache_bcr(void) +void read_decode_cache_bcr(void) { - struct bcr_cache ibcr, dbcr; struct cpuinfo_arc_cache *p_ic, *p_dc; unsigned int cpu = smp_processor_id(); + struct bcr_cache { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; +#else + unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; +#endif + } ibcr, dbcr; p_ic = &cpuinfo_arc700[cpu].icache; READ_BCR(ARC_REG_IC_BCR, ibcr); - if (ibcr.config == 0x3) - p_ic->assoc = 2; + BUG_ON(ibcr.config != 3); + p_ic->assoc = 2; /* Fixed to 2w set assoc */ p_ic->line_len = 8 << ibcr.line_len; p_ic->sz = 0x200 << ibcr.sz; p_ic->ver = ibcr.ver; @@ -118,8 +153,8 @@ void __cpuinit read_decode_cache_bcr(void) p_dc = &cpuinfo_arc700[cpu].dcache; READ_BCR(ARC_REG_DC_BCR, dbcr); - if (dbcr.config == 0x2) - p_dc->assoc = 4; + BUG_ON(dbcr.config != 2); + p_dc->assoc = 4; /* Fixed to 4w set assoc */ p_dc->line_len = 16 << dbcr.line_len; p_dc->sz = 0x200 << dbcr.sz; p_dc->ver = dbcr.ver; @@ -132,14 +167,12 @@ void __cpuinit read_decode_cache_bcr(void) * 3. Enable the Caches, setup default flush mode for D-Cache * 3. Calculate the SHMLBA used by user space */ -void __cpuinit arc_cache_init(void) +void arc_cache_init(void) { - unsigned int temp; unsigned int cpu = smp_processor_id(); struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; - int way_pg_ratio = way_pg_ratio; - int dcache_does_alias; + unsigned int dcache_does_alias, temp; char str[256]; printk(arc_cache_mumbojumbo(0, str, sizeof(str))); @@ -149,20 +182,11 @@ void __cpuinit arc_cache_init(void) #ifdef CONFIG_ARC_HAS_ICACHE /* 1. 
Confirm some of I-cache params which Linux assumes */ - if ((ic->assoc != ARC_ICACHE_WAYS) || - (ic->line_len != ARC_ICACHE_LINE_LEN)) { + if (ic->line_len != ARC_ICACHE_LINE_LEN) panic("Cache H/W doesn't match kernel Config"); - } -#if (CONFIG_ARC_MMU_VER > 2) - if (ic->ver != 3) { - if (running_on_hw) - panic("Cache ver doesn't match MMU ver\n"); - - /* For ISS - suggest the toggles to use */ - pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n"); - } -#endif + if (ic->ver != CONFIG_ARC_MMU_VER) + panic("Cache ver doesn't match MMU ver\n"); #endif /* Enable/disable I-Cache */ @@ -181,14 +205,12 @@ chk_dc: return; #ifdef CONFIG_ARC_HAS_DCACHE - if ((dc->assoc != ARC_DCACHE_WAYS) || - (dc->line_len != ARC_DCACHE_LINE_LEN)) { + if (dc->line_len != ARC_DCACHE_LINE_LEN) panic("Cache H/W doesn't match kernel Config"); - } - - dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; /* check for D-Cache aliasing */ + dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; + if (dcache_does_alias && !cache_is_vipt_aliasing()) panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); else if (!dcache_does_alias && cache_is_vipt_aliasing()) @@ -239,11 +261,9 @@ static inline void wait_for_flush(void) */ static inline void __dc_entire_op(const int cacheop) { - unsigned long flags, tmp = tmp; + unsigned int tmp = tmp; int aux; - local_irq_save(flags); - if (cacheop == OP_FLUSH_N_INV) { /* Dcache provides 2 cmd: FLUSH or INV * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE @@ -267,8 +287,6 @@ static inline void __dc_entire_op(const int cacheop) /* Switch back the DISCARD ONLY Invalidate mode */ if (cacheop == OP_FLUSH_N_INV) write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); - - local_irq_restore(flags); } /* @@ -459,8 +477,15 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, local_irq_restore(flags); } +static inline void __ic_entire_inv(void) +{ + write_aux_reg(ARC_REG_IC_IVIC, 1); + read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ +} + #else +#define __ic_entire_inv() #define __ic_line_inv_vaddr(pstart, vstart, sz) #endif /* CONFIG_ARC_HAS_ICACHE */ @@ -487,7 +512,7 @@ void flush_dcache_page(struct page *page) struct address_space *mapping; if (!cache_is_vipt_aliasing()) { - set_bit(PG_arch_1, &page->flags); + clear_bit(PG_dc_clean, &page->flags); return; } @@ -501,7 +526,7 @@ void flush_dcache_page(struct page *page) * Make a note that K-mapping is dirty */ if (!mapping_mapped(mapping)) { - set_bit(PG_arch_1, &page->flags); + clear_bit(PG_dc_clean, &page->flags); } else if (page_mapped(page)) { /* kernel reading from page with U-mapping */ @@ -629,26 +654,13 @@ void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr) __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); } -void flush_icache_all(void) -{ - unsigned long flags; - - local_irq_save(flags); - - write_aux_reg(ARC_REG_IC_IVIC, 1); - - /* lr will not complete till the icache inv operation is not over */ - read_aux_reg(ARC_REG_IC_CTRL); - local_irq_restore(flags); -} - noinline void flush_cache_all(void) { unsigned long flags; local_irq_save(flags); - flush_icache_all(); + __ic_entire_inv(); __dc_entire_op(OP_FLUSH_N_INV); local_irq_restore(flags); @@ -667,7 +679,12 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, { unsigned int paddr = pfn << PAGE_SHIFT; - __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); + u_vaddr &= PAGE_MASK; + + ___flush_dcache_page(paddr, u_vaddr); + + if (vma->vm_flags & VM_EXEC) + __inv_icache_page(paddr, u_vaddr); } void 
flush_cache_range(struct vm_area_struct *vma, unsigned long start, @@ -717,7 +734,7 @@ void copy_user_highpage(struct page *to, struct page *from, * non copied user pages (e.g. read faults which wire in pagecache page * directly). */ - set_bit(PG_arch_1, &to->flags); + clear_bit(PG_dc_clean, &to->flags); /* * if SRC was already usermapped and non-congruent to kernel mapping @@ -725,15 +742,16 @@ void copy_user_highpage(struct page *to, struct page *from, */ if (clean_src_k_mappings) { __flush_dcache_page(kfrom, kfrom); + set_bit(PG_dc_clean, &from->flags); } else { - set_bit(PG_arch_1, &from->flags); + clear_bit(PG_dc_clean, &from->flags); } } void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) { clear_page(to); - set_bit(PG_arch_1, &page->flags); + clear_bit(PG_dc_clean, &page->flags); } diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 689ffd86d5e9..318164cabdfc 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -15,6 +15,7 @@ #include <linux/uaccess.h> #include <linux/kdebug.h> #include <asm/pgalloc.h> +#include <asm/mmu.h> static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) { @@ -51,14 +52,14 @@ bad_area: return 1; } -void do_page_fault(struct pt_regs *regs, int write, unsigned long address, - unsigned long cause_code) +void do_page_fault(struct pt_regs *regs, unsigned long address) { struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; siginfo_t info; int fault, ret; + int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | (write ? FAULT_FLAG_WRITE : 0); @@ -109,7 +110,8 @@ good_area: /* Handle protection violation, execute on heap or stack */ - if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH)) + if ((regs->ecr_vec == ECR_V_PROTV) && + (regs->ecr_cause == ECR_C_PROTV_INST_FETCH)) goto bad_area; if (write) { @@ -176,7 +178,6 @@ bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { tsk->thread.fault_address = address; - tsk->thread.cause_code = cause_code; info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ @@ -197,7 +198,7 @@ no_context: if (fixup_exception(regs)) return; - die("Oops", regs, address, cause_code); + die("Oops", regs, address); out_of_memory: if (is_global_init(tsk)) { @@ -218,7 +219,6 @@ do_sigbus: goto no_context; tsk->thread.fault_address = address; - tsk->thread.cause_code = cause_code; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index fe1c5a073afe..7957dc4e4d4a 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -55,7 +55,7 @@ #include <asm/arcregs.h> #include <asm/setup.h> #include <asm/mmu_context.h> -#include <asm/tlb.h> +#include <asm/mmu.h> /* Need for ARC MMU v2 * @@ -97,6 +97,7 @@ * J-TLB entry got evicted/replaced. 
*/ + /* A copy of the ASID from the PID reg is kept in asid_cache */ int asid_cache = FIRST_ASID; @@ -432,9 +433,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, { unsigned long vaddr = vaddr_unaligned & PAGE_MASK; unsigned long paddr = pte_val(*ptep) & PAGE_MASK; + struct page *page = pfn_to_page(pte_pfn(*ptep)); create_tlb(vma, vaddr, ptep); + if (page == ZERO_PAGE(0)) { + return; + } + /* * Exec page : Independent of aliasing/page-color considerations, * since icache doesn't snoop dcache on ARC, any dirty @@ -446,9 +452,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, */ if ((vma->vm_flags & VM_EXEC) || addr_not_cache_congruent(paddr, vaddr)) { - struct page *page = pfn_to_page(pte_pfn(*ptep)); - int dirty = test_and_clear_bit(PG_arch_1, &page->flags); + int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); if (dirty) { /* wback + inv dcache lines */ __flush_dcache_page(paddr, paddr); @@ -464,12 +469,27 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, * the cpuinfo structure for later use. * No Validation is done here, simply read/convert the BCRs */ -void __cpuinit read_decode_mmu_bcr(void) +void read_decode_mmu_bcr(void) { - unsigned int tmp; - struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */ - struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; + unsigned int tmp; + struct bcr_mmu_1_2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8; +#else + unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8; +#endif + } *mmu2; + + struct bcr_mmu_3 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, + u_itlb:4, u_dtlb:4; +#else + unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, + ways:4, ver:8; +#endif + } *mmu3; tmp = read_aux_reg(ARC_REG_MMU_BCR); mmu->ver = (tmp >> 24); @@ -505,12 +525,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, p_mmu->u_dtlb, p_mmu->u_itlb, - __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : ""); + IS_ENABLED(CONFIG_ARC_MMU_SASID) ? 
"SASID" : ""); return buf; } -void __cpuinit arc_mmu_init(void) +void arc_mmu_init(void) { char str[256]; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 3357d26ffe54..5c5bb23001b0 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S @@ -39,7 +39,7 @@ #include <linux/linkage.h> #include <asm/entry.h> -#include <asm/tlb.h> +#include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/arcregs.h> #include <asm/cache.h> @@ -147,9 +147,9 @@ ex_saved_reg1: #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT and.f 0, r0, _PAGE_PRESENT bz 1f - ld r2, [num_pte_not_present] - add r2, r2, 1 - st r2, [num_pte_not_present] + ld r3, [num_pte_not_present] + add r3, r3, 1 + st r3, [num_pte_not_present] 1: #endif @@ -271,22 +271,22 @@ ARC_ENTRY EV_TLBMissI #endif ;---------------------------------------------------------------- - ; Get the PTE corresponding to V-addr accessed + ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA LOAD_FAULT_PTE ;---------------------------------------------------------------- ; VERIFY_PTE: Check if PTE permissions approp for executing code cmp_s r2, VMALLOC_START - mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) - mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) + mov.lo r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE) + mov.hs r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE) and r3, r0, r2 ; Mask out NON Flag bits from PTE xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) bnz do_slow_path_pf ; Let Linux VM know that the page was accessed - or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit - st_s r0, [r1] ; Write back PTE + or r0, r0, _PAGE_ACCESSED ; set Accessed Bit + st_s r0, [r1] ; Write back PTE CONV_PTE_TO_TLB COMMIT_ENTRY_TO_MMU @@ -311,7 +311,7 @@ ARC_ENTRY EV_TLBMissD ;---------------------------------------------------------------- ; Get the PTE corresponding to V-addr accessed - ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE + ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA LOAD_FAULT_PTE ;---------------------------------------------------------------- @@ -345,7 +345,7 @@ ARC_ENTRY EV_TLBMissD ;---------------------------------------------------------------- ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty lr r3, [ecr] - or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always + or r0, r0, _PAGE_ACCESSED ; Accessed bit always btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well st_s r0, [r1] ; Write back PTE @@ -381,18 +381,7 @@ do_slow_path_pf: ; ------- setup args for Linux Page fault Hanlder --------- mov_s r0, sp - lr r2, [efa] - lr r3, [ecr] - - ; Both st and ex imply WRITE access of some sort, hence do_page_fault( ) - ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or - ; DTLB-ld Miss - ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03 - ; Following code uses that fact that st/ex have one bit in common - - btst_s r3, ECR_C_BIT_DTLB_ST_MISS - mov.z r1, 0 - mov.nz r1, 1 + lr r1, [efa] ; We don't want exceptions to be disabled while the fault is handled. 
; Now that we have saved the context we return from exception hence diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c index b3700c064c06..d71f3c3bcf24 100644 --- a/arch/arc/plat-arcfpga/platform.c +++ b/arch/arc/plat-arcfpga/platform.c @@ -77,6 +77,7 @@ static void __init setup_bvci_lat_unit(void) /*----------------------- Platform Devices -----------------------------*/ +#if IS_ENABLED(CONFIG_SERIAL_ARC) static unsigned long arc_uart_info[] = { 0, /* uart->is_emulated (runtime @running_on_hw) */ 0, /* uart->port.uartclk */ @@ -115,7 +116,7 @@ static struct platform_device arc_uart0_dev = { static struct platform_device *fpga_early_devs[] __initdata = { &arc_uart0_dev, }; -#endif +#endif /* CONFIG_SERIAL_ARC_CONSOLE */ static void arc_fpga_serial_init(void) { @@ -152,8 +153,13 @@ static void arc_fpga_serial_init(void) * otherwise the early console never gets a chance to run. */ add_preferred_console("ttyARC", 0, "115200"); -#endif +#endif /* CONFIG_SERIAL_ARC_CONSOLE */ +} +#else /* !IS_ENABLED(CONFIG_SERIAL_ARC) */ +static void arc_fpga_serial_init(void) +{ } +#endif static void __init plat_fpga_early_init(void) { @@ -169,7 +175,7 @@ static void __init plat_fpga_early_init(void) } static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = { -#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE) +#if IS_ENABLED(CONFIG_SERIAL_ARC) OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info), #endif {} |
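The hunks above lean on a handful of small mechanisms; the sketches below restate some of them in plain C, with every helper name hypothetical unless it appears in the diff itself. First, the exception entry/reporting rework: handlers stop passing a separately-read ECR around, and consumers pick the sub-fields out of the saved value instead (the asm uses lsr/bmsk, C code reads regs->ecr_vec / ecr_cause / ecr_param). A minimal standalone sketch of the field split implied by show_ecr_verbose(), assuming one byte each for vector, cause and parameter:

#include <stdint.h>

struct ecr_view {
	uint8_t vec;	/* bits 23:16, exception vector (ProtV, DTLB miss, ...) */
	uint8_t cause;	/* bits 15:8, cause code within that vector */
	uint8_t param;	/* bits 7:0, parameter, e.g. the trap_s operand */
};

static struct ecr_view decode_ecr(uint32_t ecr)
{
	struct ecr_view v = {
		.vec   = (ecr >> 16) & 0xff,
		.cause = (ecr >> 8) & 0xff,
		.param = ecr & 0xff,
	};
	return v;
}

The machine-check handler does the same thing in assembly: lsr r3, r2, 8 followed by bmsk r3, r3, 7 isolates the cause byte so it can be compared against ECR_C_MCHK_DUP_TLB instead of the old hard-coded 0x200100, and do_page_fault() derives its write flag from the same field (regs->ecr_cause & ECR_C_PROTV_STORE).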
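Still in entry.S, the level-2 interrupt return path now recognises itself via pt_regs->event instead of the removed orig_r8 word. A rough C model of the decision it encodes, using the constants visible in the hunk (event_IRQ2, STATUS_A1_BIT); the pt_regs/thread_info field names are assumptions and the real logic stays in assembly:

#include <asm/ptrace.h>		/* struct pt_regs (field names assumed) */
#include <linux/thread_info.h>
#include <asm/bug.h>

static void l2_irq_return_fixup(struct pt_regs *regs, struct thread_info *ti)
{
	/* only a frame that itself entered via an L2 interrupt needs this */
	if (regs->event != event_IRQ2)
		return;

	/* ... and only if an L1 interrupt was active underneath (A1 set) */
	if (!(regs->status32 & (1UL << STATUS_A1_BIT)))
		return;

	/* entry code bumped preempt_count to pin the task, so it can't be 0 */
	if (ti->preempt_count == 0)
		BUG();			/* the asm HALTs here */

	ti->preempt_count--;		/* re-enable preemption */
}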
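The irq.c change works because IS_ENABLED() is an ordinary compile-time 0/1 expression, so each line is either (1 << n) or 0 and the three #ifdef blocks can go. A small sketch, wrapped in a hypothetical helper:

#include <linux/kconfig.h>	/* IS_ENABLED() */

static unsigned int arc_level2_irq_mask(void)
{
	unsigned int level_mask = 0;

	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;

	return level_mask;	/* 0x68 if all three options are set */
}

The same idiom appears in setup.c (IS_USED()), cache_arc700.c (PR_CACHE), tlb.c (the SASID banner) and plat-arcfpga/platform.c, where IS_ENABLED(CONFIG_SERIAL_ARC) additionally gates an empty arc_fpga_serial_init() stub so callers need no #ifdefs of their own.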
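The ptrace.c hunk keeps the user_regs_struct layout stable for gdb even though the pad words vanished from the kernel frame: REG_O_ZERO() exports zeros for the missing slots and REG_IGNORE_ONE() skips them on writes. A standalone model of the copy-out idea (deliberately not the real user_regset API, just its shape):

#include <string.h>

struct export_cursor { char *dst; };

/* live kernel words go out as-is */
static void export_chunk(struct export_cursor *c, const void *src, size_t len)
{
	memcpy(c->dst, src, len);
	c->dst += len;
}

/* pad slots that no longer exist in pt_regs go out as zero */
static void export_zero(struct export_cursor *c, size_t len)
{
	memset(c->dst, 0, len);
	c->dst += len;
}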
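With the timer aux registers now defined locally in time.c, the non-RTSC clocksource path simply lets TIMER1 free-run across the full 32-bit range. A hedged sketch of that setup: only the LIMIT and CNT writes are visible in the hunk, so the CTRL value (count while not halted, no interrupt) is inferred from the TIMER_CTRL_* flags defined above:

#include <asm/arcregs.h>	/* write_aux_reg(); ARC_REG_TIMER1_* and
				 * TIMER_CTRL_NH are the local defines from
				 * the time.c hunk above */

static void arc_timer1_free_run(void)
{
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);	/* wrap at 0xFFFFFFFF */
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);			/* count from zero */
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);	/* no IRQ, pause on halt */
}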
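After the cause argument is dropped, each DO_ERROR_INFO() instance in traps.c reduces to a two-argument handler. The misaligned-access case from the hunk, roughly hand-expanded as it would appear inside traps.c:

int do_misaligned_error(unsigned long address, struct pt_regs *regs)
{
	siginfo_t info = {
		.si_signo = SIGBUS,
		.si_errno = 0,
		.si_code  = BUS_ADRALN,
		.si_addr  = (void __user *)address,
	};

	return handle_exception("Misaligned Access", regs, &info);
}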
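Two bits of arithmetic drive the cache_arc700.c changes: the BCR fields scale into byte geometry (line length in units of 8 bytes, size in units of 512 bytes), and a VIPT D-cache can only alias when one way spans more than a page. Minimal sketches of both, with made-up helper names:

/* BCR field -> geometry, mirroring "8 << line_len" and "0x200 << sz" above */
static void decode_cache_geometry(unsigned int line_len_fld, unsigned int sz_fld,
				  unsigned int *line_bytes, unsigned int *size_bytes)
{
	*line_bytes = 8 << line_len_fld;	/* 8, 16, 32, 64 ... */
	*size_bytes = 0x200 << sz_fld;		/* 512B, 1K, 2K ... */
}

/* a VIPT cache aliases only when one way covers more than one page */
static int dcache_can_alias(unsigned int size_bytes, unsigned int ways,
			    unsigned int page_bytes)
{
	return (size_bytes / ways) > page_bytes;
}

For example a 32K 4-way D-cache with 8K pages has 8K ways and cannot alias, while a 64K 4-way one can; that is exactly the condition arc_cache_init() now cross-checks against CONFIG_ARC_CACHE_VIPT_ALIASING.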
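The PG_arch_1 to PG_dc_clean rename also inverts the flag's meaning: set now means the D-cache lines for the page are known clean, so dirtying becomes a clear_bit() and the writeback decision in update_mmu_cache() a test_and_set. A sketch of the idiom, assuming PG_dc_clean simply reuses the arch page-flag bit:

#include <linux/mm_types.h>	/* struct page */
#include <linux/bitops.h>	/* clear_bit(), test_and_set_bit() */

static inline void arc_page_mark_dcache_dirty(struct page *page)
{
	clear_bit(PG_dc_clean, &page->flags);
}

static inline int arc_page_needs_dcache_writeback(struct page *page)
{
	/* true exactly once per dirty page: the flag flips to "clean" here */
	return !test_and_set_bit(PG_dc_clean, &page->flags);
}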
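Finally, the I-TLB miss fast path in tlbex.S now demands only PRESENT plus the relevant EXECUTE bit. A C-level restatement of that VERIFY_PTE step, choosing the user or kernel bit by comparing the fault address against VMALLOC_START (flag macros assumed to come from the ARC mm headers):

#include <asm/pgtable.h>	/* _PAGE_* flags, VMALLOC_START */

static int itlb_fast_path_ok(unsigned long efa, unsigned long pte)
{
	unsigned long need = (efa < VMALLOC_START)
		? (_PAGE_PRESENT | _PAGE_U_EXECUTE)
		: (_PAGE_PRESENT | _PAGE_K_EXECUTE);

	return (pte & need) == need;	/* else branch to do_slow_path_pf */
}

The PTE write-back that follows ORs in only _PAGE_ACCESSED, since _PAGE_PRESENT was already verified a few instructions earlier.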