Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          |   18
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |  262
-rw-r--r--  arch/powerpc/kernel/fpu.S             |  133
-rw-r--r--  arch/powerpc/kernel/head.S            | 1545
-rw-r--r--  arch/powerpc/kernel/head_44x.S        |  778
-rw-r--r--  arch/powerpc/kernel/head_4xx.S        | 1016
-rw-r--r--  arch/powerpc/kernel/head_64.S         | 2011
-rw-r--r--  arch/powerpc/kernel/head_8xx.S        |  860
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  | 1058
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S        |  233
-rw-r--r--  arch/powerpc/kernel/process.c         |  724
-rw-r--r--  arch/powerpc/kernel/semaphore.c       |  135
-rw-r--r--  arch/powerpc/kernel/traps.c           | 1047
-rw-r--r--  arch/powerpc/kernel/vector.S          |  197
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds       |  174
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S     |  172
16 files changed, 10363 insertions, 0 deletions
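A note on the technique: the asm-offsets.c file added below is never linked into the kernel; it exists only to generate structure-offset constants for the .S files in this directory. Each DEFINE() plants a "->SYM value" marker in the compiler's assembly output, and the build then rewrites those markers into a header of plain #defines. A minimal standalone sketch of the trick (the struct, file names, and sed command here are illustrative, not the kernel's real build rule):

	/* offsets.c -- compile with "gcc -S offsets.c"; never assembled or linked */
	#include <stddef.h>

	struct thread_info {            /* stand-in for the kernel's struct */
	        unsigned long flags;
	        int preempt_count;
	        int cpu;
	};

	/* The "i" constraint makes the compiler print the constant itself
	 * into the generated .s file, prefixed by the "->" marker. */
	#define DEFINE(sym, val) \
	        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
	        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	        return 0;
	}

The generated offsets.s then contains lines such as "->TI_CPU 8 offsetof(struct thread_info, cpu)" (the numbers depend on the target), which a sed pass along the lines of sed -n 's/^->\([^ ]*\) \([^ ]*\).*/#define \1 \2/p' turns into #defines that assembly code can include; the exact spelling of the constant varies by compiler and target, which is why the kernel's real extraction script is more careful.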
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile new file mode 100644 index 000000000000..62c4a51a23d7 --- /dev/null +++ b/arch/powerpc/kernel/Makefile @@ -0,0 +1,18 @@ +# +# Makefile for the linux kernel. +# + +extra-$(CONFIG_PPC_STD_MMU) := head.o +extra-$(CONFIG_PPC64) := head_64.o +extra-$(CONFIG_40x) := head_4xx.o +extra-$(CONFIG_44x) := head_44x.o +extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o +extra-$(CONFIG_8xx) := head_8xx.o +extra-$(CONFIG_6xx) += idle_6xx.o +extra-$(CONFIG_POWER4) += idle_power4.o +extra-$(CONFIG_PPC_FPU) += fpu.o +extra-y += vmlinux.lds + +obj-y := semaphore.o traps.o process.o + +obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c new file mode 100644 index 000000000000..16cf0b7ee2b7 --- /dev/null +++ b/arch/powerpc/kernel/asm-offsets.c @@ -0,0 +1,262 @@ +/* + * This program is used to generate definitions needed by + * assembly language modules. + * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/config.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/suspend.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/time.h> +#include <linux/hardirq.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> + +#include <asm/cputable.h> +#include <asm/thread_info.h> +#ifdef CONFIG_PPC64 +#include <asm/paca.h> +#include <asm/lppaca.h> +#include <asm/iSeries/HvLpEvent.h> +#include <asm/rtas.h> +#include <asm/cache.h> +#include <asm/systemcfg.h> +#include <asm/compat.h> +#endif + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + /* thread struct on stack */ + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +#ifdef CONFIG_PPC32 + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); +#endif +#ifdef CONFIG_PPC64 + DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); + DEFINE(THREAD_SHIFT, THREAD_SHIFT); +#endif + DEFINE(THREAD_SIZE, THREAD_SIZE); + + /* task_struct->thread */ + DEFINE(THREAD, offsetof(struct task_struct, thread)); + DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); + DEFINE(MM, offsetof(struct task_struct, mm)); + DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(KSP, offsetof(struct thread_struct, ksp)); + DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); + DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); + DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); + DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); + DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); + DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); +#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) +
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); + DEFINE(PT_PTRACED, PT_PTRACED); +#endif +#ifdef CONFIG_PPC64 + DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); +#endif + +#ifdef CONFIG_ALTIVEC + DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); + DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); + DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); + DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); +#endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_SPE + DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); + DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); + DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); + DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); +#endif /* CONFIG_SPE */ + /* Interrupt register frame */ + DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); +#ifndef CONFIG_PPC64 + DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); +#else + DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); + + /* 288 = # of volatile regs, int & fp, for leaf routines */ + /* which do not stack a frame. See the PPC64 ABI. */ + DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288); +#endif + /* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */ + DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); + DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); + DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); + DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); + DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); + DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); + DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); + DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); + DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); + DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); + DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); + DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); + DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); + DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); + DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); + DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); + DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); + DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); + DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); + DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); + DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); + DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); + DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); + DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); + DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); + DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); + DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); + DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); + DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); + DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); + DEFINE(GPR30, 
STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); + DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); + /* + * Note: these symbols include _ because they overlap with special + * register names + */ + DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); + DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); + DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); + DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); + DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); + DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); + DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); + DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); + DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); + /* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR + * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs + * for such processors. For critical interrupts we use them to + * hold SRR0 and SRR1. + */ + DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); + DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); + DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); + DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); + DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); + DEFINE(CLONE_VM, CLONE_VM); + DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); + DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); + + /* About the CPU features table */ + DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec)); + DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask)); + DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value)); + DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); + DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); + +#ifdef CONFIG_PPC64 + DEFINE(MM, offsetof(struct task_struct, mm)); + DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); + + DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); + DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); + DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); + DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); + DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); + DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); + DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); + + /* paca */ + DEFINE(PACA_SIZE, sizeof(struct paca_struct)); + DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); + DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); + DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); + DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); + DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); + DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); + DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); + DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); + DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); + DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); + DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled)); + DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); + DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); + DEFINE(PACACONTEXTID, offsetof(struct paca_struct, 
context.id)); +#ifdef CONFIG_HUGETLB_PAGE + DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); + DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); +#endif /* CONFIG_HUGETLB_PAGE */ + DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr)); + DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); + DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); + DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); + DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi)); + DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); + DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); + DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); + DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); + DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); + DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); + DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); + + /* RTAS */ + DEFINE(RTASBASE, offsetof(struct rtas_t, base)); + DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); + + DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); + DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); + + /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ + DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); + DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); + + /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ + DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); + DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); + + /* systemcfg offsets for use by vdso */ + DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp)); + DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec)); + DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs)); + DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec)); + DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count)); + DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest)); + DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime)); + DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32)); + DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64)); + + /* timeval/timezone offsets for use by vdso */ + DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); + DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); + DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); + DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); + DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); + DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); +#endif + + DEFINE(pbe_address, offsetof(struct pbe, address)); + DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); + DEFINE(pbe_next, offsetof(struct pbe, next)); + + DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); + return 0; +} diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S new file mode 100644 index 000000000000..665d7d34304c --- /dev/null +++ b/arch/powerpc/kernel/fpu.S @@ -0,0 +1,133 @@ +/* + * FPU support code, moved here from head.S so that it can be used + * by chips which use other head-whatever.S files. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* + * This task wants to use the FPU now. + * On UP, disable FP for the task which had the FPU previously, + * and save its floating-point registers in its thread_struct. + * Load up this task's FP registers from its thread_struct, + * enable the FPU for the current task and return to the task. + */ + .globl load_up_fpu +load_up_fpu: + mfmsr r5 + ori r5,r5,MSR_FP +#ifdef CONFIG_PPC64BRIDGE + clrldi r5,r5,1 /* turn off 64-bit mode */ +#endif /* CONFIG_PPC64BRIDGE */ + SYNC + MTMSRD(r5) /* enable use of fpu now */ + isync +/* + * For SMP, we don't do lazy FPU switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_fpu in switch_to. + */ +#ifndef CONFIG_SMP + tophys(r6,0) /* get __pa constant */ + addis r3,r6,last_task_used_math@ha + lwz r4,last_task_used_math@l(r3) + cmpwi 0,r4,0 + beq 1f + add r4,r4,r6 + addi r4,r4,THREAD /* want last_task_used_math->thread */ + SAVE_32FPRS(0, r4) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r4) + lwz r5,PT_REGS(r4) + add r5,r5,r6 + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r10,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r10 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of FP after return */ + mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ + lwz r4,THREAD_FPEXC_MODE(r5) + ori r9,r9,MSR_FP /* enable FP for current */ + or r9,r9,r4 + lfd fr0,THREAD_FPSCR-4(r5) + mtfsf 0xff,fr0 + REST_32FPRS(0, r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + sub r4,r4,r6 + stw r4,last_task_used_math@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ + /* we haven't used ctr or xer or lr */ + b fast_exception_return + +/* + * FP unavailable trap from kernel - print a message, but let + * the task use FP in the kernel until it returns to user mode. + */ + .globl KernelFP +KernelFP: + lwz r3,_MSR(r1) + ori r3,r3,MSR_FP + stw r3,_MSR(r1) /* enable use of FP after return */ + lis r3,86f@h + ori r3,r3,86f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +86: .string "floating point used in kernel (task=%p, pc=%x)\n" + .align 4,0 + +/* + * giveup_fpu(tsk) + * Disable FP for the task given as the argument, + * and save the floating-point registers in its thread_struct. + * Enables the FPU for use in the kernel on return. 
+ */ + .globl giveup_fpu +giveup_fpu: + mfmsr r5 + ori r5,r5,MSR_FP + SYNC_601 + ISYNC_601 + MTMSRD(r5) /* enable use of fpu now */ + SYNC_601 + isync + cmpwi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpwi 0,r5,0 + SAVE_32FPRS(0, r3) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r3) + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r3,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r3 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_math@ha + stw r5,last_task_used_math@l(r4) +#endif /* CONFIG_SMP */ + blr diff --git a/arch/powerpc/kernel/head.S b/arch/powerpc/kernel/head.S new file mode 100644 index 000000000000..d05509f197d0 --- /dev/null +++ b/arch/powerpc/kernel/head.S @@ -0,0 +1,1545 @@ +/* + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP + * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> + * Adapted for Power Macintosh by Paul Mackerras. + * Low-level exception handlers and MMU support + * rewritten by Paul Mackerras. + * Copyright (C) 1996 Paul Mackerras. + * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). + * + * This file contains the low-level support and setup for the + * PowerPC platform, including trap and interrupt dispatch. + * (The PPC 8xx embedded CPUs use head_8xx.S instead.) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +#ifdef CONFIG_APUS +#include <asm/amigappc.h> +#endif + +#ifdef CONFIG_PPC64BRIDGE +#define LOAD_BAT(n, reg, RA, RB) \ + ld RA,(n*32)+0(reg); \ + ld RB,(n*32)+8(reg); \ + mtspr SPRN_IBAT##n##U,RA; \ + mtspr SPRN_IBAT##n##L,RB; \ + ld RA,(n*32)+16(reg); \ + ld RB,(n*32)+24(reg); \ + mtspr SPRN_DBAT##n##U,RA; \ + mtspr SPRN_DBAT##n##L,RB; \ + +#else /* CONFIG_PPC64BRIDGE */ + +/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ +#define LOAD_BAT(n, reg, RA, RB) \ + /* see the comment for clear_bats() -- Cort */ \ + li RA,0; \ + mtspr SPRN_IBAT##n##U,RA; \ + mtspr SPRN_DBAT##n##U,RA; \ + lwz RA,(n*16)+0(reg); \ + lwz RB,(n*16)+4(reg); \ + mtspr SPRN_IBAT##n##U,RA; \ + mtspr SPRN_IBAT##n##L,RB; \ + beq 1f; \ + lwz RA,(n*16)+8(reg); \ + lwz RB,(n*16)+12(reg); \ + mtspr SPRN_DBAT##n##U,RA; \ + mtspr SPRN_DBAT##n##L,RB; \ +1: +#endif /* CONFIG_PPC64BRIDGE */ + + .text + .stabs "arch/ppc/kernel/",N_SO,0,0,0f + .stabs "head.S",N_SO,0,0,0f +0: + .globl _stext +_stext: + +/* + * _start is defined this way because the XCOFF loader in the OpenFirmware + * on the powermac expects the entry point to be a procedure descriptor. + */ + .text + .globl _start +_start: + /* + * These are here for legacy reasons, the kernel used to + * need to look like a coff function entry for the pmac + * but we're always started by some kind of bootloader now. 
+ * -- Cort + */ + nop /* used by __secondary_hold on prep (mtx) and chrp smp */ + nop /* used by __secondary_hold on prep (mtx) and chrp smp */ + nop + +/* PMAC + * Enter here with the kernel text, data and bss loaded starting at + * 0, running with virtual == physical mapping. + * r5 points to the prom entry point (the client interface handler + * address). Address translation is turned on, with the prom + * managing the hash table. Interrupts are disabled. The stack + * pointer (r1) points to just below the end of the half-meg region + * from 0x380000 - 0x400000, which is mapped in already. + * + * If we are booted from MacOS via BootX, we enter with the kernel + * image loaded somewhere, and the following values in registers: + * r3: 'BooX' (0x426f6f58) + * r4: virtual address of boot_infos_t + * r5: 0 + * + * APUS + * r3: 'APUS' + * r4: physical address of memory base + * Linux/m68k style BootInfo structure at &_end. + * + * PREP + * This is jumped to on prep systems right after the kernel is relocated + * to its proper place in memory by the boot loader. The expected layout + * of the regs is: + * r3: ptr to residual data + * r4: initrd_start or if no initrd then 0 + * r5: initrd_end - unused if r4 is 0 + * r6: Start of command line string + * r7: End of command line string + * + * This just gets a minimal mmu environment setup so we can call + * start_here() to do the real work. + * -- Cort + */ + + .globl __start +__start: +/* + * We have to do any OF calls before we map ourselves to KERNELBASE, + * because OF may have I/O devices mapped into that area + * (particularly on CHRP). + */ + mr r31,r3 /* save parameters */ + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + li r24,0 /* cpu # */ + +/* + * early_init() does the early machine identification and does + * the necessary low-level setup and clears the BSS + * -- Cort <cort@fsmlabs.com> + */ + bl early_init + +/* + * On POWER4, we first need to tweak some CPU configuration registers + * like real mode cache inhibit or exception base + */ +#ifdef CONFIG_POWER4 + bl __970_cpu_preinit +#endif /* CONFIG_POWER4 */ + +#ifdef CONFIG_APUS +/* On APUS the __va/__pa constants need to be set to the correct + * values before continuing. + */ + mr r4,r30 + bl fix_mem_constants +#endif /* CONFIG_APUS */ + +/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains + * the physical address we are running at, returned by early_init() + */ + bl mmu_off +__after_mmu_off: +#ifndef CONFIG_POWER4 + bl clear_bats + bl flush_tlbs + + bl initial_bats +#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) + bl setup_disp_bat +#endif +#else /* CONFIG_POWER4 */ + bl reloc_offset + bl initial_mm_power4 +#endif /* CONFIG_POWER4 */ + +/* + * Call setup_cpu for CPU 0 and initialize 6xx Idle + */ + bl reloc_offset + li r24,0 /* cpu# */ + bl call_setup_cpu /* Call setup_cpu for this CPU */ +#ifdef CONFIG_6xx + bl reloc_offset + bl init_idle_6xx +#endif /* CONFIG_6xx */ +#ifdef CONFIG_POWER4 + bl reloc_offset + bl init_idle_power4 +#endif /* CONFIG_POWER4 */ + + +#ifndef CONFIG_APUS +/* + * We need to run with _start at physical address 0. + * On CHRP, we are loaded at 0x10000 since OF on CHRP uses + * the exception vectors at 0 (and therefore this copy + * overwrites OF's exception vectors with our own). + * If the MMU is already turned on, we copy stuff to KERNELBASE, + * otherwise we copy it to 0. + */ + bl reloc_offset + mr r26,r3 + addis r4,r3,KERNELBASE@h /* current address of _start */ + cmpwi 0,r4,0 /* are we already running at 0? 
*/ + bne relocate_kernel +#endif /* CONFIG_APUS */ +/* + * we now have the 1st 16M of ram mapped with the bats. + * prep needs the mmu to be turned on here, but pmac already has it on. + * this shouldn't bother the pmac since it just gets turned on again + * as we jump to our code at KERNELBASE. -- Cort + * Actually no, pmac doesn't have it on any more. BootX enters with MMU + * off, and in other cases, we now turn it off before changing BATs above. + */ +turn_on_mmu: + mfmsr r0 + ori r0,r0,MSR_DR|MSR_IR + mtspr SPRN_SRR1,r0 + lis r0,start_here@h + ori r0,r0,start_here@l + mtspr SPRN_SRR0,r0 + SYNC + RFI /* enables MMU */ + +/* + * We need __secondary_hold as a place to hold the other cpus on + * an SMP machine, even when we are running a UP kernel. + */ + . = 0xc0 /* for prep bootloader */ + li r3,1 /* MTX only has 1 cpu */ + .globl __secondary_hold +__secondary_hold: + /* tell the master we're here */ + stw r3,4(0) +#ifdef CONFIG_SMP +100: lwz r4,0(0) + /* wait until we're told to start */ + cmpw 0,r4,r3 + bne 100b + /* our cpu # was at addr 0 - go */ + mr r24,r3 /* cpu # */ + b __secondary_start +#else + b . +#endif /* CONFIG_SMP */ + +/* + * Exception entry code. This code runs with address translation + * turned off, i.e. using physical addresses. + * We assume sprg3 has the physical address of the current + * task's thread_struct. + */ +#define EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; \ + mtspr SPRN_SPRG1,r11; \ + mfcr r10; \ + EXCEPTION_PROLOG_1; \ + EXCEPTION_PROLOG_2 + +#define EXCEPTION_PROLOG_1 \ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ + andi. r11,r11,MSR_PR; \ + tophys(r11,r1); /* use tophys(r1) if kernel */ \ + beq 1f; \ + mfspr r11,SPRN_SPRG3; \ + lwz r11,THREAD_INFO-THREAD(r11); \ + addi r11,r11,THREAD_SIZE; \ + tophys(r11,r11); \ +1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ + + +#define EXCEPTION_PROLOG_2 \ + CLR_TOP32(r11); \ + stw r10,_CCR(r11); /* save registers */ \ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRN_SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRN_SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_SRR0; \ + mfspr r9,SPRN_SRR1; \ + stw r1,GPR1(r11); \ + stw r1,0(r11); \ + tovirt(r1,r11); /* set new kernel sp */ \ + li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ + MTMSRD(r10); /* (except for mach check in rtas) */ \ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Note: code which follows this uses cr0.eq (set if from kernel), + * r11, r12 (SRR0), and r9 (SRR1). + * + * Note2: once we have set r1 we are in a position to take exceptions + * again, and we could thus set MSR:RI at that point. + */ + +/* + * Exception vectors. + */ +#define EXCEPTION(n, label, hdlr, xfer) \ + . 
= n; \ +label: \ + EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,TRAP(r11); \ + li r10,MSR_KERNEL; \ + copyee(r10, r9); \ + bl tfer; \ +i##n: \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ + ret_from_except) + +/* System reset */ +/* core99 pmac starts the secondary here by changing the vector, and + putting it back to what it was (UnknownException) when done. */ +#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) + . = 0x100 + b __secondary_start_gemini +#else + EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) +#endif + +/* Machine check */ +/* + * On CHRP, this is complicated by the fact that we could get a + * machine check inside RTAS, and we have no guarantee that certain + * critical registers will have the values we expect. The set of + * registers that might have bad values includes all the GPRs + * and all the BATs. We indicate that we are in RTAS by putting + * a non-zero value, the address of the exception frame to use, + * in SPRG2. The machine check handler checks SPRG2 and uses its + * value if it is non-zero. If we ever needed to free up SPRG2, + * we could use a field in the thread_info or thread_struct instead. + * (Other exception handlers assume that r1 is a valid kernel stack + * pointer when we take an exception from supervisor mode.) + * -- paulus. + */ + . = 0x200 + mtspr SPRN_SPRG0,r10 + mtspr SPRN_SPRG1,r11 + mfcr r10 +#ifdef CONFIG_PPC_CHRP + mfspr r11,SPRN_SPRG2 + cmpwi 0,r11,0 + bne 7f +#endif /* CONFIG_PPC_CHRP */ + EXCEPTION_PROLOG_1 +7: EXCEPTION_PROLOG_2 + addi r3,r1,STACK_FRAME_OVERHEAD +#ifdef CONFIG_PPC_CHRP + mfspr r4,SPRN_SPRG2 + cmpwi cr1,r4,0 + bne cr1,1f +#endif + EXC_XFER_STD(0x200, MachineCheckException) +#ifdef CONFIG_PPC_CHRP +1: b machine_check_in_rtas +#endif + +/* Data access exception. */ + . = 0x300 +#ifdef CONFIG_PPC64BRIDGE + b DataAccess +DataAccessCont: +#else +DataAccess: + EXCEPTION_PROLOG +#endif /* CONFIG_PPC64BRIDGE */ + mfspr r10,SPRN_DSISR + andis. r0,r10,0xa470 /* weird error? */ + bne 1f /* if not, try to put a PTE */ + mfspr r4,SPRN_DAR /* into the hash table */ + rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ + bl hash_page +1: stw r10,_DSISR(r11) + mr r5,r10 + mfspr r4,SPRN_DAR + EXC_XFER_EE_LITE(0x300, handle_page_fault) + +#ifdef CONFIG_PPC64BRIDGE +/* SLB fault on data access. */ + . = 0x380 + b DataSegment +#endif /* CONFIG_PPC64BRIDGE */ + +/* Instruction access exception. */ + . = 0x400 +#ifdef CONFIG_PPC64BRIDGE + b InstructionAccess +InstructionAccessCont: +#else +InstructionAccess: + EXCEPTION_PROLOG +#endif /* CONFIG_PPC64BRIDGE */ + andis. r0,r9,0x4000 /* no pte found? */ + beq 1f /* if so, try to put a PTE */ + li r3,0 /* into the hash table */ + mr r4,r12 /* SRR0 is fault address */ + bl hash_page +1: mr r4,r12 + mr r5,r9 + EXC_XFER_EE_LITE(0x400, handle_page_fault) + +#ifdef CONFIG_PPC64BRIDGE +/* SLB fault on instruction access. */ + . 
= 0x480 + b InstructionSegment +#endif /* CONFIG_PPC64BRIDGE */ + +/* External interrupt */ + EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + +/* Alignment exception */ + . = 0x600 +Alignment: + EXCEPTION_PROLOG + mfspr r4,SPRN_DAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x600, AlignmentException) + +/* Program check exception */ + EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) + +/* Floating-point unavailable */ + . = 0x800 +FPUnavailable: + EXCEPTION_PROLOG + bne load_up_fpu /* if from user, just load it up */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x800, KernelFP) + +/* Decrementer */ + EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) + + EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) + EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) + +/* System call */ + . = 0xc00 +SystemCall: + EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0xc00, DoSyscall) + +/* Single step - not used on 601 */ + EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) + EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) + +/* + * The Altivec unavailable trap is at 0x0f20. Foo. + * We effectively remap it to 0x3000. + * We include an altivec unavailable exception vector even if + * not configured for Altivec, so that you can't panic a + * non-altivec kernel running on a machine with altivec just + * by executing an altivec instruction. + */ + . = 0xf00 + b Trap_0f + + . = 0xf20 + b AltiVecUnavailable + +Trap_0f: + EXCEPTION_PROLOG + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0xf00, UnknownException) + +/* + * Handle TLB miss for instruction on 603/603e. + * Note: we get an alternate set of r0 - r3 to use automatically. + */ + . = 0x1000 +InstructionTLBMiss: +/* + * r0: stored ctr + * r1: linux style pte ( later becomes ppc hardware pte ) + * r2: ptr to linux-style pte + * r3: scratch + */ + mfctr r0 + /* Get PTE (linux-style) and check access */ + mfspr r3,SPRN_IMISS + lis r1,KERNELBASE@h /* check if kernel address */ + cmplw 0,r3,r1 + mfspr r2,SPRN_SPRG3 + li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ + lwz r2,PGDIR(r2) + blt+ 112f + lis r2,swapper_pg_dir@ha /* if kernel address, use */ + addi r2,r2,swapper_pg_dir@l /* kernel page table */ + mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ +112: tophys(r2,r2) + rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + rlwinm. r2,r2,0,0,19 /* extract address of pte page */ + beq- InstructionAddressInvalid /* return if no mapping */ + rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + lwz r3,0(r2) /* get linux-style pte */ + andc. r1,r1,r3 /* check access & ~permission */ + bne- InstructionAddressInvalid /* return if access not permitted */ + ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ + /* + * NOTE! We are assuming this is not an SMP system, otherwise + * we would need to update the pte atomically with lwarx/stwcx. + */ + stw r3,0(r2) /* update PTE (accessed bit) */ + /* Convert linux-style PTE to low word of PPC-style PTE */ + rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ + rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ + and r1,r1,r2 /* writable if _RW and _DIRTY */ + rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ + rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ + ori r1,r1,0xe14 /* clear out reserved bits and M */ + andc r1,r3,r1 /* PP = user? 
(rw&dirty? 2: 3): 0 */ + mtspr SPRN_RPA,r1 + mfspr r3,SPRN_IMISS + tlbli r3 + mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ + mtcrf 0x80,r3 + rfi +InstructionAddressInvalid: + mfspr r3,SPRN_SRR1 + rlwinm r1,r3,9,6,6 /* Get load/store bit */ + + addis r1,r1,0x2000 + mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ + mtctr r0 /* Restore CTR */ + andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ + or r2,r2,r1 + mtspr SPRN_SRR1,r2 + mfspr r1,SPRN_IMISS /* Get failing address */ + rlwinm. r2,r2,0,31,31 /* Check for little endian access */ + rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ + xor r1,r1,r2 + mtspr SPRN_DAR,r1 /* Set fault address */ + mfmsr r0 /* Restore "normal" registers */ + xoris r0,r0,MSR_TGPR>>16 + mtcrf 0x80,r3 /* Restore CR0 */ + mtmsr r0 + b InstructionAccess + +/* + * Handle TLB miss for DATA Load operation on 603/603e + */ + . = 0x1100 +DataLoadTLBMiss: +/* + * r0: stored ctr + * r1: linux style pte ( later becomes ppc hardware pte ) + * r2: ptr to linux-style pte + * r3: scratch + */ + mfctr r0 + /* Get PTE (linux-style) and check access */ + mfspr r3,SPRN_DMISS + lis r1,KERNELBASE@h /* check if kernel address */ + cmplw 0,r3,r1 + mfspr r2,SPRN_SPRG3 + li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ + lwz r2,PGDIR(r2) + blt+ 112f + lis r2,swapper_pg_dir@ha /* if kernel address, use */ + addi r2,r2,swapper_pg_dir@l /* kernel page table */ + mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ +112: tophys(r2,r2) + rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + rlwinm. r2,r2,0,0,19 /* extract address of pte page */ + beq- DataAddressInvalid /* return if no mapping */ + rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + lwz r3,0(r2) /* get linux-style pte */ + andc. r1,r1,r3 /* check access & ~permission */ + bne- DataAddressInvalid /* return if access not permitted */ + ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ + /* + * NOTE! We are assuming this is not an SMP system, otherwise + * we would need to update the pte atomically with lwarx/stwcx. + */ + stw r3,0(r2) /* update PTE (accessed bit) */ + /* Convert linux-style PTE to low word of PPC-style PTE */ + rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ + rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ + and r1,r1,r2 /* writable if _RW and _DIRTY */ + rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ + rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ + ori r1,r1,0xe14 /* clear out reserved bits and M */ + andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ + mtspr SPRN_RPA,r1 + mfspr r3,SPRN_DMISS + tlbld r3 + mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ + mtcrf 0x80,r3 + rfi +DataAddressInvalid: + mfspr r3,SPRN_SRR1 + rlwinm r1,r3,9,6,6 /* Get load/store bit */ + addis r1,r1,0x2000 + mtspr SPRN_DSISR,r1 + mtctr r0 /* Restore CTR */ + andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ + mtspr SPRN_SRR1,r2 + mfspr r1,SPRN_DMISS /* Get failing address */ + rlwinm. r2,r2,0,31,31 /* Check for little endian access */ + beq 20f /* Jump if big endian */ + xori r1,r1,3 +20: mtspr SPRN_DAR,r1 /* Set fault address */ + mfmsr r0 /* Restore "normal" registers */ + xoris r0,r0,MSR_TGPR>>16 + mtcrf 0x80,r3 /* Restore CR0 */ + mtmsr r0 + b DataAccess + +/* + * Handle TLB miss for DATA Store on 603/603e + */ + . 
= 0x1200 +DataStoreTLBMiss: +/* + * r0: stored ctr + * r1: linux style pte ( later becomes ppc hardware pte ) + * r2: ptr to linux-style pte + * r3: scratch + */ + mfctr r0 + /* Get PTE (linux-style) and check access */ + mfspr r3,SPRN_DMISS + lis r1,KERNELBASE@h /* check if kernel address */ + cmplw 0,r3,r1 + mfspr r2,SPRN_SPRG3 + li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ + lwz r2,PGDIR(r2) + blt+ 112f + lis r2,swapper_pg_dir@ha /* if kernel address, use */ + addi r2,r2,swapper_pg_dir@l /* kernel page table */ + mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ +112: tophys(r2,r2) + rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + rlwinm. r2,r2,0,0,19 /* extract address of pte page */ + beq- DataAddressInvalid /* return if no mapping */ + rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + lwz r3,0(r2) /* get linux-style pte */ + andc. r1,r1,r3 /* check access & ~permission */ + bne- DataAddressInvalid /* return if access not permitted */ + ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY + /* + * NOTE! We are assuming this is not an SMP system, otherwise + * we would need to update the pte atomically with lwarx/stwcx. + */ + stw r3,0(r2) /* update PTE (accessed/dirty bits) */ + /* Convert linux-style PTE to low word of PPC-style PTE */ + rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ + li r1,0xe15 /* clear out reserved bits and M */ + andc r1,r3,r1 /* PP = user? 2: 0 */ + mtspr SPRN_RPA,r1 + mfspr r3,SPRN_DMISS + tlbld r3 + mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ + mtcrf 0x80,r3 + rfi + +#ifndef CONFIG_ALTIVEC +#define AltivecAssistException UnknownException +#endif + + EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) + EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) + EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) +#ifdef CONFIG_POWER4 + EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) + EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) +#else /* !CONFIG_POWER4 */ + EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) + EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) + EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_POWER4 */ + EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) + EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) + 
EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) + EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) + + .globl mol_trampoline + .set mol_trampoline, i0x2f00 + + . = 0x3000 + +AltiVecUnavailable: + EXCEPTION_PROLOG +#ifdef CONFIG_ALTIVEC + bne load_up_altivec /* if from user, just load it up */ +#endif /* CONFIG_ALTIVEC */ + EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) + +#ifdef CONFIG_PPC64BRIDGE +DataAccess: + EXCEPTION_PROLOG + b DataAccessCont + +InstructionAccess: + EXCEPTION_PROLOG + b InstructionAccessCont + +DataSegment: + EXCEPTION_PROLOG + addi r3,r1,STACK_FRAME_OVERHEAD + mfspr r4,SPRN_DAR + stw r4,_DAR(r11) + EXC_XFER_STD(0x380, UnknownException) + +InstructionSegment: + EXCEPTION_PROLOG + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x480, UnknownException) +#endif /* CONFIG_PPC64BRIDGE */ + +#ifdef CONFIG_ALTIVEC +/* Note that the AltiVec support is closely modeled after the FP + * support. Changes to one are likely to be applicable to the + * other! */ +load_up_altivec: +/* + * Disable AltiVec for the task which had AltiVec previously, + * and save its AltiVec registers in its thread_struct. + * Enables AltiVec for use in the kernel on return. + * On SMP we know the AltiVec units are free, since we give it up every + * switch. -- Kumar + */ + mfmsr r5 + oris r5,r5,MSR_VEC@h + MTMSRD(r5) /* enable use of AltiVec now */ + isync +/* + * For SMP, we don't do lazy AltiVec switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_altivec in switch_to. + */ +#ifndef CONFIG_SMP + tophys(r6,0) + addis r3,r6,last_task_used_altivec@ha + lwz r4,last_task_used_altivec@l(r3) + cmpwi 0,r4,0 + beq 1f + add r4,r4,r6 + addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ + SAVE_32VRS(0,r10,r4) + mfvscr vr0 + li r10,THREAD_VSCR + stvx vr0,r10,r4 + lwz r5,PT_REGS(r4) + add r5,r5,r6 + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r10,MSR_VEC@h + andc r4,r4,r10 /* disable altivec for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of AltiVec after return */ + oris r9,r9,MSR_VEC@h + mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ + li r4,1 + li r10,THREAD_VSCR + stw r4,THREAD_USED_VR(r5) + lvx vr0,r10,r5 + mtvscr vr0 + REST_32VRS(0,r10,r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + sub r4,r4,r6 + stw r4,last_task_used_altivec@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ + /* we haven't used ctr or xer or lr */ + b fast_exception_return + +/* + * AltiVec unavailable trap from kernel - print a message, but let + * the task use AltiVec in the kernel until it returns to user mode. + */ +KernelAltiVec: + lwz r3,_MSR(r1) + oris r3,r3,MSR_VEC@h + stw r3,_MSR(r1) /* enable use of AltiVec after return */ + lis r3,87f@h + ori r3,r3,87f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" + .align 4,0 + +/* + * giveup_altivec(tsk) + * Disable AltiVec for the task given as the argument, + * and save the AltiVec registers in its thread_struct. + * Enables AltiVec for use in the kernel on return. 
+ */ + + .globl giveup_altivec +giveup_altivec: + mfmsr r5 + oris r5,r5,MSR_VEC@h + SYNC + MTMSRD(r5) /* enable use of AltiVec now */ + isync + cmpwi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpwi 0,r5,0 + SAVE_32VRS(0, r4, r3) + mfvscr vr0 + li r4,THREAD_VSCR + stvx vr0,r4,r3 + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r3,MSR_VEC@h + andc r4,r4,r3 /* disable AltiVec for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_altivec@ha + stw r5,last_task_used_altivec@l(r4) +#endif /* CONFIG_SMP */ + blr +#endif /* CONFIG_ALTIVEC */ + +/* + * This code is jumped to from the startup code to copy + * the kernel image to physical address 0. + */ +relocate_kernel: + addis r9,r26,klimit@ha /* fetch klimit */ + lwz r25,klimit@l(r9) + addis r25,r25,-KERNELBASE@h + li r3,0 /* Destination base address */ + li r6,0 /* Destination offset */ + li r5,0x4000 /* # bytes of memory to copy */ + bl copy_and_flush /* copy the first 0x4000 bytes */ + addi r0,r3,4f@l /* jump to the address of 4f */ + mtctr r0 /* in copy and do the rest. */ + bctr /* jump to the copy */ +4: mr r5,r25 + bl copy_and_flush /* copy the rest */ + b turn_on_mmu + +/* + * Copy routine used to copy the kernel to start at physical address 0 + * and flush and invalidate the caches as needed. + * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset + * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. + */ +copy_and_flush: + addi r5,r5,-4 + addi r6,r6,-4 +4: li r0,L1_CACHE_LINE_SIZE/4 + mtctr r0 +3: addi r6,r6,4 /* copy a cache line */ + lwzx r0,r6,r4 + stwx r0,r6,r3 + bdnz 3b + dcbst r6,r3 /* write it to memory */ + sync + icbi r6,r3 /* flush the icache line */ + cmplw 0,r6,r5 + blt 4b + sync /* additional sync needed on g4 */ + isync + addi r5,r5,4 + addi r6,r6,4 + blr + +#ifdef CONFIG_APUS +/* + * On APUS the physical base address of the kernel is not known at compile + * time, which means the __pa/__va constants used are incorrect. In the + * __init section is recorded the virtual addresses of instructions using + * these constants, so all that has to be done is fix these before + * continuing the kernel boot. + * + * r4 = The physical address of the kernel base. + */ +fix_mem_constants: + mr r10,r4 + addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ + neg r11,r10 /* phys_to_virt constant */ + + lis r12,__vtop_table_begin@h + ori r12,r12,__vtop_table_begin@l + add r12,r12,r10 /* table begin phys address */ + lis r13,__vtop_table_end@h + ori r13,r13,__vtop_table_end@l + add r13,r13,r10 /* table end phys address */ + subi r12,r12,4 + subi r13,r13,4 +1: lwzu r14,4(r12) /* virt address of instruction */ + add r14,r14,r10 /* phys address of instruction */ + lwz r15,0(r14) /* instruction, now insert top */ + rlwimi r15,r10,16,16,31 /* half of vp const in low half */ + stw r15,0(r14) /* of instruction and restore. */ + dcbst r0,r14 /* write it to memory */ + sync + icbi r0,r14 /* flush the icache line */ + cmpw r12,r13 + bne 1b + sync /* additional sync needed on g4 */ + isync + +/* + * Map the memory where the exception handlers will + * be copied to when hash constants have been patched. 
+ */ +#ifdef CONFIG_APUS_FAST_EXCEPT + lis r8,0xfff0 +#else + lis r8,0 +#endif + ori r8,r8,0x2 /* 128KB, supervisor */ + mtspr SPRN_DBAT3U,r8 + mtspr SPRN_DBAT3L,r8 + + lis r12,__ptov_table_begin@h + ori r12,r12,__ptov_table_begin@l + add r12,r12,r10 /* table begin phys address */ + lis r13,__ptov_table_end@h + ori r13,r13,__ptov_table_end@l + add r13,r13,r10 /* table end phys address */ + subi r12,r12,4 + subi r13,r13,4 +1: lwzu r14,4(r12) /* virt address of instruction */ + add r14,r14,r10 /* phys address of instruction */ + lwz r15,0(r14) /* instruction, now insert top */ + rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ + stw r15,0(r14) /* of instruction and restore. */ + dcbst r0,r14 /* write it to memory */ + sync + icbi r0,r14 /* flush the icache line */ + cmpw r12,r13 + bne 1b + + sync /* additional sync needed on g4 */ + isync /* No speculative loading until now */ + blr + +/*********************************************************************** + * Please note that on APUS the exception handlers are located at the + * physical address 0xfff0000. For this reason, the exception handlers + * cannot use relative branches to access the code below. + ***********************************************************************/ +#endif /* CONFIG_APUS */ + +#ifdef CONFIG_SMP +#ifdef CONFIG_GEMINI + .globl __secondary_start_gemini +__secondary_start_gemini: + mfspr r4,SPRN_HID0 + ori r4,r4,HID0_ICFI + li r3,0 + ori r3,r3,HID0_ICE + andc r4,r4,r3 + mtspr SPRN_HID0,r4 + sync + b __secondary_start +#endif /* CONFIG_GEMINI */ + + .globl __secondary_start_pmac_0 +__secondary_start_pmac_0: + /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ + li r24,0 + b 1f + li r24,1 + b 1f + li r24,2 + b 1f + li r24,3 +1: + /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 + set to map the 0xf0000000 - 0xffffffff region */ + mfmsr r0 + rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ + SYNC + mtmsr r0 + isync + + .globl __secondary_start +__secondary_start: +#ifdef CONFIG_PPC64BRIDGE + mfmsr r0 + clrldi r0,r0,1 /* make sure it's in 32-bit mode */ + SYNC + MTMSRD(r0) + isync +#endif + /* Copy some CPU settings from CPU 0 */ + bl __restore_cpu_setup + + lis r3,-KERNELBASE@h + mr r4,r24 + bl identify_cpu + bl call_setup_cpu /* Call setup_cpu for this CPU */ +#ifdef CONFIG_6xx + lis r3,-KERNELBASE@h + bl init_idle_6xx +#endif /* CONFIG_6xx */ +#ifdef CONFIG_POWER4 + lis r3,-KERNELBASE@h + bl init_idle_power4 +#endif /* CONFIG_POWER4 */ + + /* get current_thread_info and current */ + lis r1,secondary_ti@ha + tophys(r1,r1) + lwz r1,secondary_ti@l(r1) + tophys(r2,r1) + lwz r2,TI_TASK(r2) + + /* stack */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r0,0 + tophys(r3,r1) + stw r0,0(r3) + + /* load up the MMU */ + bl load_up_mmu + + /* ptr to phys current thread */ + tophys(r4,r2) + addi r4,r4,THREAD /* phys address of our thread_struct */ + CLR_TOP32(r4) + mtspr SPRN_SPRG3,r4 + li r3,0 + mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ + + /* enable MMU and jump to start_secondary */ + li r4,MSR_KERNEL + FIX_SRR1(r4,r5) + lis r3,start_secondary@h + ori r3,r3,start_secondary@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + SYNC + RFI +#endif /* CONFIG_SMP */ + +/* + * Those generic dummy functions are kept for CPUs not + * included in CONFIG_6xx + */ +_GLOBAL(__setup_cpu_power3) + blr +_GLOBAL(__setup_cpu_generic) + blr + +#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) +_GLOBAL(__save_cpu_setup) + blr +_GLOBAL(__restore_cpu_setup) + blr +#endif /* !defined(CONFIG_6xx) && 
!defined(CONFIG_POWER4) */ + + +/* + * Load stuff into the MMU. Intended to be called with + * IR=0 and DR=0. + */ +load_up_mmu: + sync /* Force all PTE updates to finish */ + isync + tlbia /* Clear all TLB entries */ + sync /* wait for tlbia/tlbie to finish */ + TLBSYNC /* ... on all CPUs */ + /* Load the SDR1 register (hash table base & size) */ + lis r6,_SDR1@ha + tophys(r6,r6) + lwz r6,_SDR1@l(r6) + mtspr SPRN_SDR1,r6 +#ifdef CONFIG_PPC64BRIDGE + /* clear the ASR so we only use the pseudo-segment registers. */ + li r6,0 + mtasr r6 +#endif /* CONFIG_PPC64BRIDGE */ + li r0,16 /* load up segment register values */ + mtctr r0 /* for context 0 */ + lis r3,0x2000 /* Ku = 1, VSID = 0 */ + li r4,0 +3: mtsrin r3,r4 + addi r3,r3,0x111 /* increment VSID */ + addis r4,r4,0x1000 /* address of next segment */ + bdnz 3b +#ifndef CONFIG_POWER4 +/* Load the BAT registers with the values set up by MMU_init. + MMU_init takes care of whether we're on a 601 or not. */ + mfpvr r3 + srwi r3,r3,16 + cmpwi r3,1 + lis r3,BATS@ha + addi r3,r3,BATS@l + tophys(r3,r3) + LOAD_BAT(0,r3,r4,r5) + LOAD_BAT(1,r3,r4,r5) + LOAD_BAT(2,r3,r4,r5) + LOAD_BAT(3,r3,r4,r5) +#endif /* CONFIG_POWER4 */ + blr + +/* + * This is where the main kernel code starts. + */ +start_here: + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + /* Set up for using our exception vectors */ + /* ptr to phys current thread */ + tophys(r4,r2) + addi r4,r4,THREAD /* init task's THREAD */ + CLR_TOP32(r4) + mtspr SPRN_SPRG3,r4 + li r3,0 + mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ + + /* stack */ + lis r1,init_thread_union@ha + addi r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) +/* + * Do early bootinfo parsing, platform-specific initialization, + * and set up the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + +#ifdef CONFIG_APUS + /* Copy exception code to exception vector base on APUS. */ + lis r4,KERNELBASE@h +#ifdef CONFIG_APUS_FAST_EXCEPT + lis r3,0xfff0 /* Copy to 0xfff00000 */ +#else + lis r3,0 /* Copy to 0x00000000 */ +#endif + li r5,0x4000 /* # bytes of memory to copy */ + li r6,0 + bl copy_and_flush /* copy the first 0x4000 bytes */ +#endif /* CONFIG_APUS */ + +/* + * Go back to running unmapped so we can load up new values + * for SDR1 (hash table pointer) and the segment registers + * and change to using our exception vectors. + */ + lis r4,2f@h + ori r4,r4,2f@l + tophys(r4,r4) + li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) + FIX_SRR1(r3,r5) + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + SYNC + RFI +/* Load up the kernel context */ +2: bl load_up_mmu + +#ifdef CONFIG_BDI_SWITCH + /* Add helper information for the Abatron bdiGDB debugger. + * We do this here because we know the mmu is disabled, and + * will be enabled for real in just a few instructions. + */ + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r5, 0xf0(r0) /* This must match your Abatron config */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + tophys(r5, r5) + stw r6, 0(r5) +#endif /* CONFIG_BDI_SWITCH */ + +/* Now turn on the MMU for real! */ + li r4,MSR_KERNEL + FIX_SRR1(r4,r5) + lis r3,start_kernel@h + ori r3,r3,start_kernel@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + SYNC + RFI + +/* + * Set up the segment registers for a new context. 
*/ +_GLOBAL(set_context) + mulli r3,r3,897 /* multiply context by skew factor */ + rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ + addis r3,r3,0x6000 /* Set Ks, Ku bits */ + li r0,NUM_USER_SEGMENTS + mtctr r0 + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is passed as second argument. + */ + lis r5, KERNELBASE@h + lwz r5, 0xf0(r5) + stw r4, 0x4(r5) +#endif + li r4,0 + isync +3: +#ifdef CONFIG_PPC64BRIDGE + slbie r4 +#endif /* CONFIG_PPC64BRIDGE */ + mtsrin r3,r4 + addi r3,r3,0x111 /* next VSID */ + rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ + addis r4,r4,0x1000 /* address of next segment */ + bdnz 3b + sync + isync + blr + +/* + * An undocumented "feature" of 604e requires that the v bit + * be cleared before changing BAT values. + * + * Also, newer IBM firmware does not clear bat3 and 4 so + * this makes sure it's done. + * -- Cort + */ +clear_bats: + li r10,0 + mfspr r9,SPRN_PVR + rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ + cmpwi r9, 1 + beq 1f + + mtspr SPRN_DBAT0U,r10 + mtspr SPRN_DBAT0L,r10 + mtspr SPRN_DBAT1U,r10 + mtspr SPRN_DBAT1L,r10 + mtspr SPRN_DBAT2U,r10 + mtspr SPRN_DBAT2L,r10 + mtspr SPRN_DBAT3U,r10 + mtspr SPRN_DBAT3L,r10 +1: + mtspr SPRN_IBAT0U,r10 + mtspr SPRN_IBAT0L,r10 + mtspr SPRN_IBAT1U,r10 + mtspr SPRN_IBAT1L,r10 + mtspr SPRN_IBAT2U,r10 + mtspr SPRN_IBAT2L,r10 + mtspr SPRN_IBAT3U,r10 + mtspr SPRN_IBAT3L,r10 +BEGIN_FTR_SECTION + /* Here's a tweak: at this point, CPU setup has + * not been called yet, so HIGH_BAT_EN may not be + * set in HID0 for the 745x processors. However, it + * seems that doesn't affect our ability to actually + * write to these SPRs. + */ + mtspr SPRN_DBAT4U,r10 + mtspr SPRN_DBAT4L,r10 + mtspr SPRN_DBAT5U,r10 + mtspr SPRN_DBAT5L,r10 + mtspr SPRN_DBAT6U,r10 + mtspr SPRN_DBAT6L,r10 + mtspr SPRN_DBAT7U,r10 + mtspr SPRN_DBAT7L,r10 + mtspr SPRN_IBAT4U,r10 + mtspr SPRN_IBAT4L,r10 + mtspr SPRN_IBAT5U,r10 + mtspr SPRN_IBAT5L,r10 + mtspr SPRN_IBAT6U,r10 + mtspr SPRN_IBAT6L,r10 + mtspr SPRN_IBAT7U,r10 + mtspr SPRN_IBAT7L,r10 +END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) + blr + +flush_tlbs: + lis r10, 0x40 +1: addic. r10, r10, -0x1000 + tlbie r10 + blt 1b + sync + blr + +mmu_off: + addi r4, r3, __after_mmu_off - _start + mfmsr r3 + andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ + beqlr + andc r3,r3,r0 + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + sync + RFI + +#ifndef CONFIG_POWER4 +/* + * Use the first pair of BAT registers to map the 1st 16MB + * of RAM to KERNELBASE. From this point on we can't safely + * call OF any more. + */ +initial_bats: + lis r11,KERNELBASE@h +#ifndef CONFIG_PPC64BRIDGE + mfspr r9,SPRN_PVR + rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ + cmpwi 0,r9,1 + bne 4f + ori r11,r11,4 /* set up BAT registers for 601 */ + li r8,0x7f /* valid, block length = 8MB */ + oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ + oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ + mtspr SPRN_IBAT0U,r11 /* N.B. 
601 has valid bit in */ + mtspr SPRN_IBAT0L,r8 /* lower BAT register */ + mtspr SPRN_IBAT1U,r9 + mtspr SPRN_IBAT1L,r10 + isync + blr +#endif /* CONFIG_PPC64BRIDGE */ + +4: tophys(r8,r11) +#ifdef CONFIG_SMP + ori r8,r8,0x12 /* R/W access, M=1 */ +#else + ori r8,r8,2 /* R/W access */ +#endif /* CONFIG_SMP */ +#ifdef CONFIG_APUS + ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ +#else + ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ +#endif /* CONFIG_APUS */ + +#ifdef CONFIG_PPC64BRIDGE + /* clear out the high 32 bits in the BAT */ + clrldi r11,r11,32 + clrldi r8,r8,32 +#endif /* CONFIG_PPC64BRIDGE */ + mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ + mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ + mtspr SPRN_IBAT0L,r8 + mtspr SPRN_IBAT0U,r11 + isync + blr + +#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) +setup_disp_bat: + /* + * setup the display bat prepared for us in prom.c + */ + mflr r8 + bl reloc_offset + mtlr r8 + addis r8,r3,disp_BAT@ha + addi r8,r8,disp_BAT@l + lwz r11,0(r8) + lwz r8,4(r8) + mfspr r9,SPRN_PVR + rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ + cmpwi 0,r9,1 + beq 1f + mtspr SPRN_DBAT3L,r8 + mtspr SPRN_DBAT3U,r11 + blr +1: mtspr SPRN_IBAT3L,r8 + mtspr SPRN_IBAT3U,r11 + blr + +#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ + +#else /* CONFIG_POWER4 */ +/* + * Load up the SDR1 and segment register values now + * since we don't have the BATs. + * Also make sure we are running in 32-bit mode. + */ + +initial_mm_power4: + addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ + lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ + mtspr SPRN_SDR1,r14 + slbia + lis r4,0x2000 /* set pseudo-segment reg 12 */ + ori r5,r4,0x0ccc + mtsr 12,r5 +#if 0 + ori r5,r4,0x0888 /* set pseudo-segment reg 8 */ + mtsr 8,r5 /* (for access to serial port) */ +#endif +#ifdef CONFIG_BOOTX_TEXT + ori r5,r4,0x0999 /* set pseudo-segment reg 9 */ + mtsr 9,r5 /* (for access to screen) */ +#endif + mfmsr r0 + clrldi r0,r0,1 + sync + mtmsr r0 + isync + blr + +#endif /* CONFIG_POWER4 */ + +#ifdef CONFIG_8260 +/* Jump into the system reset for the rom. + * We first disable the MMU, and then jump to the ROM reset address. + * + * r3 is the board info structure, r4 is the location for starting. + * I use this for building a small kernel that can load other kernels, + * rather than trying to write or rely on a rom monitor that can tftp load. + */ + .globl m8260_gorom +m8260_gorom: + mfmsr r0 + rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ + sync + mtmsr r0 + sync + mfspr r11, SPRN_HID0 + lis r10, 0 + ori r10,r10,HID0_ICE|HID0_DCE + andc r11, r11, r10 + mtspr SPRN_HID0, r11 + isync + li r5, MSR_ME|MSR_RI + lis r6,2f@h + addis r6,r6,-KERNELBASE@h + ori r6,r6,2f@l + mtspr SPRN_SRR0,r6 + mtspr SPRN_SRR1,r5 + isync + sync + rfi +2: + mtlr r4 + blr +#endif + + +/* + * We put a few things here that have to be page-aligned. + * This stuff goes at the beginning of the data segment, + * which is page-aligned. + */ + .data + .globl sdata +sdata: + .globl empty_zero_page +empty_zero_page: + .space 4096 + + .globl swapper_pg_dir +swapper_pg_dir: + .space 4096 + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * Used to pass parameters into the kernel like root=/dev/sda1, etc. 
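+ *
+ * As a hedged illustration (this is not the kernel's actual copy
+ * routine), early C code is expected to fill this buffer with the
+ * same 512-byte bound that is reserved below:
+ *
+ *	extern char cmd_line[512];
+ *
+ *	void save_bootargs(const char *from)	// helper name is ours
+ *	{
+ *		unsigned int i;
+ *		for (i = 0; from && from[i] && i < sizeof(cmd_line) - 1; i++)
+ *			cmd_line[i] = from[i];
+ *		cmd_line[i] = '\0';		// always NUL-terminate
+ *	}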
+ */ + .globl cmd_line +cmd_line: + .space 512 + + .globl intercept_table +intercept_table: + .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 + .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 + .long 0, 0, 0, i0x1300, 0, 0, 0, 0 + .long 0, 0, 0, 0, 0, 0, 0, 0 + .long 0, 0, 0, 0, 0, 0, 0, 0 + .long 0, 0, 0, 0, 0, 0, 0, 0 + +/* Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S new file mode 100644 index 000000000000..599245b0407e --- /dev/null +++ b/arch/powerpc/kernel/head_44x.S @@ -0,0 +1,778 @@ +/* + * arch/ppc/kernel/head_44x.S + * + * Kernel execution entry point code. + * + * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * Copyright 2002-2005 MontaVista Software, Inc. + * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/ibm4xx.h> +#include <asm/ibm44x.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> +#include "head_booke.h" + + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. "mem=128") + * r7 - End of kernel command line string + * + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + /* + * Reserve a word at a fixed location to store the address + * of abatron_pteptrs + */ + nop +/* + * Save parameters we are passed + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + li r24,0 /* CPU number */ + +/* + * Set up the initial MMU state + * + * We are still executing code at the virtual address + * mappings set by the firmware for the base of RAM. + * + * We first invalidate all TLB entries but the one + * we are running from. We then load the KERNELBASE + * mappings so we can begin to use kernel addresses + * natively and so the interrupt vector locations are + * permanently pinned (necessary since Book E + * implementations always have translation enabled). + * + * TODO: Use the known TLB entry we are running from to + * determine which physical region we are located + * in. 
This can be used to determine where in RAM + * (on a shared CPU system) or PCI memory space + * (on a DRAMless system) we are located. + * For now, we assume a perfect world which means + * we are located at the base of DRAM (physical 0). + */ + +/* + * Search TLB for entry that we are currently using. + * Invalidate all entries but the one we are using. + */ + /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ + mfspr r3,SPRN_PID /* Get PID */ + mfmsr r4 /* Get MSR */ + andi. r4,r4,MSR_IS@l /* TS=1? */ + beq wmmucr /* If not, leave STS=0 */ + oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ +wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ + sync + + bl invstr /* Find our address */ +invstr: mflr r5 /* Make it accessible */ + tlbsx r23,0,r5 /* Find entry we are in */ + li r4,0 /* Start at TLB entry 0 */ + li r3,0 /* Set PAGEID inval value */ +1: cmpw r23,r4 /* Is this our entry? */ + beq skpinv /* If so, skip the inval */ + tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ +skpinv: addi r4,r4,1 /* Increment */ + cmpwi r4,64 /* Are we done? */ + bne 1b /* If not, repeat */ + isync /* If so, context change */ + +/* + * Configure and load pinned entry into TLB slot 63. + */ + + lis r3,KERNELBASE@h /* Load the kernel virtual address */ + ori r3,r3,KERNELBASE@l + + /* Kernel is at the base of RAM */ + li r4, 0 /* Load the kernel physical address */ + + /* Load the kernel PID = 0 */ + li r0,0 + mtspr SPRN_PID,r0 + sync + + /* Initialize MMUCR */ + li r5,0 + mtspr SPRN_MMUCR,r5 + sync + + /* pageid fields */ + clrrwi r3,r3,10 /* Mask off the effective page number */ + ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M + + /* xlat fields */ + clrrwi r4,r4,10 /* Mask off the real page number */ + /* ERPN is 0 for first 4GB page */ + + /* attrib fields */ + /* Added guarded bit to protect against speculative loads/stores */ + li r5,0 + ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) + + li r0,63 /* TLB slot 63 */ + + tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ + tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ + tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ + + /* Force context change */ + mfmsr r0 + mtspr SPRN_SRR1, r0 + lis r0,3f@h + ori r0,r0,3f@l + mtspr SPRN_SRR0,r0 + sync + rfi + + /* If necessary, invalidate original entry we used */ +3: cmpwi r23,63 + beq 4f + li r6,0 + tlbwe r6,r23,PPC44x_TLB_PAGEID + isync + +4: +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* + * Add temporary UART mapping for early debug. + * We can map UART registers wherever we want as long as they don't + * interfere with other system mappings (e.g. with pinned entries). + * For an example of how we handle this - see ocotea.h. 
--ebs + */ + /* pageid fields */ + lis r3,UART0_IO_BASE@h + ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K + + /* xlat fields */ + lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */ +#ifndef CONFIG_440EP + ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */ +#endif + + /* attrib fields */ + li r5,0 + ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G) + + li r0,0 /* TLB slot 0 */ + + tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ + tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ + tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ + + /* Force context change */ + isync +#endif /* CONFIG_SERIAL_TEXT_DEBUG */ + + /* Establish the interrupt vector offsets */ + SET_IVOR(0, CriticalInput); + SET_IVOR(1, MachineCheck); + SET_IVOR(2, DataStorage); + SET_IVOR(3, InstructionStorage); + SET_IVOR(4, ExternalInput); + SET_IVOR(5, Alignment); + SET_IVOR(6, Program); + SET_IVOR(7, FloatingPointUnavailable); + SET_IVOR(8, SystemCall); + SET_IVOR(9, AuxillaryProcessorUnavailable); + SET_IVOR(10, Decrementer); + SET_IVOR(11, FixedIntervalTimer); + SET_IVOR(12, WatchdogTimer); + SET_IVOR(13, DataTLBError); + SET_IVOR(14, InstructionTLBError); + SET_IVOR(15, Debug); + + /* Establish the interrupt vector base */ + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ + mtspr SPRN_IVPR,r4 + +#ifdef CONFIG_440EP + /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ + mfspr r2,SPRN_CCR0 + lis r3,0xffef + ori r3,r3,0xffff + and r2,r2,r3 + mtspr SPRN_CCR0,r2 + isync +#endif + + /* + * This is where the main kernel code starts. + */ + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to current thread */ + addi r4,r2,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@h + ori r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init + +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + + /* Setup PTE pointers for the Abatron bdiGDB */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + lis r4, KERNELBASE@h + ori r4, r4, KERNELBASE@l + stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ + stw r6, 0(r5) + + /* Let's move on */ + lis r4,start_kernel@h + ori r4,r4,start_kernel@l + lis r3,MSR_KERNEL@h + ori r3,r3,MSR_KERNEL@l + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi /* change context and jump to start_kernel */ + +/* + * Interrupt vector entry code + * + * The Book E MMUs are always on so we don't need to handle + * interrupts in real mode as with previous PPC processors. In + * this case we handle interrupts in the kernel virtual address + * space. + * + * Interrupt vectors are dynamically placed relative to the + * interrupt prefix as determined by the address of interrupt_base. + * The interrupt vectors offsets are programmed using the labels + * for each interrupt vector entry. + * + * Interrupt vectors must be aligned on a 16 byte boundary. + * We align on a 32 byte cache line boundary for good measure. 
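+ *
+ * A hedged model of how a Book E vector address is formed (the helper
+ * name is ours; the masks follow from IVPR using only its high 16 bits
+ * and vectors being 16-byte aligned):
+ *
+ *	unsigned long booke_vector(unsigned long ivpr, unsigned long ivor)
+ *	{
+ *		return (ivpr & 0xffff0000) | (ivor & 0x0000fff0);
+ *	}
+ *
+ * This is why SET_IVOR() above only needs each handler's low-order
+ * offset, and IVPR the common high half.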
+ */ + +interrupt_base: + /* Critical Input Interrupt */ + CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) + + /* Machine Check Interrupt */ +#ifdef CONFIG_440A + MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) +#else + CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) +#endif + + /* Data Storage Interrupt */ + START_EXCEPTION(DataStorage) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + + /* + * Check if it was a store fault, if not then bail + * because a user tried to access a kernel or + * read-protected page. Otherwise, get the + * offending address and handle it. + */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_ST@h + beq 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MMUCR + rlwinm r12,r12,0,0,23 /* Clear TID */ + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) + + /* Load PID into MMUCR TID */ + mfspr r12,SPRN_MMUCR /* Get MMUCR */ + mfspr r13,SPRN_PID /* Get PID */ + rlwimi r12,r13,0,24,31 /* Set TID */ + +4: + mtspr SPRN_MMUCR,r12 + + rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + lwzx r11, r12, r11 /* Get pgd/pmd entry */ + rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + lwz r11, 4(r12) /* Get pte entry */ + + andi. r13, r11, _PAGE_RW /* Is it writeable? */ + beq 2f /* Bail if not */ + + /* Update 'changed'. + */ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, 4(r12) /* Update Linux page table */ + + li r13, PPC44x_TLB_SR@l /* Set SR */ + rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ + rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ + rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ + rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ + rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ + and r12, r12, r11 /* HWEXEC/RW & USER */ + rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ + rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ + + rlwimi r11,r13,0,26,31 /* Insert static perms */ + + rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ + + /* find the TLB index that caused the fault. It has to be here. */ + tlbsx r10, 0, r10 + + tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ + + /* Done...restore registers and get out of here. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + rfi /* Force context change */ + +2: + /* + * The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. 
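+	 *
+	 * For reference, a hedged C reduction of the rlwimi sequence on
+	 * the fast path above (the PPC44x_TLB_U* names are assumed to
+	 * mirror the S* ones used in this file):
+	 *
+	 *	unsigned long tlb_perms(int rw, int exec, int user)
+	 *	{
+	 *		unsigned long p = PPC44x_TLB_SR;      // always readable
+	 *		if (rw)           p |= PPC44x_TLB_SW; // _PAGE_RW
+	 *		if (exec)         p |= PPC44x_TLB_SX; // _PAGE_HWEXEC
+	 *		if (user)         p |= PPC44x_TLB_UR; // _PAGE_USER
+	 *		if (user && rw)   p |= PPC44x_TLB_UW;
+	 *		if (user && exec) p |= PPC44x_TLB_UX;
+	 *		return p;
+	 *	}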
+ */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b data_access + + /* Instruction Storage Interrupt */ + INSTRUCTION_STORAGE_EXCEPTION + + /* External Input Interrupt */ + EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) + + /* Alignment Interrupt */ + ALIGNMENT_EXCEPTION + + /* Program Interrupt */ + PROGRAM_EXCEPTION + + /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else + EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif + + /* System Call Interrupt */ + START_EXCEPTION(SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0x0c00, DoSyscall) + + /* Auxillary Processor Unavailable Interrupt */ + EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) + + /* Decrementer Interrupt */ + DECREMENTER_EXCEPTION + + /* Fixed Internal Timer Interrupt */ + /* TODO: Add FIT support */ + EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) + + /* Watchdog Timer Interrupt */ + /* TODO: Add watchdog support */ +#ifdef CONFIG_BOOKE_WDT + CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) +#else + CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) +#endif + + /* Data TLB Error Interrupt */ + START_EXCEPTION(DataTLBError) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MMUCR + rlwinm r12,r12,0,0,23 /* Clear TID */ + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) + + /* Load PID into MMUCR TID */ + mfspr r12,SPRN_MMUCR + mfspr r13,SPRN_PID /* Get PID */ + rlwimi r12,r13,0,24,31 /* Set TID */ + +4: + mtspr SPRN_MMUCR,r12 + + rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + lwzx r11, r12, r11 /* Get pgd/pmd entry */ + rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + lwz r11, 4(r12) /* Get pte entry */ + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ + beq 2f /* Bail if not present */ + + ori r11, r11, _PAGE_ACCESSED + stw r11, 4(r12) + + /* Jump to common tlb load */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b data_access + + /* Instruction TLB Error Interrupt */ + /* + * Nearly the same as above, except we get our + * information from different registers and bailout + * to a different point. + */ + START_EXCEPTION(InstructionTLBError) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. 
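+	 *
+	 * A hedged C equivalent of the branch below (current_pgdir()
+	 * stands in for the PGDIR load from SPRG3):
+	 *
+	 *	pgd_t *pgd_for_fault(unsigned long ea)
+	 *	{
+	 *		if (ea >= TASK_SIZE)           // kernel address:
+	 *			return swapper_pg_dir; // kernel tables, TID = 0
+	 *		return current_pgdir();        // user tables, TID = PID
+	 *	}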
+ */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MMUCR + rlwinm r12,r12,0,0,23 /* Clear TID */ + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) + + /* Load PID into MMUCR TID */ + mfspr r12,SPRN_MMUCR + mfspr r13,SPRN_PID /* Get PID */ + rlwimi r12,r13,0,24,31 /* Set TID */ + +4: + mtspr SPRN_MMUCR,r12 + + rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + lwzx r11, r12, r11 /* Get pgd/pmd entry */ + rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + lwz r11, 4(r12) /* Get pte entry */ + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ + beq 2f /* Bail if not present */ + + ori r11, r11, _PAGE_ACCESSED + stw r11, 4(r12) + + /* Jump to common TLB load point */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b InstructionStorage + + /* Debug Interrupt */ + DEBUG_EXCEPTION + +/* + * Local functions + */ + /* + * Data TLB exceptions will bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +data_access: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + EXC_XFER_EE_LITE(0x0300, handle_page_fault) + +/* + + * Both the instruction and data TLB miss get to this + * point to load the TLB. + * r10 - EA of fault + * r11 - available to use + * r12 - Pointer to the 64-bit PTE + * r13 - available to use + * MMUCR - loaded with proper value when we get here + * Upon exit, we reload everything and RFI. + */ +finish_tlb_load: + /* + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + + /* Load the next available TLB index */ + lis r13, tlb_44x_index@ha + lwz r13, tlb_44x_index@l(r13) + /* Load the TLB high watermark */ + lis r11, tlb_44x_hwater@ha + lwz r11, tlb_44x_hwater@l(r11) + + /* Increment, rollover, and store TLB index */ + addi r13, r13, 1 + cmpw 0, r13, r11 /* reserve entries */ + ble 7f + li r13, 0 +7: + /* Store the next available TLB index */ + lis r11, tlb_44x_index@ha + stw r13, tlb_44x_index@l(r11) + + lwz r11, 0(r12) /* Get MS word of PTE */ + lwz r12, 4(r12) /* Get LS word of PTE */ + rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ + tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ + + /* + * Create PAGEID. This is the faulting address, + * page size, and valid flag. 
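+	 *
+	 * A hedged sketch of the word assembled below (the helper name is
+	 * ours; the constants are the ones used in this file):
+	 *
+	 *	unsigned long make_pageid(unsigned long ea)
+	 *	{
+	 *		// EPN = top 20 bits of the faulting address,
+	 *		// plus the valid bit and the 4K size code
+	 *		return (ea & 0xfffff000)
+	 *			| PPC44x_TLB_VALID | PPC44x_TLB_4K;
+	 *	}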
+ */ + li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K + rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ + tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ + + li r10, PPC44x_TLB_SR@l /* Set SR */ + rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ + rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ + rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ + rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ + and r11, r12, r11 /* HWEXEC & USER */ + rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ + + rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ + rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ + tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ + + /* Done...restore registers and get out of here. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + rfi /* Force context change */ + +/* + * Global functions + */ + +/* + * extern void giveup_altivec(struct task_struct *prev) + * + * The 44x core does not have an AltiVec unit. + */ +_GLOBAL(giveup_altivec) + blr + +/* + * extern void giveup_fpu(struct task_struct *prev) + * + * The 44x core does not have an FPU. + */ +#ifndef CONFIG_PPC_FPU +_GLOBAL(giveup_fpu) + blr +#endif + +/* + * extern void abort(void) + * + * At present, this routine just applies a system reset. + */ +_GLOBAL(abort) + mfspr r13,SPRN_DBCR0 + oris r13,r13,DBCR0_RST_SYSTEM@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r4, 0x4(r5) +#endif + mtspr SPRN_PID,r3 + isync /* Force context change */ + blr + +/* + * We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. + */ + .data +_GLOBAL(sdata) +_GLOBAL(empty_zero_page) + .space 4096 + +/* + * To support >32-bit physical addresses, we use an 8KB pgdir. + */ +_GLOBAL(swapper_pg_dir) + .space 8192 + +/* Reserved 4k for the critical exception stack & 4k for the machine + * check stack per CPU for kernel mode exceptions */ + .section .bss + .align 12 +exception_stack_bottom: + .space BOOKE_EXCEPTION_STACK_SIZE +_GLOBAL(exception_stack_top) + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. + */ +_GLOBAL(cmd_line) + .space 512 + +/* + * Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 + + diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S new file mode 100644 index 000000000000..8562b807b37c --- /dev/null +++ b/arch/powerpc/kernel/head_4xx.S @@ -0,0 +1,1016 @@ +/* + * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. 
+ * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * + * + * Module name: head_4xx.S + * + * Description: + * Kernel execution entry point code. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/ibm4xx.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. "mem=96m") + * r7 - End of kernel command line string + * + * This is all going to change RSN when we add bi_recs....... -- Dan + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + + /* Save parameters we are passed. + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + + /* We have to turn on the MMU right away so we get cache modes + * set correctly. + */ + bl initial_mmu + +/* We now have the lower 16 Meg mapped into TLB entries, and the caches + * ready to work. + */ +turn_on_mmu: + lis r0,MSR_KERNEL@h + ori r0,r0,MSR_KERNEL@l + mtspr SPRN_SRR1,r0 + lis r0,start_here@h + ori r0,r0,start_here@l + mtspr SPRN_SRR0,r0 + SYNC + rfi /* enables MMU */ + b . /* prevent prefetch past rfi */ + +/* + * This area is used for temporarily saving registers during the + * critical exception prolog. + */ + . = 0xc0 +crit_save: +_GLOBAL(crit_r10) + .space 4 +_GLOBAL(crit_r11) + .space 4 + +/* + * Exception vector entry code. This code runs with address translation + * turned off (i.e. using physical addresses). We assume SPRG3 has the + * physical address of the current task thread_struct. + * Note that we have to have decremented r1 before we write to any fields + * of the exception frame, since a critical interrupt could occur at any + * time, and it will write to the area immediately below the current r1. + */ +#define NORMAL_EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ + mtspr SPRN_SPRG1,r11; \ + mtspr SPRN_SPRG2,r1; \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + beq 1f; \ + mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ + lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ + addi r1,r1,THREAD_SIZE; \ +1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r1); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRN_SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRN_SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r10,SPRN_SPRG2; \ + mfspr r12,SPRN_SRR0; \ + stw r10,GPR1(r11); \ + mfspr r9,SPRN_SRR1; \ + stw r10,0(r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for critical exceptions. 
This is a little different + * from the normal exception prolog above since a critical exception + * can potentially occur at any point during normal exception processing. + * Thus we cannot use the same SPRG registers as the normal prolog above. + * Instead we use a couple of words of memory at low physical addresses. + * This is OK since we don't support SMP on these processors. + */ +#define CRITICAL_EXCEPTION_PROLOG \ + stw r10,crit_r10@l(0); /* save two registers to work with */\ + stw r11,crit_r11@l(0); \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + lis r11,critical_stack_top@h; \ + ori r11,r11,critical_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r11); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,SPRN_SRR2; \ + stw r1,GPR1(r11); \ + mfspr r9,SPRN_SRR3; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + + /* + * State at this point: + * r9 saved in stack frame, now saved SRR3 & ~MSR_WE + * r10 saved in crit_r10 and in stack frame, trashed + * r11 saved in crit_r11 and in stack frame, + * now phys stack/exception frame pointer + * r12 saved in stack frame, now saved SRR2 + * CR saved in stack frame, CR0.EQ = !SRR3.PR + * LR, DEAR, ESR in stack frame + * r1 saved in stack frame, now virt stack/excframe pointer + * r0, r3-r8 saved in stack frame + */ + +/* + * Exception vectors. + */ +#define START_EXCEPTION(n, label) \ + . 
= n; \ +label: + +#define EXCEPTION(n, label, hdlr, xfer) \ + START_EXCEPTION(n, label); \ + NORMAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define CRITICAL_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(n, label); \ + CRITICAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, \ + ret_from_crit_exc) + +#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,TRAP(r11); \ + lis r10,msr@h; \ + ori r10,r10,msr@l; \ + copyee(r10, r9); \ + bl tfer; \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ + ret_from_except) + + +/* + * 0x0100 - Critical Interrupt Exception + */ + CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) + +/* + * 0x0200 - Machine Check Exception + */ + CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) + +/* + * 0x0300 - Data Storage Exception + * This happens for just a few reasons. U0 set (but we don't do that), + * or zone protection fault (user violation, write to protected page). + * If this is just an update of modified status, we do that quickly + * and exit. Otherwise, we call heavywight functions to do the work. + */ + START_EXCEPTION(0x0300, DataStorage) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + + /* First, check if it was a zone fault (which means a user + * tried to access a kernel or read-protected page - always + * a SEGV). All other faults here must be stores, so no + * need to check ESR_DST as well. */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_DIZ@h + bne 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + + andi. r9, r11, _PAGE_RW /* Is it writeable? */ + beq 2f /* Bail if not */ + + /* Update 'changed'. + */ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, 0(r12) /* Update Linux page table */ + + /* Most of the Linux PTE is ready to load into the TLB LO. 
+ * We set ZSEL, where only the LS-bit determines user access. + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + li r12, 0x0ce2 + andc r11, r11, r12 /* Make sure 20, 21 are zero */ + + /* find the TLB index that caused the fault. It has to be here. + */ + tlbsx r9, 0, r10 + + tlbwe r11, r9, TLB_DATA /* Load TLB LO */ + + /* Done...restore registers and get out of here. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + PPC405_ERR77_SYNC + rfi /* Should sync shadow TLBs */ + b . /* prevent prefetch past rfi */ + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b DataAccess + +/* + * 0x0400 - Instruction Storage Exception + * This is caused by a fetch from non-execute or guarded pages. + */ + START_EXCEPTION(0x0400, InstructionAccess) + NORMAL_EXCEPTION_PROLOG + mr r4,r12 /* Pass SRR0 as arg2 */ + li r5,0 /* Pass zero as arg3 */ + EXC_XFER_EE_LITE(0x400, handle_page_fault) + +/* 0x0500 - External Interrupt Exception */ + EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + +/* 0x0600 - Alignment Exception */ + START_EXCEPTION(0x0600, Alignment) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ + stw r4,_DEAR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x600, AlignmentException) + +/* 0x0700 - Program Exception */ + START_EXCEPTION(0x0700, ProgramCheck) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_ESR /* Grab the ESR and save it */ + stw r4,_ESR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x700, ProgramCheckException) + + EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) + EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) + EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) + EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) + +/* 0x0C00 - System Call Exception */ + START_EXCEPTION(0x0C00, SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0xc00, DoSyscall) + + EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) + EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) + EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) + +/* 0x1000 - Programmable Interval Timer (PIT) Exception */ + START_EXCEPTION(0x1000, Decrementer) + NORMAL_EXCEPTION_PROLOG + lis r0,TSR_PIS@h + mtspr SPRN_TSR,r0 /* Clear the PIT exception */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_LITE(0x1000, timer_interrupt) + +#if 0 +/* NOTE: + * FIT and WDT handlers are not implemented yet. 
+ */ + +/* 0x1010 - Fixed Interval Timer (FIT) Exception +*/ + STND_EXCEPTION(0x1010, FITException, UnknownException) + +/* 0x1020 - Watchdog Timer (WDT) Exception +*/ +#ifdef CONFIG_BOOKE_WDT + CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) +#else + CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) +#endif +#endif + +/* 0x1100 - Data TLB Miss Exception + * As the name implies, translation is not in the MMU, so search the + * page tables and fix it. The only purpose of this function is to + * load TLB entries from the page table if they exist. + */ + START_EXCEPTION(0x1100, DTLBMiss) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r12, 0(r11) /* Get L1 entry */ + andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r9, r11, _PAGE_PRESENT + beq 5f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Create TLB tag. This is the faulting address plus a static + * set of bits. These are size, valid, E, U0. + */ + li r12, 0x00c0 + rlwimi r10, r12, 0, 20, 31 + + b finish_tlb_load + +2: /* Check for possible large-page pmd entry */ + rlwinm. r9, r12, 2, 22, 24 + beq 5f + + /* Create TLB tag. This is the faulting address, plus a static + * set of bits (valid, E, U0) plus the size from the PMD. + */ + ori r9, r9, 0x40 + rlwimi r10, r9, 0, 20, 31 + mr r11, r12 + + b finish_tlb_load + +5: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b DataAccess + +/* 0x1200 - Instruction TLB Miss Exception + * Nearly the same as above, except we get our information from different + * registers and bailout to a different point. + */ + START_EXCEPTION(0x1200, ITLBMiss) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. 
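+	 *
+	 * A hedged sketch of the choice below (mfspr() stands in for the
+	 * SPR read): TID 0 entries match every address space, so kernel
+	 * translations need not exist once per process:
+	 *
+	 *	unsigned long tid_for_fault(unsigned long ea)
+	 *	{
+	 *		return ea >= TASK_SIZE ? 0 : mfspr(SPRN_PID);
+	 *	}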
+ */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r12, 0(r11) /* Get L1 entry */ + andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r9, r11, _PAGE_PRESENT + beq 5f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Create TLB tag. This is the faulting address plus a static + * set of bits. These are size, valid, E, U0. + */ + li r12, 0x00c0 + rlwimi r10, r12, 0, 20, 31 + + b finish_tlb_load + +2: /* Check for possible large-page pmd entry */ + rlwinm. r9, r12, 2, 22, 24 + beq 5f + + /* Create TLB tag. This is the faulting address, plus a static + * set of bits (valid, E, U0) plus the size from the PMD. + */ + ori r9, r9, 0x40 + rlwimi r10, r9, 0, 20, 31 + mr r11, r12 + + b finish_tlb_load + +5: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b InstructionAccess + + EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) +#ifdef CONFIG_IBM405_ERR51 + /* 405GP errata 51 */ + START_EXCEPTION(0x1700, Trap_17) + b DTLBMiss +#else + EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) +#endif + EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) + +/* Check for a single step debug exception while in an exception + * handler before state has been saved. This is to catch the case + * where an instruction that we are trying to single step causes + * an exception (eg ITLB/DTLB miss) and thus the first instruction of + * the exception handler generates a single step debug exception. + * + * If we get a debug trap on the first instruction of an exception handler, + * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is + * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). + * The exception handler was handling a non-critical interrupt, so it will + * save (and later restore) the MSR via SPRN_SRR1, which will still have + * the MSR_DE bit set. 
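+ *
+ * A hedged C restatement of the triage below (the arguments stand in
+ * for SRR2, SRR3 and DBSR):
+ *
+ *	int stray_debug_trap(unsigned long srr2, unsigned long srr3,
+ *			     unsigned long dbsr)
+ *	{
+ *		if (!(dbsr & DBSR_IC))           // not a single step
+ *			return 0;
+ *		if (!(srr3 & (MSR_IR | MSR_PR))) // supervisor, MMU off
+ *			return 1;                // fix up silently
+ *		return srr2 <= 0x2100;           // inside vector space
+ *	}
+ *
+ * A stray trap is acknowledged by clearing DBSR[IC] and dropping
+ * MSR_DE from the saved SRR3 before returning with rfci.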
+ */ + /* 0x2000 - Debug Exception */ + START_EXCEPTION(0x2000, DebugTrap) + CRITICAL_EXCEPTION_PROLOG + + /* + * If this is a single step or branch-taken exception in an + * exception entry sequence, it was probably meant to apply to + * the code where the exception occurred (since exception entry + * doesn't turn off DE automatically). We simulate the effect + * of turning off DE on entry to an exception handler by turning + * off DE in the SRR3 value and clearing the debug status. + */ + mfspr r10,SPRN_DBSR /* check single-step/branch taken */ + andis. r10,r10,DBSR_IC@h + beq+ 2f + + andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ + beq 1f /* branch and fix it up */ + + mfspr r10,SPRN_SRR2 /* Faulting instruction address */ + cmplwi r10,0x2100 + bgt+ 2f /* address above exception vectors */ + + /* here it looks like we got an inappropriate debug exception. */ +1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ + lis r10,DBSR_IC@h /* clear the IC event */ + mtspr SPRN_DBSR,r10 + /* restore state and get out */ + lwz r10,_CCR(r11) + lwz r0,GPR0(r11) + lwz r1,GPR1(r11) + mtcrf 0x80,r10 + mtspr SPRN_SRR2,r12 + mtspr SPRN_SRR3,r9 + lwz r9,GPR9(r11) + lwz r12,GPR12(r11) + lwz r10,crit_r10@l(0) + lwz r11,crit_r11@l(0) + PPC405_ERR77_SYNC + rfci + b . + + /* continue normal handling for a critical exception... */ +2: mfspr r4,SPRN_DBSR + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_TEMPLATE(DebugException, 0x2002, \ + (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) + +/* + * The other Data TLB exceptions bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +DataAccess: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + EXC_XFER_EE_LITE(0x300, handle_page_fault) + +/* Other PowerPC processors, namely those derived from the 6xx-series + * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. + * However, for the 4xx-series processors these are neither defined nor + * reserved. + */ + + /* Damn, I came up one instruction too many to fit into the + * exception space :-). Both the instruction and data TLB + * miss get to this point to load the TLB. + * r10 - TLB_TAG value + * r11 - Linux PTE + * r12, r9 - avilable to use + * PID - loaded with proper value when we get here + * Upon exit, we reload everything and RFI. + * Actually, it will fit now, but oh well.....a common place + * to load the TLB. + */ +tlb_4xx_index: + .long 0 +finish_tlb_load: + /* load the next available TLB index. + */ + lwz r9, tlb_4xx_index@l(0) + addi r9, r9, 1 + andi. r9, r9, (PPC4XX_TLB_SIZE-1) + stw r9, tlb_4xx_index@l(0) + +6: + /* + * Clear out the software-only bits in the PTE to generate the + * TLB_DATA value. These are the bottom 2 bits of the RPM, the + * top 3 bits of the zone field, and M. + */ + li r12, 0x0ce2 + andc r11, r11, r12 + + tlbwe r11, r9, TLB_DATA /* Load TLB LO */ + tlbwe r10, r9, TLB_TAG /* Load TLB HI */ + + /* Done...restore registers and get out of here. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + PPC405_ERR77_SYNC + rfi /* Should sync shadow TLBs */ + b . 
/* prevent prefetch past rfi */ + +/* extern void giveup_fpu(struct task_struct *prev) + * + * The PowerPC 4xx family of processors do not have an FPU, so this just + * returns. + */ +_GLOBAL(giveup_fpu) + blr + +/* This is where the main kernel code starts. + */ +start_here: + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to phys current thread */ + tophys(r4,r2) + addi r4,r4,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@ha + addi r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init /* We have to do this with MMU on */ + +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + +/* Go back to running unmapped so we can load up new values + * and change to using our exception vectors. + * On the 4xx, all we have to do is invalidate the TLB to clear + * the old 16M byte TLB mappings. + */ + lis r4,2f@h + ori r4,r4,2f@l + tophys(r4,r4) + lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h + ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi + b . /* prevent prefetch past rfi */ + +/* Load up the kernel context */ +2: + sync /* Flush to memory before changing TLB */ + tlbia + isync /* Flush shadow TLBs */ + + /* set up the PTE pointers for the Abatron bdiGDB. + */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r5, 0xf0(r0) /* Must match your Abatron config file */ + tophys(r5,r5) + stw r6, 0(r5) + +/* Now turn on the MMU for real! */ + lis r4,MSR_KERNEL@h + ori r4,r4,MSR_KERNEL@l + lis r3,start_kernel@h + ori r3,r3,start_kernel@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + rfi /* enable MMU and jump to start_kernel */ + b . /* prevent prefetch past rfi */ + +/* Set up the initial MMU state so we can do the first level of + * kernel initialization. This maps the first 16 MBytes of memory 1:1 + * virtual to physical and more importantly sets the cache mode. + */ +initial_mmu: + tlbia /* Invalidate all TLB entries */ + isync + + /* We should still be executing code at physical address 0x0000xxxx + * at this point. However, start_here is at virtual address + * 0xC000xxxx. So, set up a TLB mapping to cover this once + * translation is enabled. + */ + + lis r3,KERNELBASE@h /* Load the kernel virtual address */ + ori r3,r3,KERNELBASE@l + tophys(r4,r3) /* Load the kernel physical address */ + + iccci r0,r3 /* Invalidate the i-cache before use */ + + /* Load the kernel PID. + */ + li r0,0 + mtspr SPRN_PID,r0 + sync + + /* Configure and load two entries into TLB slots 62 and 63. + * In case we are pinning TLBs, these are reserved in by the + * other TLB functions. If not reserving, then it doesn't + * matter where they are loaded. + */ + clrrwi r4,r4,10 /* Mask off the real page number */ + ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ + + clrrwi r3,r3,10 /* Mask off the effective page number */ + ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) + + li r0,63 /* TLB slot 63 */ + + tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ + tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) + + /* Load a TLB entry for the UART, so that ppc4xx_progress() can use + * the UARTs nice and early. We use a 4k real==virtual mapping. 
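+	 *
+	 * A hedged sketch of the entry written below (write_tlb_entry()
+	 * is a stand-in for the tlbwe pair; the TLB_* names are the ones
+	 * used in this file):
+	 *
+	 *	void map_uart_identity(unsigned long io)
+	 *	{
+	 *		unsigned long tag  = (io & ~0xfffUL) | TLB_VALID
+	 *				   | TLB_PAGESZ(PAGESZ_4K);
+	 *		unsigned long data = (io & ~0xfffUL)
+	 *				   | TLB_WR | TLB_I | TLB_M | TLB_G;
+	 *		write_tlb_entry(0, tag, data); // slot 0, virt == phys
+	 *	}
+	 *
+	 * The I and G bits keep the mapping cache-inhibited and guarded,
+	 * as device registers require.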
*/ + + lis r3,SERIAL_DEBUG_IO_BASE@h + ori r3,r3,SERIAL_DEBUG_IO_BASE@l + mr r4,r3 + clrrwi r4,r4,12 + ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) + + clrrwi r3,r3,12 + ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) + + li r0,0 /* TLB slot 0 */ + tlbwe r4,r0,TLB_DATA + tlbwe r3,r0,TLB_TAG +#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ + + isync + + /* Establish the exception vector base + */ + lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ + tophys(r0,r4) /* Use the physical address */ + mtspr SPRN_EVPR,r0 + + blr + +_GLOBAL(abort) + mfspr r13,SPRN_DBCR0 + oris r13,r13,DBCR0_RST_SYSTEM@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, KERNELBASE@h + lwz r5, 0xf0(r5) + stw r4, 0x4(r5) +#endif + sync + mtspr SPRN_PID,r3 + isync /* Need an isync to flush shadow */ + /* TLBs after changing PID */ + blr + +/* We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. + */ + .data +_GLOBAL(sdata) +_GLOBAL(empty_zero_page) + .space 4096 +_GLOBAL(swapper_pg_dir) + .space 4096 + + +/* Stack for handling critical exceptions from kernel mode */ + .section .bss + .align 12 +exception_stack_bottom: + .space 4096 +critical_stack_top: +_GLOBAL(exception_stack_top) + +/* This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. + */ +_GLOBAL(cmd_line) + .space 512 + +/* Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S new file mode 100644 index 000000000000..22a5ee07e1ea --- /dev/null +++ b/arch/powerpc/kernel/head_64.S @@ -0,0 +1,2011 @@ +/* + * arch/ppc64/kernel/head.S + * + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP + * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> + * Adapted for Power Macintosh by Paul Mackerras. + * Low-level exception handlers and MMU support + * rewritten by Paul Mackerras. + * Copyright (C) 1996 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and + * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com + * + * This file contains the low-level support and setup for the + * PowerPC-64 platform, including trap and interrupt dispatch. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/config.h> +#include <linux/threads.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/systemcfg.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> +#include <asm/bug.h> +#include <asm/cputable.h> +#include <asm/setup.h> +#include <asm/hvcall.h> +#include <asm/iSeries/LparMap.h> + +#ifdef CONFIG_PPC_ISERIES +#define DO_SOFT_DISABLE +#endif + +/* + * We layout physical memory as follows: + * 0x0000 - 0x00ff : Secondary processor spin code + * 0x0100 - 0x2fff : pSeries Interrupt prologs + * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs + * 0x6000 - 0x6fff : Initial (CPU0) segment table + * 0x7000 - 0x7fff : FWNMI data area + * 0x8000 - : Early init and support code + */ + +/* + * SPRG Usage + * + * Register Definition + * + * SPRG0 reserved for hypervisor + * SPRG1 temp - used to save gpr + * SPRG2 temp - used to save gpr + * SPRG3 virt addr of paca + */ + +/* + * Entering into this code we make the following assumptions: + * For pSeries: + * 1. The MMU is off & open firmware is running in real mode. + * 2. The kernel is entered at __start + * + * For iSeries: + * 1. The MMU is on (as it always is for iSeries) + * 2. The kernel is entered at system_reset_iSeries + */ + + .text + .globl _stext +_stext: +#ifdef CONFIG_PPC_MULTIPLATFORM +_GLOBAL(__start) + /* NOP this out unconditionally */ +BEGIN_FTR_SECTION + b .__start_initialization_multiplatform +END_FTR_SECTION(0, 1) +#endif /* CONFIG_PPC_MULTIPLATFORM */ + + /* Catch branch to 0 in real mode */ + trap + +#ifdef CONFIG_PPC_ISERIES + /* + * At offset 0x20, there is a pointer to iSeries LPAR data. + * This is required by the hypervisor + */ + . = 0x20 + .llong hvReleaseData-KERNELBASE + + /* + * At offset 0x28 and 0x30 are offsets to the mschunks_map + * array (used by the iSeries LPAR debugger to do translation + * between physical addresses and absolute addresses) and + * to the pidhash table (also used by the debugger) + */ + .llong mschunks_map-KERNELBASE + .llong 0 /* pidhash-KERNELBASE SFRXXX */ + + /* Offset 0x38 - Pointer to start of embedded System.map */ + .globl embedded_sysmap_start +embedded_sysmap_start: + .llong 0 + /* Offset 0x40 - Pointer to end of embedded System.map */ + .globl embedded_sysmap_end +embedded_sysmap_end: + .llong 0 + +#endif /* CONFIG_PPC_ISERIES */ + + /* Secondary processors spin on this value until it goes to 1. */ + .globl __secondary_hold_spinloop +__secondary_hold_spinloop: + .llong 0x0 + + /* Secondary processors write this value with their cpu # */ + /* after they enter the spin loop immediately below. */ + .globl __secondary_hold_acknowledge +__secondary_hold_acknowledge: + .llong 0x0 + + . = 0x60 +/* + * The following code is used on pSeries to hold secondary processors + * in a spin loop after they have been freed from OpenFirmware, but + * before the bulk of the kernel has been relocated. This code + * is relocated to physical address 0x60 before prom_init is run. + * All of it must fit below the first exception vector at 0x100. + */ +_GLOBAL(__secondary_hold) + mfmsr r24 + ori r24,r24,MSR_RI + mtmsrd r24 /* RI on */ + + /* Grab our linux cpu number */ + mr r24,r3 + + /* Tell the master cpu we're here */ + /* Relocation is off & we are located at an address less */ + /* than 0x100, so only need to grab low order offset. */ + std r24,__secondary_hold_acknowledge@l(0) + sync + + /* All secondary cpus wait here until told to start. 
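+	 *
+	 * A hedged C model of the hold protocol around this point (the
+	 * extern names are the real ones declared above):
+	 *
+	 *	extern volatile long __secondary_hold_spinloop;
+	 *	extern volatile long __secondary_hold_acknowledge;
+	 *
+	 *	void secondary_hold(long cpu)
+	 *	{
+	 *		__secondary_hold_acknowledge = cpu; // announce arrival
+	 *		while (__secondary_hold_spinloop != 1)
+	 *			;                           // released by master
+	 *	}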
*/ +100: ld r4,__secondary_hold_spinloop@l(0) + cmpdi 0,r4,1 + bne 100b + +#ifdef CONFIG_HMT + b .hmt_init +#else +#ifdef CONFIG_SMP + mr r3,r24 + b .pSeries_secondary_smp_init +#else + BUG_OPCODE +#endif +#endif + +/* This value is used to mark exception frames on the stack. */ + .section ".toc","aw" +exception_marker: + .tc ID_72656773_68657265[TC],0x7265677368657265 + .text + +/* + * The following macros define the code that appears as + * the prologue to each of the exception handlers. They + * are split into two parts to allow a single kernel binary + * to be used for pSeries and iSeries. + * LOL. One day... - paulus + */ + +/* + * We make as much of the exception code common between native + * exception handlers (including pSeries LPAR) and iSeries LPAR + * implementations as possible. + */ + +/* + * This is the start of the interrupt handlers for pSeries + * This code runs with relocation off. + */ +#define EX_R9 0 +#define EX_R10 8 +#define EX_R11 16 +#define EX_R12 24 +#define EX_R13 32 +#define EX_SRR0 40 +#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */ +#define EX_DAR 48 +#define EX_LR 48 /* SLB miss saves LR, but not DAR */ +#define EX_DSISR 56 +#define EX_CCR 60 + +#define EXCEPTION_PROLOG_PSERIES(area, label) \ + mfspr r13,SPRG3; /* get paca address into r13 */ \ + std r9,area+EX_R9(r13); /* save r9 - r12 */ \ + std r10,area+EX_R10(r13); \ + std r11,area+EX_R11(r13); \ + std r12,area+EX_R12(r13); \ + mfspr r9,SPRG1; \ + std r9,area+EX_R13(r13); \ + mfcr r9; \ + clrrdi r12,r13,32; /* get high part of &label */ \ + mfmsr r10; \ + mfspr r11,SRR0; /* save SRR0 */ \ + ori r12,r12,(label)@l; /* virt addr of handler */ \ + ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ + mtspr SRR0,r12; \ + mfspr r12,SRR1; /* and SRR1 */ \ + mtspr SRR1,r10; \ + rfid; \ + b . /* prevent speculative execution */ + +/* + * This is the start of the interrupt handlers for iSeries + * This code runs with relocation on. + */ +#define EXCEPTION_PROLOG_ISERIES_1(area) \ + mfspr r13,SPRG3; /* get paca address into r13 */ \ + std r9,area+EX_R9(r13); /* save r9 - r12 */ \ + std r10,area+EX_R10(r13); \ + std r11,area+EX_R11(r13); \ + std r12,area+EX_R12(r13); \ + mfspr r9,SPRG1; \ + std r9,area+EX_R13(r13); \ + mfcr r9 + +#define EXCEPTION_PROLOG_ISERIES_2 \ + mfmsr r10; \ + ld r11,PACALPPACA+LPPACASRR0(r13); \ + ld r12,PACALPPACA+LPPACASRR1(r13); \ + ori r10,r10,MSR_RI; \ + mtmsrd r10,1 + +/* + * The common exception prolog is used for all except a few exceptions + * such as a segment miss on a kernel address. We have to be prepared + * to take another exception from the point where we first touch the + * kernel stack onwards. + * + * On entry r13 points to the paca, r9-r13 are saved in the paca, + * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and + * SRR1, and relocation is on. + */ +#define EXCEPTION_PROLOG_COMMON(n, area) \ + andi. 
r10,r12,MSR_PR; /* See if coming from user */ \ + mr r10,r1; /* Save r1 */ \ + subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ + beq- 1f; \ + ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ +1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ + bge- cr1,bad_stack; /* abort if it is */ \ + std r9,_CCR(r1); /* save CR in stackframe */ \ + std r11,_NIP(r1); /* save SRR0 in stackframe */ \ + std r12,_MSR(r1); /* save SRR1 in stackframe */ \ + std r10,0(r1); /* make stack chain pointer */ \ + std r0,GPR0(r1); /* save r0 in stackframe */ \ + std r10,GPR1(r1); /* save r1 in stackframe */ \ + std r2,GPR2(r1); /* save r2 in stackframe */ \ + SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ + SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ + ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ + ld r10,area+EX_R10(r13); \ + std r9,GPR9(r1); \ + std r10,GPR10(r1); \ + ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ + ld r10,area+EX_R12(r13); \ + ld r11,area+EX_R13(r13); \ + std r9,GPR11(r1); \ + std r10,GPR12(r1); \ + std r11,GPR13(r1); \ + ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ + mflr r9; /* save LR in stackframe */ \ + std r9,_LINK(r1); \ + mfctr r10; /* save CTR in stackframe */ \ + std r10,_CTR(r1); \ + mfspr r11,XER; /* save XER in stackframe */ \ + std r11,_XER(r1); \ + li r9,(n)+1; \ + std r9,_TRAP(r1); /* set trap number */ \ + li r10,0; \ + ld r11,exception_marker@toc(r2); \ + std r10,RESULT(r1); /* clear regs->result */ \ + std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ + +/* + * Exception vectors. + */ +#define STD_EXCEPTION_PSERIES(n, label) \ + . = n; \ + .globl label##_pSeries; \ +label##_pSeries: \ + HMT_MEDIUM; \ + mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) + +#define STD_EXCEPTION_ISERIES(n, label, area) \ + .globl label##_iSeries; \ +label##_iSeries: \ + HMT_MEDIUM; \ + mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ + EXCEPTION_PROLOG_ISERIES_1(area); \ + EXCEPTION_PROLOG_ISERIES_2; \ + b label##_common + +#define MASKABLE_EXCEPTION_ISERIES(n, label) \ + .globl label##_iSeries; \ +label##_iSeries: \ + HMT_MEDIUM; \ + mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ + EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ + lbz r10,PACAPROCENABLED(r13); \ + cmpwi 0,r10,0; \ + beq- label##_iSeries_masked; \ + EXCEPTION_PROLOG_ISERIES_2; \ + b label##_common; \ + +#ifdef DO_SOFT_DISABLE +#define DISABLE_INTS \ + lbz r10,PACAPROCENABLED(r13); \ + li r11,0; \ + std r10,SOFTE(r1); \ + mfmsr r10; \ + stb r11,PACAPROCENABLED(r13); \ + ori r10,r10,MSR_EE; \ + mtmsrd r10,1 + +#define ENABLE_INTS \ + lbz r10,PACAPROCENABLED(r13); \ + mfmsr r11; \ + std r10,SOFTE(r1); \ + ori r11,r11,MSR_EE; \ + mtmsrd r11,1 + +#else /* hard enable/disable interrupts */ +#define DISABLE_INTS + +#define ENABLE_INTS \ + ld r12,_MSR(r1); \ + mfmsr r11; \ + rlwimi r11,r12,0,MSR_EE; \ + mtmsrd r11,1 + +#endif + +#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + DISABLE_INTS; \ + bl .save_nvgprs; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except + +#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + DISABLE_INTS; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except_lite + +/* + * Start of pSeries system interrupt 
routines + */ + . = 0x100 + .globl __start_interrupts +__start_interrupts: + + STD_EXCEPTION_PSERIES(0x100, system_reset) + + . = 0x200 +_machine_check_pSeries: + HMT_MEDIUM + mtspr SPRG1,r13 /* save r13 */ + RUNLATCH_ON(r13) + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) + + . = 0x300 + .globl data_access_pSeries +data_access_pSeries: + HMT_MEDIUM + mtspr SPRG1,r13 +BEGIN_FTR_SECTION + mtspr SPRG2,r12 + mfspr r13,DAR + mfspr r12,DSISR + srdi r13,r13,60 + rlwimi r13,r12,16,0x20 + mfcr r12 + cmpwi r13,0x2c + beq .do_stab_bolted_pSeries + mtcrf 0x80,r12 + mfspr r12,SPRG2 +END_FTR_SECTION_IFCLR(CPU_FTR_SLB) + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) + + . = 0x380 + .globl data_access_slb_pSeries +data_access_slb_pSeries: + HMT_MEDIUM + mtspr SPRG1,r13 + RUNLATCH_ON(r13) + mfspr r13,SPRG3 /* get paca address into r13 */ + std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ + std r10,PACA_EXSLB+EX_R10(r13) + std r11,PACA_EXSLB+EX_R11(r13) + std r12,PACA_EXSLB+EX_R12(r13) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r9,SPRG1 + std r9,PACA_EXSLB+EX_R13(r13) + mfcr r9 + mfspr r12,SRR1 /* and SRR1 */ + mfspr r3,DAR + b .do_slb_miss /* Rel. branch works in real mode */ + + STD_EXCEPTION_PSERIES(0x400, instruction_access) + + . = 0x480 + .globl instruction_access_slb_pSeries +instruction_access_slb_pSeries: + HMT_MEDIUM + mtspr SPRG1,r13 + RUNLATCH_ON(r13) + mfspr r13,SPRG3 /* get paca address into r13 */ + std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ + std r10,PACA_EXSLB+EX_R10(r13) + std r11,PACA_EXSLB+EX_R11(r13) + std r12,PACA_EXSLB+EX_R12(r13) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r9,SPRG1 + std r9,PACA_EXSLB+EX_R13(r13) + mfcr r9 + mfspr r12,SRR1 /* and SRR1 */ + mfspr r3,SRR0 /* SRR0 is faulting address */ + b .do_slb_miss /* Rel. branch works in real mode */ + + STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) + STD_EXCEPTION_PSERIES(0x600, alignment) + STD_EXCEPTION_PSERIES(0x700, program_check) + STD_EXCEPTION_PSERIES(0x800, fp_unavailable) + STD_EXCEPTION_PSERIES(0x900, decrementer) + STD_EXCEPTION_PSERIES(0xa00, trap_0a) + STD_EXCEPTION_PSERIES(0xb00, trap_0b) + + . = 0xc00 + .globl system_call_pSeries +system_call_pSeries: + HMT_MEDIUM + RUNLATCH_ON(r9) + mr r9,r13 + mfmsr r10 + mfspr r13,SPRG3 + mfspr r11,SRR0 + clrrdi r12,r13,32 + oris r12,r12,system_call_common@h + ori r12,r12,system_call_common@l + mtspr SRR0,r12 + ori r10,r10,MSR_IR|MSR_DR|MSR_RI + mfspr r12,SRR1 + mtspr SRR1,r10 + rfid + b . /* prevent speculative execution */ + + STD_EXCEPTION_PSERIES(0xd00, single_step) + STD_EXCEPTION_PSERIES(0xe00, trap_0e) + + /* We need to deal with the Altivec unavailable exception + * here which is at 0xf20, thus in the middle of the + * prolog code of the PerformanceMonitor one. A little + * trickery is thus necessary + */ + . = 0xf00 + b performance_monitor_pSeries + + STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) + + STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) + STD_EXCEPTION_PSERIES(0x1700, altivec_assist) + + . = 0x3000 + +/*** pSeries interrupt support ***/ + + /* moved from 0xf00 */ + STD_EXCEPTION_PSERIES(., performance_monitor) + + .align 7 +_GLOBAL(do_stab_bolted_pSeries) + mtcrf 0x80,r12 + mfspr r12,SPRG2 + EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) + +/* + * Vectors for the FWNMI option. Share common code. 
+ */
+	.globl system_reset_fwnmi
+system_reset_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13		/* save r13 */
+	RUNLATCH_ON(r13)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+	.globl machine_check_fwnmi
+machine_check_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13		/* save r13 */
+	RUNLATCH_ON(r13)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#ifdef CONFIG_PPC_ISERIES
+/*** ISeries-LPAR interrupt handlers ***/
+
+	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
+
+	.globl data_access_iSeries
+data_access_iSeries:
+	mtspr	SPRG1,r13
+BEGIN_FTR_SECTION
+	mtspr	SPRG2,r12
+	mfspr	r13,DAR
+	mfspr	r12,DSISR
+	srdi	r13,r13,60
+	rlwimi	r13,r12,16,0x20
+	mfcr	r12
+	cmpwi	r13,0x2c
+	beq	.do_stab_bolted_iSeries
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
+	EXCEPTION_PROLOG_ISERIES_2
+	b	data_access_common
+
+.do_stab_bolted_iSeries:
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	EXCEPTION_PROLOG_ISERIES_2
+	b	.do_stab_bolted
+
+	.globl data_access_slb_iSeries
+data_access_slb_iSeries:
+	mtspr	SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	mfspr	r3,DAR
+	b	.do_slb_miss
+
+	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
+
+	.globl instruction_access_slb_iSeries
+instruction_access_slb_iSeries:
+	mtspr	SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	ld	r3,PACALPPACA+LPPACASRR0(r13)
+	b	.do_slb_miss
+
+	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
+	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
+	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
+	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
+
+	.globl system_call_iSeries
+system_call_iSeries:
+	mr	r9,r13
+	mfspr	r13,SPRG3
+	EXCEPTION_PROLOG_ISERIES_2
+	b	system_call_common
+
+	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
+
+	.globl system_reset_iSeries
+system_reset_iSeries:
+	mfspr	r13,SPRG3		/* Get paca address */
+	mfmsr	r24
+	ori	r24,r24,MSR_RI
+	mtmsrd	r24			/* RI on */
+	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
+	cmpwi	0,r24,0			/* Are we processor 0? */
+	beq	.__start_initialization_iSeries	/* Start up the first processor */
+	mfspr	r4,SPRN_CTRLF
+	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
+	andc	r4,r4,r5
+	mtspr	SPRN_CTRLT,r4
+
+1:
+	HMT_LOW
+#ifdef CONFIG_SMP
+	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
+					 * should start */
+	sync
+	LOADADDR(r3,current_set)
+	sldi	r28,r24,3		/* get current_set[cpu#] */
+	ldx	r3,r3,r28
+	addi	r1,r3,THREAD_SIZE
+	subi	r1,r1,STACK_FRAME_OVERHEAD
+
+	cmpwi	0,r23,0
+	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
+	bne	.__secondary_start		/* Told to go: start the processor */
+iSeries_secondary_smp_loop:
+	/* Let the Hypervisor know we are alive */
+	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+	lis	r3,0x8002
+	rldicr	r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
+#else /* CONFIG_SMP */
+	/* Yield the processor.  This is required for non-SMP kernels
+		which are running on multi-threaded machines.
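+		As a worked example (for illustration), the "yield" call
+		number below is built in three steps, since a 64-bit
+		immediate cannot be loaded in a single instruction:
+			lis    r3,0x8000     -> r3 = 0xffffffff80000000 (sign extended)
+			rldicr r3,r3,32,15   -> r3 = 0x8000000000000000 (rotate, keep top 16 bits)
+			addi   r3,r3,18      -> r3 = 0x8000000000000012, the yield call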
*/ + lis r3,0x8000 + rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ + addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ + li r4,0 /* "yield timed" */ + li r5,-1 /* "yield forever" */ +#endif /* CONFIG_SMP */ + li r0,-1 /* r0=-1 indicates a Hypervisor call */ + sc /* Invoke the hypervisor via a system call */ + mfspr r13,SPRG3 /* Put r13 back ???? */ + b 1b /* If SMP not configured, secondaries + * loop forever */ + + .globl decrementer_iSeries_masked +decrementer_iSeries_masked: + li r11,1 + stb r11,PACALPPACA+LPPACADECRINT(r13) + lwz r12,PACADEFAULTDECR(r13) + mtspr SPRN_DEC,r12 + /* fall through */ + + .globl hardware_interrupt_iSeries_masked +hardware_interrupt_iSeries_masked: + mtcrf 0x80,r9 /* Restore regs */ + ld r11,PACALPPACA+LPPACASRR0(r13) + ld r12,PACALPPACA+LPPACASRR1(r13) + mtspr SRR0,r11 + mtspr SRR1,r12 + ld r9,PACA_EXGEN+EX_R9(r13) + ld r10,PACA_EXGEN+EX_R10(r13) + ld r11,PACA_EXGEN+EX_R11(r13) + ld r12,PACA_EXGEN+EX_R12(r13) + ld r13,PACA_EXGEN+EX_R13(r13) + rfid + b . /* prevent speculative execution */ +#endif /* CONFIG_PPC_ISERIES */ + +/*** Common interrupt handlers ***/ + + STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) + + /* + * Machine check is different because we use a different + * save area: PACA_EXMC instead of PACA_EXGEN. + */ + .align 7 + .globl machine_check_common +machine_check_common: + EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) + DISABLE_INTS + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + bl .machine_check_exception + b .ret_from_except + + STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) + STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) + STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) + STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) + STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) + STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) + STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) +#ifdef CONFIG_ALTIVEC + STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) +#else + STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) +#endif + +/* + * Here we have detected that the kernel stack pointer is bad. + * R9 contains the saved CR, r13 points to the paca, + * r10 contains the (bad) kernel stack pointer, + * r11 and r12 contain the saved SRR0 and SRR1. + * We switch to using an emergency stack, save the registers there, + * and call kernel_bad_stack(), which panics. + */ +bad_stack: + ld r1,PACAEMERGSP(r13) + subi r1,r1,64+INT_FRAME_SIZE + std r9,_CCR(r1) + std r10,GPR1(r1) + std r11,_NIP(r1) + std r12,_MSR(r1) + mfspr r11,DAR + mfspr r12,DSISR + std r11,_DAR(r1) + std r12,_DSISR(r1) + mflr r10 + mfctr r11 + mfxer r12 + std r10,_LINK(r1) + std r11,_CTR(r1) + std r12,_XER(r1) + SAVE_GPR(0,r1) + SAVE_GPR(2,r1) + SAVE_4GPRS(3,r1) + SAVE_2GPRS(7,r1) + SAVE_10GPRS(12,r1) + SAVE_10GPRS(22,r1) + addi r11,r1,INT_FRAME_SIZE + std r11,0(r1) + li r12,0 + std r12,0(r11) + ld r2,PACATOC(r13) +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .kernel_bad_stack + b 1b + +/* + * Return from an exception with minimal checks. + * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. + * If interrupts have been enabled, or anything has been + * done that might have changed the scheduling status of + * any task or sent any task a signal, you should use + * ret_from_except or ret_from_except_lite instead of this. 
+ */ +fast_exception_return: + ld r12,_MSR(r1) + ld r11,_NIP(r1) + andi. r3,r12,MSR_RI /* check if RI is set */ + beq- unrecov_fer + ld r3,_CCR(r1) + ld r4,_LINK(r1) + ld r5,_CTR(r1) + ld r6,_XER(r1) + mtcr r3 + mtlr r4 + mtctr r5 + mtxer r6 + REST_GPR(0, r1) + REST_8GPRS(2, r1) + + mfmsr r10 + clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ + mtmsrd r10,1 + + mtspr SRR1,r12 + mtspr SRR0,r11 + REST_4GPRS(10, r1) + ld r1,GPR1(r1) + rfid + b . /* prevent speculative execution */ + +unrecov_fer: + bl .save_nvgprs +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b + +/* + * Here r13 points to the paca, r9 contains the saved CR, + * SRR0 and SRR1 are saved in r11 and r12, + * r9 - r13 are saved in paca->exgen. + */ + .align 7 + .globl data_access_common +data_access_common: + RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ + mfspr r10,DAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,DSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) + ld r3,PACA_EXGEN+EX_DAR(r13) + lwz r4,PACA_EXGEN+EX_DSISR(r13) + li r5,0x300 + b .do_hash_page /* Try to handle as hpte fault */ + + .align 7 + .globl instruction_access_common +instruction_access_common: + EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) + ld r3,_NIP(r1) + andis. r4,r12,0x5820 + li r5,0x400 + b .do_hash_page /* Try to handle as hpte fault */ + + .align 7 + .globl hardware_interrupt_common + .globl hardware_interrupt_entry +hardware_interrupt_common: + EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) +hardware_interrupt_entry: + DISABLE_INTS + addi r3,r1,STACK_FRAME_OVERHEAD + bl .do_IRQ + b .ret_from_except_lite + + .align 7 + .globl alignment_common +alignment_common: + mfspr r10,DAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,DSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) + ld r3,PACA_EXGEN+EX_DAR(r13) + lwz r4,PACA_EXGEN+EX_DSISR(r13) + std r3,_DAR(r1) + std r4,_DSISR(r1) + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + ENABLE_INTS + bl .alignment_exception + b .ret_from_except + + .align 7 + .globl program_check_common +program_check_common: + EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + ENABLE_INTS + bl .program_check_exception + b .ret_from_except + + .align 7 + .globl fp_unavailable_common +fp_unavailable_common: + EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) + bne .load_up_fpu /* if from user, just load it up */ + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + ENABLE_INTS + bl .kernel_fp_unavailable_exception + BUG_OPCODE + +/* + * load_up_fpu(unused, unused, tsk) + * Disable FP for the task which had the FPU previously, + * and save its floating-point registers in its thread_struct. + * Enables the FPU for use in the kernel on return. + * On SMP we know the fpu is free, since we give it up every + * switch (ie, no lazy save of the FP registers). + * On entry: r13 == 'current' && last_task_used_math != 'current' + */ +_STATIC(load_up_fpu) + mfmsr r5 /* grab the current MSR */ + ori r5,r5,MSR_FP + mtmsrd r5 /* enable use of fpu now */ + isync +/* + * For SMP, we don't do lazy FPU switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_fpu in switch_to. 
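+ *
+ * The UP policy, as a minimal C sketch (save_fp_state/load_fp_state
+ * are illustrative helper names, not real kernel functions):
+ *
+ *	void fp_unavailable(struct task_struct *cur)
+ *	{
+ *		if (last_task_used_math && last_task_used_math != cur)
+ *			save_fp_state(last_task_used_math);	// lazy save
+ *		load_fp_state(cur);		// restore cur's FPRs/FPSCR
+ *		last_task_used_math = cur;	// cur now owns the FPU
+ *	}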
+ *
+ */
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_math@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Save FP state to last_task_used_math's THREAD struct */
+	addi	r4,r4,THREAD
+	SAVE_32FPRS(0, r4)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR(r4)
+	/* Disable FP for last_task_used_math */
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r6,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r6
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	ld	r4,THREAD_FPEXC_MODE(r5)
+	ori	r12,r12,MSR_FP
+	or	r12,r12,r4
+	std	r12,_MSR(r1)
+	lfd	fr0,THREAD_FPSCR(r5)
+	mtfsf	0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	b	fast_exception_return
+
+	.align	7
+	.globl altivec_unavailable_common
+altivec_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	bne	.load_up_altivec	/* if from user, just load it up */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.altivec_unavailable_exception
+	b	.ret_from_except
+
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+	mfmsr	r5			/* grab the current MSR */
+	oris	r5,r5,MSR_VEC@h
+	mtmsrd	r5			/* enable use of VMX now */
+	isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code.  Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_altivec@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Save VMX state to last_task_used_altivec's THREAD struct */
+	addi	r4,r4,THREAD
+	SAVE_32VRS(0,r5,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	/* Disable VMX for last_task_used_altivec */
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VEC@h
+	andc	r4,r4,r6
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* Hack: if we get an altivec unavailable trap with VRSAVE
+	 * set to all zeros, we assume this is a broken application
+	 * that fails to set it properly, and thus we switch it to
+	 * all 1's
+	 */
+	mfspr	r4,SPRN_VRSAVE
+	cmpdi	0,r4,0
+	bne+	1f
+	li	r4,-1
+	mtspr	SPRN_VRSAVE,r4
+1:
+	/* enable use of VMX after return */
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	oris	r12,r12,MSR_VEC@h
+	std	r12,_MSR(r1)
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_altivec to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	b	fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
+/*
+ * Hash table stuff
+ */
+	.align	7
+_GLOBAL(do_hash_page)
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+
+	andis.	r0,r4,0xa450		/* weird error? */
+	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
+	bne-	.do_ste_alloc		/* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+	/*
+	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+	 * accessing a userspace segment (even from the kernel).  We assume
+	 * kernel addresses always have the high bit set.
+	 */
+	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
+	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
+	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
+	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
+	ori	r4,r4,1			/* add _PAGE_PRESENT */
+	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
+
+	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * r3 contains the faulting address
+	 * r4 contains the required access permissions
+	 * r5 contains the trap number
+	 *
+	 * at return r3 = 0 for success
+	 */
+	bl	.hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if hash_page succeeded */
+
+#ifdef DO_SOFT_DISABLE
+	/*
+	 * If we had interrupts soft-enabled at the point where the
+	 * DSI/ISI occurred, and an interrupt came in during hash_page,
+	 * handle it now.
+	 * We jump to ret_from_except_lite rather than fast_exception_return
+	 * because ret_from_except_lite will check for and handle pending
+	 * interrupts if necessary.
+	 */
+	beq	.ret_from_except_lite
+	/* For a hash failure, we don't bother re-enabling interrupts */
+	ble-	12f
+
+	/*
+	 * hash_page couldn't handle it, set soft interrupt enable back
+	 * to what it was before the trap.  Note that .local_irq_restore
+	 * handles any interrupts pending at this point.
+	 */
+	ld	r3,SOFTE(r1)
+	bl	.local_irq_restore
+	b	11f
+#else
+	beq	fast_exception_return	/* Return from exception on success */
+	ble-	12f			/* Failure return from hash_page */
+
+	/* fall through */
+#endif
+
+/* Here we have a page fault that hash_page can't handle.
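+ *
+ * In rough C terms (a sketch of the fallback below, not a literal
+ * transcription), handle_page_fault does:
+ *
+ *	rc = do_page_fault(regs, dar, dsisr);
+ *	if (rc == 0)
+ *		return;			// the generic mm code fixed it up
+ *	bad_page_fault(regs, dar, rc);	// rc is the signal to deliver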
*/ +_GLOBAL(handle_page_fault) + ENABLE_INTS +11: ld r4,_DAR(r1) + ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + bl .do_page_fault + cmpdi r3,0 + beq+ .ret_from_except_lite + bl .save_nvgprs + mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) + bl .bad_page_fault + b .ret_from_except + +/* We have a page fault that hash_page could handle but HV refused + * the PTE insertion + */ +12: bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) + bl .low_hash_fault + b .ret_from_except + + /* here we have a segment miss */ +_GLOBAL(do_ste_alloc) + bl .ste_allocate /* try to insert stab entry */ + cmpdi r3,0 + beq+ fast_exception_return + b .handle_page_fault + +/* + * r13 points to the PACA, r9 contains the saved CR, + * r11 and r12 contain the saved SRR0 and SRR1. + * r9 - r13 are saved in paca->exslb. + * We assume we aren't going to take any exceptions during this procedure. + * We assume (DAR >> 60) == 0xc. + */ + .align 7 +_GLOBAL(do_stab_bolted) + stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ + std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ + + /* Hash to the primary group */ + ld r10,PACASTABVIRT(r13) + mfspr r11,DAR + srdi r11,r11,28 + rldimi r10,r11,7,52 /* r10 = first ste of the group */ + + /* Calculate VSID */ + /* This is a kernel address, so protovsid = ESID */ + ASM_VSID_SCRAMBLE(r11, r9) + rldic r9,r11,12,16 /* r9 = vsid << 12 */ + + /* Search the primary group for a free entry */ +1: ld r11,0(r10) /* Test valid bit of the current ste */ + andi. r11,r11,0x80 + beq 2f + addi r10,r10,16 + andi. r11,r10,0x70 + bne 1b + + /* Stick for only searching the primary group for now. */ + /* At least for now, we use a very simple random castout scheme */ + /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ + mftb r11 + rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ + ori r11,r11,0x10 + + /* r10 currently points to an ste one past the group of interest */ + /* make it point to the randomly selected entry */ + subi r10,r10,128 + or r10,r10,r11 /* r10 is the entry to invalidate */ + + isync /* mark the entry invalid */ + ld r11,0(r10) + rldicl r11,r11,56,1 /* clear the valid bit */ + rotldi r11,r11,8 + std r11,0(r10) + sync + + clrrdi r11,r11,28 /* Get the esid part of the ste */ + slbie r11 + +2: std r9,8(r10) /* Store the vsid part of the ste */ + eieio + + mfspr r11,DAR /* Get the new esid */ + clrrdi r11,r11,28 /* Permits a full 32b of ESID */ + ori r11,r11,0x90 /* Turn on valid and kp */ + std r11,0(r10) /* Put new entry back into the stab */ + + sync + + /* All done -- return from exception. */ + lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ + ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ + + andi. r10,r12,MSR_RI + beq- unrecov_slb + + mtcrf 0x80,r9 /* restore CR */ + + mfmsr r10 + clrrdi r10,r10,2 + mtmsrd r10,1 + + mtspr SRR0,r11 + mtspr SRR1,r12 + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + rfid + b . /* prevent speculative execution */ + +/* + * r13 points to the PACA, r9 contains the saved CR, + * r11 and r12 contain the saved SRR0 and SRR1. + * r3 has the faulting address + * r9 - r13 are saved in paca->exslb. + * r3 is saved in paca->slb_r3 + * We assume we aren't going to take any exceptions during this procedure. + */ +_GLOBAL(do_slb_miss) + mflr r10 + + stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. 
frame */ + std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + + bl .slb_allocate /* handle it */ + + /* All done -- return from exception. */ + + ld r10,PACA_EXSLB+EX_LR(r13) + ld r3,PACA_EXSLB+EX_R3(r13) + lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ +#ifdef CONFIG_PPC_ISERIES + ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ +#endif /* CONFIG_PPC_ISERIES */ + + mtlr r10 + + andi. r10,r12,MSR_RI /* check for unrecoverable exception */ + beq- unrecov_slb + +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + +#ifdef CONFIG_PPC_ISERIES + mtspr SRR0,r11 + mtspr SRR1,r12 +#endif /* CONFIG_PPC_ISERIES */ + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + rfid + b . /* prevent speculative execution */ + +unrecov_slb: + EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) + DISABLE_INTS + bl .save_nvgprs +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b + +/* + * Space for CPU0's segment table. + * + * On iSeries, the hypervisor must fill in at least one entry before + * we get control (with relocate on). The address is give to the hv + * as a page number (see xLparMap in LparData.c), so this must be at a + * fixed address (the linker can't compute (u64)&initial_stab >> + * PAGE_SHIFT). + */ + . = STAB0_PHYS_ADDR /* 0x6000 */ + .globl initial_stab +initial_stab: + .space 4096 + +/* + * Data area reserved for FWNMI option. + * This address (0x7000) is fixed by the RPA. + */ + .= 0x7000 + .globl fwnmi_data_area +fwnmi_data_area: + + /* iSeries does not use the FWNMI stuff, so it is safe to put + * this here, even if we later allow kernels that will boot on + * both pSeries and iSeries */ +#ifdef CONFIG_PPC_ISERIES + . = LPARMAP_PHYS +#include "lparmap.s" +/* + * This ".text" is here for old compilers that generate a trailing + * .note section when compiling .c files to .s + */ + .text +#endif /* CONFIG_PPC_ISERIES */ + + . = 0x8000 + +/* + * On pSeries, secondary processors spin in the following code. + * At entry, r3 = this processor's number (physical cpu id) + */ +_GLOBAL(pSeries_secondary_smp_init) + mr r24,r3 + + /* turn on 64-bit mode */ + bl .enable_64b_mode + isync + + /* Copy some CPU settings from CPU 0 */ + bl .__restore_cpu_setup + + /* Set up a paca value for this processor. Since we have the + * physical cpu id in r24, we need to search the pacas to find + * which logical id maps to our physical one. + */ + LOADADDR(r13, paca) /* Get base vaddr of paca array */ + li r5,0 /* logical cpu id */ +1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ + cmpw r6,r24 /* Compare to our id */ + beq 2f + addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ + addi r5,r5,1 + cmpwi r5,NR_CPUS + blt 1b + + mr r3,r24 /* not found, copy phys to r3 */ + b .kexec_wait /* next kernel might do better */ + +2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ + /* From now on, r24 is expected to be logical cpuid */ + mr r24,r5 +3: HMT_LOW + lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ + /* start. */ + sync + + /* Create a temp kernel stack for use before relocation is on. 
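+ *
+ * The paca search above, as a rough C sketch (the hw_cpu_id field
+ * name is an assumption for illustration):
+ *
+ *	for (lcpu = 0; lcpu < NR_CPUS; lcpu++)
+ *		if (paca[lcpu].hw_cpu_id == phys_id)
+ *			break;			// r13 = &paca[lcpu]
+ *	if (lcpu == NR_CPUS)
+ *		kexec_wait(phys_id);		// no match: park this cpu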
*/ + ld r1,PACAEMERGSP(r13) + subi r1,r1,STACK_FRAME_OVERHEAD + + cmpwi 0,r23,0 +#ifdef CONFIG_SMP + bne .__secondary_start +#endif + b 3b /* Loop until told to go */ + +#ifdef CONFIG_PPC_ISERIES +_STATIC(__start_initialization_iSeries) + /* Clear out the BSS */ + LOADADDR(r11,__bss_stop) + LOADADDR(r8,__bss_start) + sub r11,r11,r8 /* bss size */ + addi r11,r11,7 /* round up to an even double word */ + rldicl. r11,r11,61,3 /* shift right by 3 */ + beq 4f + addi r8,r8,-8 + li r0,0 + mtctr r11 /* zero this many doublewords */ +3: stdu r0,8(r8) + bdnz 3b +4: + LOADADDR(r1,init_thread_union) + addi r1,r1,THREAD_SIZE + li r0,0 + stdu r0,-STACK_FRAME_OVERHEAD(r1) + + LOADADDR(r3,cpu_specs) + LOADADDR(r4,cur_cpu_spec) + li r5,0 + bl .identify_cpu + + LOADADDR(r2,__toc_start) + addi r2,r2,0x4000 + addi r2,r2,0x4000 + + bl .iSeries_early_setup + + /* relocation is on at this point */ + + b .start_here_common +#endif /* CONFIG_PPC_ISERIES */ + +#ifdef CONFIG_PPC_MULTIPLATFORM + +_STATIC(__mmu_off) + mfmsr r3 + andi. r0,r3,MSR_IR|MSR_DR + beqlr + andc r3,r3,r0 + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + sync + rfid + b . /* prevent speculative execution */ + + +/* + * Here is our main kernel entry point. We support currently 2 kind of entries + * depending on the value of r5. + * + * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content + * in r3...r7 + * + * r5 == NULL -> kexec style entry. r3 is a physical pointer to the + * DT block, r4 is a physical pointer to the kernel itself + * + */ +_GLOBAL(__start_initialization_multiplatform) + /* + * Are we booted from a PROM Of-type client-interface ? + */ + cmpldi cr0,r5,0 + bne .__boot_from_prom /* yes -> prom */ + + /* Save parameters */ + mr r31,r3 + mr r30,r4 + + /* Make sure we are running in 64 bits mode */ + bl .enable_64b_mode + + /* Setup some critical 970 SPRs before switching MMU off */ + bl .__970_cpu_preinit + + /* cpu # */ + li r24,0 + + /* Switch off MMU if not already */ + LOADADDR(r4, .__after_prom_start - KERNELBASE) + add r4,r4,r30 + bl .__mmu_off + b .__after_prom_start + +_STATIC(__boot_from_prom) + /* Save parameters */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + + /* Make sure we are running in 64 bits mode */ + bl .enable_64b_mode + + /* put a relocation offset into r3 */ + bl .reloc_offset + + LOADADDR(r2,__toc_start) + addi r2,r2,0x4000 + addi r2,r2,0x4000 + + /* Relocate the TOC from a virt addr to a real addr */ + sub r2,r2,r3 + + /* Restore parameters */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + + /* Do all of the interaction with OF client interface */ + bl .prom_init + /* We never return */ + trap + +/* + * At this point, r3 contains the physical address we are running at, + * returned by prom_init() + */ +_STATIC(__after_prom_start) + +/* + * We need to run with __start at physical address 0. + * This will leave some code in the first 256B of + * real memory, which are reserved for software use. + * The remainder of the first page is loaded with the fixed + * interrupt vectors. The next two pages are filled with + * unknown exception placeholders. + * + * Note: This process overwrites the OF exception vectors. + * r26 == relocation offset + * r27 == KERNELBASE + */ + bl .reloc_offset + mr r26,r3 + SET_REG_TO_CONST(r27,KERNELBASE) + + li r3,0 /* target addr */ + + // XXX FIXME: Use phys returned by OF (r30) + sub r4,r27,r26 /* source addr */ + /* current address of _start */ + /* i.e. 
where we are running */ + /* the source addr */ + + LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ + sub r5,r5,r27 + + li r6,0x100 /* Start offset, the first 0x100 */ + /* bytes were copied earlier. */ + + bl .copy_and_flush /* copy the first n bytes */ + /* this includes the code being */ + /* executed here. */ + + LOADADDR(r0, 4f) /* Jump to the copy of this code */ + mtctr r0 /* that we just made/relocated */ + bctr + +4: LOADADDR(r5,klimit) + sub r5,r5,r26 + ld r5,0(r5) /* get the value of klimit */ + sub r5,r5,r27 + bl .copy_and_flush /* copy the rest */ + b .start_here_multiplatform + +#endif /* CONFIG_PPC_MULTIPLATFORM */ + +/* + * Copy routine used to copy the kernel to start at physical address 0 + * and flush and invalidate the caches as needed. + * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset + * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. + * + * Note: this routine *only* clobbers r0, r6 and lr + */ +_GLOBAL(copy_and_flush) + addi r5,r5,-8 + addi r6,r6,-8 +4: li r0,16 /* Use the least common */ + /* denominator cache line */ + /* size. This results in */ + /* extra cache line flushes */ + /* but operation is correct. */ + /* Can't get cache line size */ + /* from NACA as it is being */ + /* moved too. */ + + mtctr r0 /* put # words/line in ctr */ +3: addi r6,r6,8 /* copy a cache line */ + ldx r0,r6,r4 + stdx r0,r6,r3 + bdnz 3b + dcbst r6,r3 /* write it to memory */ + sync + icbi r6,r3 /* flush the icache line */ + cmpld 0,r6,r5 + blt 4b + sync + addi r5,r5,8 + addi r6,r6,8 + blr + +.align 8 +copy_to_here: + +#ifdef CONFIG_SMP +#ifdef CONFIG_PPC_PMAC +/* + * On PowerMac, secondary processors starts from the reset vector, which + * is temporarily turned into a call to one of the functions below. + */ + .section ".text"; + .align 2 ; + + .globl pmac_secondary_start_1 +pmac_secondary_start_1: + li r24, 1 + b .pmac_secondary_start + + .globl pmac_secondary_start_2 +pmac_secondary_start_2: + li r24, 2 + b .pmac_secondary_start + + .globl pmac_secondary_start_3 +pmac_secondary_start_3: + li r24, 3 + b .pmac_secondary_start + +_GLOBAL(pmac_secondary_start) + /* turn on 64-bit mode */ + bl .enable_64b_mode + isync + + /* Copy some CPU settings from CPU 0 */ + bl .__restore_cpu_setup + + /* pSeries do that early though I don't think we really need it */ + mfmsr r3 + ori r3,r3,MSR_RI + mtmsrd r3 /* RI on */ + + /* Set up a paca value for this processor. */ + LOADADDR(r4, paca) /* Get base vaddr of paca array */ + mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ + add r13,r13,r4 /* for this processor. */ + mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ + + /* Create a temp kernel stack for use before relocation is on. */ + ld r1,PACAEMERGSP(r13) + subi r1,r1,STACK_FRAME_OVERHEAD + + b .__secondary_start + +#endif /* CONFIG_PPC_PMAC */ + +/* + * This function is called after the master CPU has released the + * secondary processors. The execution environment is relocation off. + * The paca for this processor has the following fields initialized at + * this point: + * 1. Processor number + * 2. Segment table pointer (virtual address) + * On entry the following are set: + * r1 = stack pointer. 
vaddr for iSeries, raddr (temp stack) for pSeries + * r24 = cpu# (in Linux terms) + * r13 = paca virtual address + * SPRG3 = paca virtual address + */ +_GLOBAL(__secondary_start) + + HMT_MEDIUM /* Set thread priority to MEDIUM */ + + ld r2,PACATOC(r13) + li r6,0 + stb r6,PACAPROCENABLED(r13) + +#ifndef CONFIG_PPC_ISERIES + /* Initialize the page table pointer register. */ + LOADADDR(r6,_SDR1) + ld r6,0(r6) /* get the value of _SDR1 */ + mtspr SDR1,r6 /* set the htab location */ +#endif + /* Initialize the first segment table (or SLB) entry */ + ld r3,PACASTABVIRT(r13) /* get addr of segment table */ + bl .stab_initialize + + /* Initialize the kernel stack. Just a repeat for iSeries. */ + LOADADDR(r3,current_set) + sldi r28,r24,3 /* get current_set[cpu#] */ + ldx r1,r3,r28 + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + std r1,PACAKSAVE(r13) + + ld r3,PACASTABREAL(r13) /* get raddr of segment table */ + ori r4,r3,1 /* turn on valid bit */ + +#ifdef CONFIG_PPC_ISERIES + li r0,-1 /* hypervisor call */ + li r3,1 + sldi r3,r3,63 /* 0x8000000000000000 */ + ori r3,r3,4 /* 0x8000000000000004 */ + sc /* HvCall_setASR */ +#else + /* set the ASR */ + ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ + ld r3,0(r3) + lwz r3,PLATFORM(r3) /* r3 = platform flags */ + andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ + beq 98f /* branch if result is 0 */ + mfspr r3,PVR + srwi r3,r3,16 + cmpwi r3,0x37 /* SStar */ + beq 97f + cmpwi r3,0x36 /* IStar */ + beq 97f + cmpwi r3,0x34 /* Pulsar */ + bne 98f +97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ + HVSC /* Invoking hcall */ + b 99f +98: /* !(rpa hypervisor) || !(star) */ + mtasr r4 /* set the stab location */ +99: +#endif + li r7,0 + mtlr r7 + + /* enable MMU and jump to start_secondary */ + LOADADDR(r3,.start_secondary_prolog) + SET_REG_TO_CONST(r4, MSR_KERNEL) +#ifdef DO_SOFT_DISABLE + ori r4,r4,MSR_EE +#endif + mtspr SRR0,r3 + mtspr SRR1,r4 + rfid + b . /* prevent speculative execution */ + +/* + * Running with relocation on at this point. All we want to do is + * zero the stack back-chain pointer before going into C code. + */ +_GLOBAL(start_secondary_prolog) + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl .start_secondary +#endif + +/* + * This subroutine clobbers r11 and r12 + */ +_GLOBAL(enable_64b_mode) + mfmsr r11 /* grab the current MSR */ + li r12,1 + rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) + or r11,r11,r12 + li r12,1 + rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) + or r11,r11,r12 + mtmsrd r11 + isync + blr + +#ifdef CONFIG_PPC_MULTIPLATFORM +/* + * This is where the main kernel code starts. + */ +_STATIC(start_here_multiplatform) + /* get a new offset, now that the kernel has moved. */ + bl .reloc_offset + mr r26,r3 + + /* Clear out the BSS. It may have been done in prom_init, + * already but that's irrelevant since prom_init will soon + * be detached from the kernel completely. Besides, we need + * to clear it now for kexec-style entry. + */ + LOADADDR(r11,__bss_stop) + LOADADDR(r8,__bss_start) + sub r11,r11,r8 /* bss size */ + addi r11,r11,7 /* round up to an even double word */ + rldicl. 
r11,r11,61,3 /* shift right by 3 */ + beq 4f + addi r8,r8,-8 + li r0,0 + mtctr r11 /* zero this many doublewords */ +3: stdu r0,8(r8) + bdnz 3b +4: + + mfmsr r6 + ori r6,r6,MSR_RI + mtmsrd r6 /* RI on */ + +#ifdef CONFIG_HMT + /* Start up the second thread on cpu 0 */ + mfspr r3,PVR + srwi r3,r3,16 + cmpwi r3,0x34 /* Pulsar */ + beq 90f + cmpwi r3,0x36 /* Icestar */ + beq 90f + cmpwi r3,0x37 /* SStar */ + beq 90f + b 91f /* HMT not supported */ +90: li r3,0 + bl .hmt_start_secondary +91: +#endif + + /* The following gets the stack and TOC set up with the regs */ + /* pointing to the real addr of the kernel stack. This is */ + /* all done to support the C function call below which sets */ + /* up the htab. This is done because we have relocated the */ + /* kernel but are still running in real mode. */ + + LOADADDR(r3,init_thread_union) + sub r3,r3,r26 + + /* set up a stack pointer (physical address) */ + addi r1,r3,THREAD_SIZE + li r0,0 + stdu r0,-STACK_FRAME_OVERHEAD(r1) + + /* set up the TOC (physical address) */ + LOADADDR(r2,__toc_start) + addi r2,r2,0x4000 + addi r2,r2,0x4000 + sub r2,r2,r26 + + LOADADDR(r3,cpu_specs) + sub r3,r3,r26 + LOADADDR(r4,cur_cpu_spec) + sub r4,r4,r26 + mr r5,r26 + bl .identify_cpu + + /* Save some low level config HIDs of CPU0 to be copied to + * other CPUs later on, or used for suspend/resume + */ + bl .__save_cpu_setup + sync + + /* Setup a valid physical PACA pointer in SPRG3 for early_setup + * note that boot_cpuid can always be 0 nowadays since there is + * nowhere it can be initialized differently before we reach this + * code + */ + LOADADDR(r27, boot_cpuid) + sub r27,r27,r26 + lwz r27,0(r27) + + LOADADDR(r24, paca) /* Get base vaddr of paca array */ + mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ + add r13,r13,r24 /* for this processor. */ + sub r13,r13,r26 /* convert to physical addr */ + mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ + + /* Do very early kernel initializations, including initial hash table, + * stab and slb setup before we turn on relocation. */ + + /* Restore parameters passed from prom_init/kexec */ + mr r3,r31 + bl .early_setup + + /* set the ASR */ + ld r3,PACASTABREAL(r13) + ori r4,r3,1 /* turn on valid bit */ + ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ + ld r3,0(r3) + lwz r3,PLATFORM(r3) /* r3 = platform flags */ + andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ + beq 98f /* branch if result is 0 */ + mfspr r3,PVR + srwi r3,r3,16 + cmpwi r3,0x37 /* SStar */ + beq 97f + cmpwi r3,0x36 /* IStar */ + beq 97f + cmpwi r3,0x34 /* Pulsar */ + bne 98f +97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ + HVSC /* Invoking hcall */ + b 99f +98: /* !(rpa hypervisor) || !(star) */ + mtasr r4 /* set the stab location */ +99: + /* Set SDR1 (hash table pointer) */ + ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ + ld r3,0(r3) + lwz r3,PLATFORM(r3) /* r3 = platform flags */ + /* Test if bit 0 is set (LPAR bit) */ + andi. r3,r3,PLATFORM_LPAR + bne 98f /* branch if result is !0 */ + LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ + sub r6,r6,r26 + ld r6,0(r6) /* get the value of _SDR1 */ + mtspr SDR1,r6 /* set the htab location */ +98: + LOADADDR(r3,.start_here_common) + SET_REG_TO_CONST(r4, MSR_KERNEL) + mtspr SRR0,r3 + mtspr SRR1,r4 + rfid + b . 
/* prevent speculative execution */ +#endif /* CONFIG_PPC_MULTIPLATFORM */ + + /* This is where all platforms converge execution */ +_STATIC(start_here_common) + /* relocation is on at this point */ + + /* The following code sets up the SP and TOC now that we are */ + /* running with translation enabled. */ + + LOADADDR(r3,init_thread_union) + + /* set up the stack */ + addi r1,r3,THREAD_SIZE + li r0,0 + stdu r0,-STACK_FRAME_OVERHEAD(r1) + + /* Apply the CPUs-specific fixups (nop out sections not relevant + * to this CPU + */ + li r3,0 + bl .do_cpu_ftr_fixups + + LOADADDR(r26, boot_cpuid) + lwz r26,0(r26) + + LOADADDR(r24, paca) /* Get base vaddr of paca array */ + mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ + add r13,r13,r24 /* for this processor. */ + mtspr SPRG3,r13 + + /* ptr to current */ + LOADADDR(r4,init_task) + std r4,PACACURRENT(r13) + + /* Load the TOC */ + ld r2,PACATOC(r13) + std r1,PACAKSAVE(r13) + + bl .setup_system + + /* Load up the kernel context */ +5: +#ifdef DO_SOFT_DISABLE + li r5,0 + stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ + mfmsr r5 + ori r5,r5,MSR_EE /* Hard Enabled */ + mtmsrd r5 +#endif + + bl .start_kernel + +_GLOBAL(hmt_init) +#ifdef CONFIG_HMT + LOADADDR(r5, hmt_thread_data) + mfspr r7,PVR + srwi r7,r7,16 + cmpwi r7,0x34 /* Pulsar */ + beq 90f + cmpwi r7,0x36 /* Icestar */ + beq 91f + cmpwi r7,0x37 /* SStar */ + beq 91f + b 101f +90: mfspr r6,PIR + andi. r6,r6,0x1f + b 92f +91: mfspr r6,PIR + andi. r6,r6,0x3ff +92: sldi r4,r24,3 + stwx r6,r5,r4 + bl .hmt_start_secondary + b 101f + +__hmt_secondary_hold: + LOADADDR(r5, hmt_thread_data) + clrldi r5,r5,4 + li r7,0 + mfspr r6,PIR + mfspr r8,PVR + srwi r8,r8,16 + cmpwi r8,0x34 + bne 93f + andi. r6,r6,0x1f + b 103f +93: andi. r6,r6,0x3f + +103: lwzx r8,r5,r7 + cmpw r8,r6 + beq 104f + addi r7,r7,8 + b 103b + +104: addi r7,r7,4 + lwzx r9,r5,r7 + mr r24,r9 +101: +#endif + mr r3,r24 + b .pSeries_secondary_smp_init + +#ifdef CONFIG_HMT +_GLOBAL(hmt_start_secondary) + LOADADDR(r4,__hmt_secondary_hold) + clrldi r4,r4,4 + mtspr NIADORM, r4 + mfspr r4, MSRDORM + li r5, -65 + and r4, r4, r5 + mtspr MSRDORM, r4 + lis r4,0xffef + ori r4,r4,0x7403 + mtspr TSC, r4 + li r4,0x1f4 + mtspr TST, r4 + mfspr r4, HID0 + ori r4, r4, 0x1 + mtspr HID0, r4 + mfspr r4, SPRN_CTRLF + oris r4, r4, 0x40 + mtspr SPRN_CTRLT, r4 + blr +#endif + +#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) +_GLOBAL(smp_release_cpus) + /* All secondary cpus are spinning on a common + * spinloop, release them all now so they can start + * to spin on their individual paca spinloops. + * For non SMP kernels, the secondary cpus never + * get out of the common spinloop. + */ + li r3,1 + LOADADDR(r5,__secondary_hold_spinloop) + std r3,0(r5) + sync + blr +#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ + + +/* + * We put a few things here that have to be page-aligned. + * This stuff goes at the beginning of the bss, which is page-aligned. + */ + .section ".bss" + + .align PAGE_SHIFT + + .globl empty_zero_page +empty_zero_page: + .space PAGE_SIZE + + .globl swapper_pg_dir +swapper_pg_dir: + .space PAGE_SIZE + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * Used to pass parameters into the kernel like root=/dev/sda1, etc. 
+ */ + .globl cmd_line +cmd_line: + .space COMMAND_LINE_SIZE diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S new file mode 100644 index 000000000000..cb1a3a54a026 --- /dev/null +++ b/arch/powerpc/kernel/head_8xx.S @@ -0,0 +1,860 @@ +/* + * arch/ppc/kernel/except_8xx.S + * + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP + * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> + * Low-level exception handlers and MMU support + * rewritten by Paul Mackerras. + * Copyright (C) 1996 Paul Mackerras. + * MPC8xx modifications by Dan Malek + * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). + * + * This file contains low-level support and setup for PowerPC 8xx + * embedded processors, including trap and interrupt dispatch. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/cache.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* Macro to make the code more readable. */ +#ifdef CONFIG_8xx_CPU6 +#define DO_8xx_CPU6(val, reg) \ + li reg, val; \ + stw reg, 12(r0); \ + lwz reg, 12(r0); +#else +#define DO_8xx_CPU6(val, reg) +#endif + .text + .globl _stext +_stext: + .text + .globl _start +_start: + +/* MPC8xx + * This port was done on an MBX board with an 860. Right now I only + * support an ELF compressed (zImage) boot from EPPC-Bug because the + * code there loads up some registers before calling us: + * r3: ptr to board info data + * r4: initrd_start or if no initrd then 0 + * r5: initrd_end - unused if r4 is 0 + * r6: Start of command line string + * r7: End of command line string + * + * I decided to use conditional compilation instead of checking PVR and + * adding more processor specific branches around code I don't need. + * Since this is an embedded processor, I also appreciate any memory + * savings I can get. + * + * The MPC8xx does not have any BATs, but it supports large page sizes. + * We first initialize the MMU to support 8M byte pages, then load one + * entry into each of the instruction and data TLBs to map the first + * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to + * the "internal" processor registers before MMU_init is called. + * + * The TLB code currently contains a major hack. Since I use the condition + * code register, I have to save and restore it. I am out of registers, so + * I just store it in memory location 0 (the TLB handlers are not reentrant). + * To avoid making any decisions, I need to use the "segment" valid bit + * in the first level table, but that would require many changes to the + * Linux page directory/table functions that I don't want to do right now. + * + * I used to use SPRG2 for a temporary register in the TLB handler, but it + * has since been put to other uses. I now use a hack to save a register + * and the CCR at memory location 0.....Someday I'll fix this..... + * -- Dan + */ + .globl __start +__start: + mr r31,r3 /* save parameters */ + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + + /* We have to turn on the MMU right away so we get cache modes + * set correctly. 
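+ *
+ * In outline, turn_on_mmu below uses rfi as a "branch with new MSR",
+ * so translation is switched on atomically (a sketch, not the code):
+ *
+ *	srr1 = mfmsr() | MSR_IR | MSR_DR;	// target MSR: MMU on
+ *	srr0 = &start_here;			// target PC
+ *	rfi();					// jump; MMU now enabled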
+ */ + bl initial_mmu + +/* We now have the lower 8 Meg mapped into TLB entries, and the caches + * ready to work. + */ + +turn_on_mmu: + mfmsr r0 + ori r0,r0,MSR_DR|MSR_IR + mtspr SPRN_SRR1,r0 + lis r0,start_here@h + ori r0,r0,start_here@l + mtspr SPRN_SRR0,r0 + SYNC + rfi /* enables MMU */ + +/* + * Exception entry code. This code runs with address translation + * turned off, i.e. using physical addresses. + * We assume sprg3 has the physical address of the current + * task's thread_struct. + */ +#define EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; \ + mtspr SPRN_SPRG1,r11; \ + mfcr r10; \ + EXCEPTION_PROLOG_1; \ + EXCEPTION_PROLOG_2 + +#define EXCEPTION_PROLOG_1 \ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ + andi. r11,r11,MSR_PR; \ + tophys(r11,r1); /* use tophys(r1) if kernel */ \ + beq 1f; \ + mfspr r11,SPRN_SPRG3; \ + lwz r11,THREAD_INFO-THREAD(r11); \ + addi r11,r11,THREAD_SIZE; \ + tophys(r11,r11); \ +1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ + + +#define EXCEPTION_PROLOG_2 \ + CLR_TOP32(r11); \ + stw r10,_CCR(r11); /* save registers */ \ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRN_SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRN_SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_SRR0; \ + mfspr r9,SPRN_SRR1; \ + stw r1,GPR1(r11); \ + stw r1,0(r11); \ + tovirt(r1,r11); /* set new kernel sp */ \ + li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ + MTMSRD(r10); /* (except for mach check in rtas) */ \ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Note: code which follows this uses cr0.eq (set if from kernel), + * r11, r12 (SRR0), and r9 (SRR1). + * + * Note2: once we have set r1 we are in a position to take exceptions + * again, and we could thus set MSR:RI at that point. + */ + +/* + * Exception vectors. + */ +#define EXCEPTION(n, label, hdlr, xfer) \ + . = n; \ +label: \ + EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,TRAP(r11); \ + li r10,MSR_KERNEL; \ + copyee(r10, r9); \ + bl tfer; \ +i##n: \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ + ret_from_except) + +/* System reset */ + EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) + +/* Machine check */ + . = 0x200 +MachineCheck: + EXCEPTION_PROLOG + mfspr r4,SPRN_DAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x200, MachineCheckException) + +/* Data access exception. + * This is "never generated" by the MPC8xx. We jump to it for other + * translation errors. + */ + . = 0x300 +DataAccess: + EXCEPTION_PROLOG + mfspr r10,SPRN_DSISR + stw r10,_DSISR(r11) + mr r5,r10 + mfspr r4,SPRN_DAR + EXC_XFER_EE_LITE(0x300, handle_page_fault) + +/* Instruction access exception. + * This is "never generated" by the MPC8xx. We jump to it for other + * translation errors. + */ + . 
= 0x400 +InstructionAccess: + EXCEPTION_PROLOG + mr r4,r12 + mr r5,r9 + EXC_XFER_EE_LITE(0x400, handle_page_fault) + +/* External interrupt */ + EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + +/* Alignment exception */ + . = 0x600 +Alignment: + EXCEPTION_PROLOG + mfspr r4,SPRN_DAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x600, AlignmentException) + +/* Program check exception */ + EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) + +/* No FPU on MPC8xx. This exception is not supposed to happen. +*/ + EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) + +/* Decrementer */ + EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) + + EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) + EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) + +/* System call */ + . = 0xc00 +SystemCall: + EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0xc00, DoSyscall) + +/* Single step - not used on 601 */ + EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) + EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) + EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) + +/* On the MPC8xx, this is a software emulation interrupt. It occurs + * for all unimplemented and illegal instructions. + */ + EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) + + . = 0x1100 +/* + * For the MPC8xx, this is a software tablewalk to load the instruction + * TLB. It is modelled after the example in the Motorola manual. The task + * switch loads the M_TWB register with the pointer to the first level table. + * If we discover there is no second level table (value is zero) or if there + * is an invalid pte, we load that into the TLB, which causes another fault + * into the TLB Error interrupt where we can handle such problems. + * We have to use the MD_xxx registers for the tablewalk because the + * equivalent MI_xxx registers only perform the attribute functions. + */ +InstructionTLBMiss: +#ifdef CONFIG_8xx_CPU6 + stw r3, 8(r0) +#endif + DO_8xx_CPU6(0x3f80, r3) + mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ + mfcr r10 + stw r10, 0(r0) + stw r11, 4(r0) + mfspr r10, SPRN_SRR0 /* Get effective address of fault */ + DO_8xx_CPU6(0x3780, r3) + mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ + mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ + beq 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + rlwimi r10, r11, 0, 2, 19 +3: + lwz r11, 0(r10) /* Get the level 1 entry */ + rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ + beq 2f /* If zero, don't try to find a pte */ + + /* We have a pte table, so load the MI_TWC with the attributes + * for this "segment." + */ + ori r11,r11,1 /* Set valid bit */ + DO_8xx_CPU6(0x2b80, r3) + mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ + DO_8xx_CPU6(0x3b80, r3) + mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ + mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ + lwz r10, 0(r11) /* Get the pte */ + + ori r10, r10, _PAGE_ACCESSED + stw r10, 0(r11) + + /* The Linux PTE won't go exactly into the MMU TLB. + * Software indicator bits 21, 22 and 28 must be clear. + * Software indicator bits 24, 25, 26, and 27 must be + * set. All other Linux PTE bits control the behavior + * of the MMU. 
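+ *
+ * Worked example of the rlwimi fixup below: IBM bits 24-28 of a
+ * 32-bit word form the mask 0x000000f8, so with r11 = 0x00f0 the
+ * insert is equivalent to the C expression
+ *
+ *	r10 = (r10 & ~0xf8) | (0x00f0 & 0xf8);	// sets 0xf0, clears 0x08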
+ */ +2: li r11, 0x00f0 + rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ + DO_8xx_CPU6(0x2d80, r3) + mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ + + mfspr r10, SPRN_M_TW /* Restore registers */ + lwz r11, 0(r0) + mtcr r11 + lwz r11, 4(r0) +#ifdef CONFIG_8xx_CPU6 + lwz r3, 8(r0) +#endif + rfi + + . = 0x1200 +DataStoreTLBMiss: +#ifdef CONFIG_8xx_CPU6 + stw r3, 8(r0) +#endif + DO_8xx_CPU6(0x3f80, r3) + mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ + mfcr r10 + stw r10, 0(r0) + stw r11, 4(r0) + mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + andi. r11, r10, 0x0800 + beq 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + rlwimi r10, r11, 0, 2, 19 +3: + lwz r11, 0(r10) /* Get the level 1 entry */ + rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ + beq 2f /* If zero, don't try to find a pte */ + + /* We have a pte table, so load fetch the pte from the table. + */ + ori r11, r11, 1 /* Set valid bit in physical L2 page */ + DO_8xx_CPU6(0x3b80, r3) + mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ + mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ + lwz r10, 0(r10) /* Get the pte */ + + /* Insert the Guarded flag into the TWC from the Linux PTE. + * It is bit 27 of both the Linux PTE and the TWC (at least + * I got that right :-). It will be better when we can put + * this into the Linux pgd/pmd and load it in the operation + * above. + */ + rlwimi r11, r10, 0, 27, 27 + DO_8xx_CPU6(0x3b80, r3) + mtspr SPRN_MD_TWC, r11 + + mfspr r11, SPRN_MD_TWC /* get the pte address again */ + ori r10, r10, _PAGE_ACCESSED + stw r10, 0(r11) + + /* The Linux PTE won't go exactly into the MMU TLB. + * Software indicator bits 21, 22 and 28 must be clear. + * Software indicator bits 24, 25, 26, and 27 must be + * set. All other Linux PTE bits control the behavior + * of the MMU. + */ +2: li r11, 0x00f0 + rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ + DO_8xx_CPU6(0x3d80, r3) + mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ + + mfspr r10, SPRN_M_TW /* Restore registers */ + lwz r11, 0(r0) + mtcr r11 + lwz r11, 4(r0) +#ifdef CONFIG_8xx_CPU6 + lwz r3, 8(r0) +#endif + rfi + +/* This is an instruction TLB error on the MPC8xx. This could be due + * to many reasons, such as executing guarded memory or illegal instruction + * addresses. There is nothing to do but handle a big time error fault. + */ + . = 0x1300 +InstructionTLBError: + b InstructionAccess + +/* This is the data TLB error on the MPC8xx. This could be due to + * many reasons, including a dirty update to a pte. We can catch that + * one here, but anything else is an error. First, we track down the + * Linux pte. If it is valid, write access is allowed, but the + * page dirty bit is not set, we will set it and reload the TLB. For + * any other case, we bail out to a higher level function that can + * handle it. + */ + . = 0x1400 +DataTLBError: +#ifdef CONFIG_8xx_CPU6 + stw r3, 8(r0) +#endif + DO_8xx_CPU6(0x3f80, r3) + mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ + mfcr r10 + stw r10, 0(r0) + stw r11, 4(r0) + + /* First, make sure this was a store operation. + */ + mfspr r10, SPRN_DSISR + andis. r11, r10, 0x0200 /* If set, indicates store op */ + beq 2f + + /* The EA of a data TLB miss is automatically stored in the MD_EPN + * register. The EA of a data TLB error is automatically stored in + * the DAR, but not the MD_EPN register. 
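+ * (MD_EPN wants the 4 Kbyte effective page number in bits 0-19 and the + * ASID in bits 28-31; that is exactly the layout the rlwinm/rlwimi pair + * below assembles from DAR and M_CASID.)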
We must copy the 20 most + * significant bits of the EA from the DAR to MD_EPN before we + * start walking the page tables. We also need to copy the CASID + * value from the M_CASID register. + * Addendum: The EA of a data TLB error is _supposed_ to be stored + * in DAR, but it seems that this doesn't happen in some cases, such + * as when the error is due to a dcbi instruction to a page with a + * TLB that doesn't have the changed bit set. In such cases, there + * does not appear to be any way to recover the EA of the error + * since it is neither in DAR nor MD_EPN. As a workaround, the + * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs + * are initialized in mapin_ram(). This will avoid the problem, + * assuming we only use the dcbi instruction on kernel addresses. + */ + mfspr r10, SPRN_DAR + rlwinm r11, r10, 0, 0, 19 + ori r11, r11, MD_EVALID + mfspr r10, SPRN_M_CASID + rlwimi r11, r10, 0, 28, 31 + DO_8xx_CPU6(0x3780, r3) + mtspr SPRN_MD_EPN, r11 + + mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + andi. r11, r10, 0x0800 + beq 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + rlwimi r10, r11, 0, 2, 19 +3: + lwz r11, 0(r10) /* Get the level 1 entry */ + rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ + beq 2f /* If zero, bail */ + + /* We have a pte table, so fetch the pte from the table. + */ + ori r11, r11, 1 /* Set valid bit in physical L2 page */ + DO_8xx_CPU6(0x3b80, r3) + mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ + mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ + lwz r10, 0(r11) /* Get the pte */ + + andi. r11, r10, _PAGE_RW /* Is it writeable? */ + beq 2f /* Bail out if not */ + + /* Update 'changed', among others. + */ + ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + mfspr r11, SPRN_MD_TWC /* Get pte address again */ + stw r10, 0(r11) /* and update pte in table */ + + /* The Linux PTE won't go exactly into the MMU TLB. + * Software indicator bits 21, 22 and 28 must be clear. + * Software indicator bits 24, 25, 26, and 27 must be + * set. All other Linux PTE bits control the behavior + * of the MMU. + */ + li r11, 0x00f0 + rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ + DO_8xx_CPU6(0x3d80, r3) + mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ + + mfspr r10, SPRN_M_TW /* Restore registers */ + lwz r11, 0(r0) + mtcr r11 + lwz r11, 4(r0) +#ifdef CONFIG_8xx_CPU6 + lwz r3, 8(r0) +#endif + rfi +2: + mfspr r10, SPRN_M_TW /* Restore registers */ + lwz r11, 0(r0) + mtcr r11 + lwz r11, 4(r0) +#ifdef CONFIG_8xx_CPU6 + lwz r3, 8(r0) +#endif + b DataAccess + + EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) + +/* On the MPC8xx, these next four traps are used for development + * support of breakpoints and such. Someday I will get around to + * using them. + */ + EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) + EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) + + . 
= 0x2000 + + .globl giveup_fpu +giveup_fpu: + blr + +/* + * This is where the main kernel code starts. + */ +start_here: + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to phys current thread */ + tophys(r4,r2) + addi r4,r4,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG3,r4 + li r3,0 + mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */ + + /* stack */ + lis r1,init_thread_union@ha + addi r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init /* We have to do this with MMU on */ + +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + +/* + * Go back to running unmapped so we can load up new values + * and change to using our exception vectors. + * On the 8xx, all we have to do is invalidate the TLB to clear + * the old 8M byte TLB mappings and load the page table base register. + */ + /* The right way to do this would be to track it down through + * init's THREAD like the context switch code does, but this is + * easier......until someone changes init's static structures. + */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + tophys(r6,r6) +#ifdef CONFIG_8xx_CPU6 + lis r4, cpu6_errata_word@h + ori r4, r4, cpu6_errata_word@l + li r3, 0x3980 + stw r3, 12(r4) + lwz r3, 12(r4) +#endif + mtspr SPRN_M_TWB, r6 + lis r4,2f@h + ori r4,r4,2f@l + tophys(r4,r4) + li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi +/* Load up the kernel context */ +2: + SYNC /* Force all PTE updates to finish */ + tlbia /* Clear all TLB entries */ + sync /* wait for tlbia/tlbie to finish */ + TLBSYNC /* ... on all CPUs */ + + /* set up the PTE pointers for the Abatron bdiGDB. + */ + tovirt(r6,r6) + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r5, 0xf0(r0) /* Must match your Abatron config file */ + tophys(r5,r5) + stw r6, 0(r5) + +/* Now turn on the MMU for real! */ + li r4,MSR_KERNEL + lis r3,start_kernel@h + ori r3,r3,start_kernel@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + rfi /* enable MMU and jump to start_kernel */ + +/* Set up the initial MMU state so we can do the first level of + * kernel initialization. This maps the first 8 MBytes of memory 1:1 + * virtual to physical. Also, set the cache mode since that is defined + * by TLB entries and perform any additional mapping (like of the IMMR). + * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel, + * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by + * these mappings is mapped by page tables. + */ +initial_mmu: + tlbia /* Invalidate all TLB entries */ +#ifdef CONFIG_PIN_TLB + lis r8, MI_RSV4I@h + ori r8, r8, 0x1c00 +#else + li r8, 0 +#endif + mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ + +#ifdef CONFIG_PIN_TLB + lis r10, (MD_RSV4I | MD_RESETVAL)@h + ori r10, r10, 0x1c00 + mr r8, r10 +#else + lis r10, MD_RESETVAL@h +#endif +#ifndef CONFIG_8xx_COPYBACK + oris r10, r10, MD_WTDEF@h +#endif + mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ + + /* Now map the lower 8 Meg into the TLBs. For this quick hack, + * we can load the instruction and data TLB registers with the + * same values. 
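+ * The sequence below writes the EPN (virtual address plus valid bit), + * the TWC (8M page size plus valid bit) and the RPN (the physical page + * for address 0) through both the MI_* and MD_* SPRs, then sets the + * protection mode via MI_AP/MD_AP, so the instruction and data sides + * end up with an identical 8 Mbyte mapping of KERNELBASE to physical 0.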
+ */ + lis r8, KERNELBASE@h /* Create vaddr for TLB */ + ori r8, r8, MI_EVALID /* Mark it valid */ + mtspr SPRN_MI_EPN, r8 + mtspr SPRN_MD_EPN, r8 + li r8, MI_PS8MEG /* Set 8M byte page */ + ori r8, r8, MI_SVALID /* Make it valid */ + mtspr SPRN_MI_TWC, r8 + mtspr SPRN_MD_TWC, r8 + li r8, MI_BOOTINIT /* Create RPN for address 0 */ + mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ + mtspr SPRN_MD_RPN, r8 + lis r8, MI_Kp@h /* Set the protection mode */ + mtspr SPRN_MI_AP, r8 + mtspr SPRN_MD_AP, r8 + + /* Map another 8 MByte at the IMMR to get the processor + * internal registers (among other things). + */ +#ifdef CONFIG_PIN_TLB + addi r10, r10, 0x0100 + mtspr SPRN_MD_CTR, r10 +#endif + mfspr r9, 638 /* Get current IMMR */ + andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */ + + mr r8, r9 /* Create vaddr for TLB */ + ori r8, r8, MD_EVALID /* Mark it valid */ + mtspr SPRN_MD_EPN, r8 + li r8, MD_PS8MEG /* Set 8M byte page */ + ori r8, r8, MD_SVALID /* Make it valid */ + mtspr SPRN_MD_TWC, r8 + mr r8, r9 /* Create paddr for TLB */ + ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ + mtspr SPRN_MD_RPN, r8 + +#ifdef CONFIG_PIN_TLB + /* Map two more 8M kernel data pages. + */ + addi r10, r10, 0x0100 + mtspr SPRN_MD_CTR, r10 + + lis r8, KERNELBASE@h /* Create vaddr for TLB */ + addis r8, r8, 0x0080 /* Add 8M */ + ori r8, r8, MI_EVALID /* Mark it valid */ + mtspr SPRN_MD_EPN, r8 + li r9, MI_PS8MEG /* Set 8M byte page */ + ori r9, r9, MI_SVALID /* Make it valid */ + mtspr SPRN_MD_TWC, r9 + li r11, MI_BOOTINIT /* Create RPN for address 0 */ + addis r11, r11, 0x0080 /* Add 8M */ + mtspr SPRN_MD_RPN, r8 + + addis r8, r8, 0x0080 /* Add 8M */ + mtspr SPRN_MD_EPN, r8 + mtspr SPRN_MD_TWC, r9 + addis r11, r11, 0x0080 /* Add 8M */ + mtspr SPRN_MD_RPN, r8 +#endif + + /* Since the cache is enabled according to the information we + * just loaded into the TLB, invalidate and enable the caches here. + * We should probably check/set other modes....later. + */ + lis r8, IDC_INVALL@h + mtspr SPRN_IC_CST, r8 + mtspr SPRN_DC_CST, r8 + lis r8, IDC_ENABLE@h + mtspr SPRN_IC_CST, r8 +#ifdef CONFIG_8xx_COPYBACK + mtspr SPRN_DC_CST, r8 +#else + /* For a debug option, I left this here to easily enable + * the write through cache mode + */ + lis r8, DC_SFWT@h + mtspr SPRN_DC_CST, r8 + lis r8, IDC_ENABLE@h + mtspr SPRN_DC_CST, r8 +#endif + blr + + +/* + * Set up to use a given MMU context. + * r3 is context number, r4 is PGD pointer. + * + * We place the physical address of the new task page directory loaded + * into the MMU base register, and set the ASID compare register with + * the new "context." + */ +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is passed as second argument. + */ + lis r5, KERNELBASE@h + lwz r5, 0xf0(r5) + stw r4, 0x4(r5) +#endif + +#ifdef CONFIG_8xx_CPU6 + lis r6, cpu6_errata_word@h + ori r6, r6, cpu6_errata_word@l + tophys (r4, r4) + li r7, 0x3980 + stw r7, 12(r6) + lwz r7, 12(r6) + mtspr SPRN_M_TWB, r4 /* Update MMU base address */ + li r7, 0x3380 + stw r7, 12(r6) + lwz r7, 12(r6) + mtspr SPRN_M_CASID, r3 /* Update context */ +#else + mtspr SPRN_M_CASID,r3 /* Update context */ + tophys (r4, r4) + mtspr SPRN_M_TWB, r4 /* and pgd */ +#endif + SYNC + blr + +#ifdef CONFIG_8xx_CPU6 +/* It's here because it is unique to the 8xx. + * It is important we get called with interrupts disabled. I used to + * do that, but it appears that all code that calls this already had + * interrupt disabled. 
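+ * (The store/load pair on cpu6_errata_word before the mtspr is the same + * CPU6 errata dance DO_8xx_CPU6 does elsewhere in this file; if an + * interrupt arrived between the two, a handler running its own workaround + * sequence would presumably clobber the scratch word.)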
 + */ + .globl set_dec_cpu6 +set_dec_cpu6: + lis r7, cpu6_errata_word@h + ori r7, r7, cpu6_errata_word@l + li r4, 0x2c00 + stw r4, 8(r7) + lwz r4, 8(r7) + mtspr 22, r3 /* Update Decrementer */ + SYNC + blr +#endif + +/* + * We put a few things here that have to be page-aligned. + * This stuff goes at the beginning of the data segment, + * which is page-aligned. + */ + .data + .globl sdata +sdata: + .globl empty_zero_page +empty_zero_page: + .space 4096 + + .globl swapper_pg_dir +swapper_pg_dir: + .space 4096 + +/* + * This space gets a copy of optional info passed to us by the bootstrap, + * used to pass parameters into the kernel like root=/dev/sda1, etc. + */ + .globl cmd_line +cmd_line: + .space 512 + +/* Room for two PTE table pointers, usually the kernel and current user + * pointers to their respective root page table (pgdir). + */ +abatron_pteptrs: + .space 8 + +#ifdef CONFIG_8xx_CPU6 + .globl cpu6_errata_word +cpu6_errata_word: + .space 16 +#endif + diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S new file mode 100644 index 000000000000..eba5a5f8ff08 --- /dev/null +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -0,0 +1,1058 @@ +/* + * arch/ppc/kernel/head_fsl_booke.S + * + * Kernel execution entry point code. + * + * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> + * Rewritten for PReP. + * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> + * Low-level exception handlers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications. + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * Copyright 2002-2004 MontaVista Software, Inc. + * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> + * Copyright 2004 Freescale Semiconductor, Inc. + * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/config.h> +#include <linux/threads.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> +#include "head_booke.h" + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. 
"mem=128") + * r7 - End of kernel command line string + * + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + /* + * Reserve a word at a fixed location to store the address + * of abatron_pteptrs + */ + nop +/* + * Save parameters we are passed + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + li r24,0 /* CPU number */ + +/* We try to not make any assumptions about how the boot loader + * setup or used the TLBs. We invalidate all mappings from the + * boot loader and load a single entry in TLB1[0] to map the + * first 16M of kernel memory. Any boot info passed from the + * bootloader needs to live in this first 16M. + * + * Requirement on bootloader: + * - The page we're executing in needs to reside in TLB1 and + * have IPROT=1. If not an invalidate broadcast could + * evict the entry we're currently executing in. + * + * r3 = Index of TLB1 were executing in + * r4 = Current MSR[IS] + * r5 = Index of TLB1 temp mapping + * + * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] + * if needed + */ + +/* 1. Find the index of the entry we're executing in */ + bl invstr /* Find our address */ +invstr: mflr r6 /* Make it accessible */ + mfmsr r7 + rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ + mfspr r7, SPRN_PID0 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ +#ifndef CONFIG_E200 + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7,SPRN_PID1 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7, SPRN_PID2 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* Fall through, we had to match */ +#endif +match_TLB: + mfspr r7,SPRN_MAS0 + rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ + + mfspr r7,SPRN_MAS1 /* Insure IPROT set */ + oris r7,r7,MAS1_IPROT@h + mtspr SPRN_MAS1,r7 + tlbwe + +/* 2. Invalidate all entries except the entry we're executing in */ + mfspr r9,SPRN_TLB1CFG + andi. r9,r9,0xfff + li r6,0 /* Set Entry counter to 0 */ +1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ + mtspr SPRN_MAS0,r7 + tlbre + mfspr r7,SPRN_MAS1 + rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ + cmpw r3,r6 + beq skpinv /* Dont update the current execution TLB */ + mtspr SPRN_MAS1,r7 + tlbwe + isync +skpinv: addi r6,r6,1 /* Increment */ + cmpw r6,r9 /* Are we done? */ + bne 1b /* If not, repeat */ + + /* Invalidate TLB0 */ + li r6,0x04 + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + /* Invalidate TLB1 */ + li r6,0x0c + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 3. Setup a temp mapping and jump to it */ + andi. 
r5, r3, 0x1 /* Find an entry not used and is non-zero */ + addi r5, r5, 0x1 + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + + /* Just modify the entry ID and EPN for the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + xori r6,r4,1 /* Setup TMP mapping in the other Address space */ + slwi r6,r6,12 + oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l + mtspr SPRN_MAS1,r6 + mfspr r6,SPRN_MAS2 + li r7,0 /* temp EPN = 0 */ + rlwimi r7,r6,0,20,31 + mtspr SPRN_MAS2,r7 + tlbwe + + xori r6,r4,1 + slwi r6,r6,5 /* setup new context with other address space */ + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r7,r9,0,20,31 + addi r7,r7,24 + mtspr SPRN_SRR0,r7 + mtspr SPRN_SRR1,r6 + rfi + +/* 4. Clear out PIDs & Search info */ + li r6,0 + mtspr SPRN_PID0,r6 +#ifndef CONFIG_E200 + mtspr SPRN_PID1,r6 + mtspr SPRN_PID2,r6 +#endif + mtspr SPRN_MAS6,r6 + +/* 5. Invalidate mapping we started in */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + li r6,0 + mtspr SPRN_MAS1,r6 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 6. Setup KERNELBASE mapping in TLB1[0] */ + lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ + mtspr SPRN_MAS0,r6 + lis r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l + mtspr SPRN_MAS1,r6 + li r7,0 + lis r6,KERNELBASE@h + ori r6,r6,KERNELBASE@l + rlwimi r6,r7,0,20,31 + mtspr SPRN_MAS2,r6 + li r7,(MAS3_SX|MAS3_SW|MAS3_SR) + mtspr SPRN_MAS3,r7 + tlbwe + +/* 7. Jump to KERNELBASE mapping */ + lis r7,MSR_KERNEL@h + ori r7,r7,MSR_KERNEL@l + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r6,r9,0,20,31 + addi r6,r6,24 + mtspr SPRN_SRR0,r6 + mtspr SPRN_SRR1,r7 + rfi /* start execution out of TLB1[0] entry */ + +/* 8. 
Clear out the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + tlbre + mtspr SPRN_MAS1,r8 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + + /* Establish the interrupt vector offsets */ + SET_IVOR(0, CriticalInput); + SET_IVOR(1, MachineCheck); + SET_IVOR(2, DataStorage); + SET_IVOR(3, InstructionStorage); + SET_IVOR(4, ExternalInput); + SET_IVOR(5, Alignment); + SET_IVOR(6, Program); + SET_IVOR(7, FloatingPointUnavailable); + SET_IVOR(8, SystemCall); + SET_IVOR(9, AuxillaryProcessorUnavailable); + SET_IVOR(10, Decrementer); + SET_IVOR(11, FixedIntervalTimer); + SET_IVOR(12, WatchdogTimer); + SET_IVOR(13, DataTLBError); + SET_IVOR(14, InstructionTLBError); + SET_IVOR(15, Debug); + SET_IVOR(32, SPEUnavailable); + SET_IVOR(33, SPEFloatingPointData); + SET_IVOR(34, SPEFloatingPointRound); +#ifndef CONFIG_E200 + SET_IVOR(35, PerformanceMonitor); +#endif + + /* Establish the interrupt vector base */ + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ + mtspr SPRN_IVPR,r4 + + /* Setup the defaults for TLB entries */ + li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l +#ifdef CONFIG_E200 + oris r2,r2,MAS4_TLBSELD(1)@h +#endif + mtspr SPRN_MAS4, r2 + +#if 0 + /* Enable DOZE */ + mfspr r2,SPRN_HID0 + oris r2,r2,HID0_DOZE@h + mtspr SPRN_HID0, r2 +#endif +#ifdef CONFIG_E200 + /* enable dedicated debug exception handling resources (Debug APU) */ + mfspr r2,SPRN_HID0 + ori r2,r2,HID0_DAPUEN@l + mtspr SPRN_HID0,r2 +#endif + +#if !defined(CONFIG_BDI_SWITCH) + /* + * The Abatron BDI JTAG debugger does not tolerate others + * mucking with the debug registers. + */ + lis r2,DBCR0_IDM@h + mtspr SPRN_DBCR0,r2 + /* clear any residual debug events */ + li r2,-1 + mtspr SPRN_DBSR,r2 +#endif + + /* + * This is where the main kernel code starts. + */ + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to current thread */ + addi r4,r2,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@h + ori r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init + + mfspr r3,SPRN_TLB1CFG + andi. r3,r3,0xfff + lis r4,num_tlbcam_entries@ha + stw r3,num_tlbcam_entries@l(r4) +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + + /* Setup PTE pointers for the Abatron bdiGDB */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + lis r4, KERNELBASE@h + ori r4, r4, KERNELBASE@l + stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ + stw r6, 0(r5) + + /* Let's move on */ + lis r4,start_kernel@h + ori r4,r4,start_kernel@l + lis r3,MSR_KERNEL@h + ori r3,r3,MSR_KERNEL@l + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi /* change context and jump to start_kernel */ + +/* Macros to hide the PTE size differences + * + * FIND_PTE -- walks the page tables given EA & pgdir pointer + * r10 -- EA of fault + * r11 -- PGDIR pointer + * r12 -- free + * label 2: is the bailout case + * + * if we find the pte (fall through): + * r11 is low pte word + * r12 is pointer to the pte + */ +#ifdef CONFIG_PTE_64BIT +#define PTE_FLAGS_OFFSET 4 +#define FIND_PTE \ + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ + lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm. 
r12, r11, 0, 0, 20; /* Extract pt base address */ \ + beq 2f; /* Bail if no table */ \ + rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + lwz r11, 4(r12); /* Get pte entry */ +#else +#define PTE_FLAGS_OFFSET 0 +#define FIND_PTE \ + rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ + lwz r11, 0(r11); /* Get L1 entry */ \ + rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ + beq 2f; /* Bail if no table */ \ + rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ + lwz r11, 0(r12); /* Get Linux PTE */ +#endif + +/* + * Interrupt vector entry code + * + * The Book E MMUs are always on so we don't need to handle + * interrupts in real mode as with previous PPC processors. In + * this case we handle interrupts in the kernel virtual address + * space. + * + * Interrupt vectors are dynamically placed relative to the + * interrupt prefix as determined by the address of interrupt_base. + * The interrupt vectors offsets are programmed using the labels + * for each interrupt vector entry. + * + * Interrupt vectors must be aligned on a 16 byte boundary. + * We align on a 32 byte cache line boundary for good measure. + */ + +interrupt_base: + /* Critical Input Interrupt */ + CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) + + /* Machine Check Interrupt */ +#ifdef CONFIG_E200 + /* no RFMCI, MCSRRs on E200 */ + CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) +#else + MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) +#endif + + /* Data Storage Interrupt */ + START_EXCEPTION(DataStorage) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + + /* + * Check if it was a store fault, if not then bail + * because a user tried to access a kernel or + * read-protected page. Otherwise, get the + * offending address and handle it. + */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_ST@h + beq 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 0, r10, r11 + bge 2f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + FIND_PTE + + /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ + andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE + cmpwi 0, r13, _PAGE_RW|_PAGE_USER + bne 2f /* Bail if not */ + + /* Update 'changed'. */ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ + + /* MAS2 not updated as the entry does exist in the tlb, this + fault taken to detect state transition (eg: COW -> DIRTY) + */ + andi. r11, r11, _PAGE_HWEXEC + rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ + ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ + + /* update search PID in MAS6, AS = 0 */ + mfspr r12, SPRN_PID0 + slwi r12, r12, 16 + mtspr SPRN_MAS6, r12 + + /* find the TLB index that caused the fault. It has to be here. */ + tlbsx 0, r10 + + /* only update the perm bits, assume the RPN is fine */ + mfspr r12, SPRN_MAS3 + rlwimi r12, r11, 0, 20, 31 + mtspr SPRN_MAS3,r12 + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + rfi /* Force context change */ + +2: + /* + * The bailout. 
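+ * (We land here whenever the fast path cannot legally fix the fault in + * the TLB alone: the access was not a store, the EA was at or above + * TASK_SIZE, or the PTE lacked _PAGE_RW|_PAGE_USER.)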
Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b data_access + + /* Instruction Storage Interrupt */ + INSTRUCTION_STORAGE_EXCEPTION + + /* External Input Interrupt */ + EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) + + /* Alignment Interrupt */ + ALIGNMENT_EXCEPTION + + /* Program Interrupt */ + PROGRAM_EXCEPTION + + /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else +#ifdef CONFIG_E200 + /* E200 treats 'normal' floating point instructions as FP Unavail exception */ + EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) +#else + EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif +#endif + + /* System Call Interrupt */ + START_EXCEPTION(SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0x0c00, DoSyscall) + + /* Auxillary Processor Unavailable Interrupt */ + EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) + + /* Decrementer Interrupt */ + DECREMENTER_EXCEPTION + + /* Fixed Internal Timer Interrupt */ + /* TODO: Add FIT support */ + EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) + + /* Watchdog Timer Interrupt */ +#ifdef CONFIG_BOOKE_WDT + CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) +#else + CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) +#endif + + /* Data TLB Error Interrupt */ + START_EXCEPTION(DataTLBError) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + rlwinm r12,r12,0,16,1 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) + +4: + FIND_PTE + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ + beq 2f /* Bail if not present */ + +#ifdef CONFIG_PTE_64BIT + lwz r13, 0(r12) +#endif + ori r11, r11, _PAGE_ACCESSED + stw r11, PTE_FLAGS_OFFSET(r12) + + /* Jump to common tlb load */ + b finish_tlb_load +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b data_access + + /* Instruction TLB Error Interrupt */ + /* + * Nearly the same as above, except we get our + * information from different registers and bailout + * to a different point. + */ + START_EXCEPTION(InstructionTLBError) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 + mtspr SPRN_SPRG4W, r12 + mtspr SPRN_SPRG5W, r13 + mfcr r11 + mtspr SPRN_SPRG7W, r11 + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. 
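+ * (As in DataTLBError above: the fault address is compared against + * TASK_SIZE, kernel addresses are resolved via swapper_pg_dir, and TID + * in MAS1 is cleared so the entry we write matches regardless of the + * current PID.)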
+ */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + rlwinm r12,r12,0,16,1 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) + +4: + FIND_PTE + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ + beq 2f /* Bail if not present */ + +#ifdef CONFIG_PTE_64BIT + lwz r13, 0(r12) +#endif + ori r11, r11, _PAGE_ACCESSED + stw r11, PTE_FLAGS_OFFSET(r12) + + /* Jump to common TLB load point */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b InstructionStorage + +#ifdef CONFIG_SPE + /* SPE Unavailable */ + START_EXCEPTION(SPEUnavailable) + NORMAL_EXCEPTION_PROLOG + bne load_up_spe + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x2010, KernelSPE) +#else + EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Data */ +#ifdef CONFIG_SPE + EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); +#else + EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Round */ + EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) + + /* Performance Monitor */ + EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) + + + /* Debug Interrupt */ + DEBUG_EXCEPTION + +/* + * Local functions + */ + + /* + * Data TLB exceptions will bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +data_access: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + andis. r10,r5,(ESR_ILK|ESR_DLK)@h + bne 1f + EXC_XFER_EE_LITE(0x0300, handle_page_fault) +1: + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x0300, CacheLockingException) + +/* + + * Both the instruction and data TLB miss get to this + * point to load the TLB. + * r10 - EA of fault + * r11 - TLB (info from Linux PTE) + * r12, r13 - available to use + * CR5 - results of addr < TASK_SIZE + * MAS0, MAS1 - loaded with proper value when we get here + * MAS2, MAS3 - will need additional info from Linux PTE + * Upon exit, we reload everything and RFI. + */ +finish_tlb_load: + /* + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + + mfspr r12, SPRN_MAS2 +#ifdef CONFIG_PTE_64BIT + rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ +#else + rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ +#endif + mtspr SPRN_MAS2, r12 + + bge 5, 1f + + /* is user addr */ + andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) + andi. 
r10, r11, _PAGE_USER /* Test for _PAGE_USER */ + srwi r10, r12, 1 + or r12, r12, r10 /* Copy user perms into supervisor */ + iseleq r12, 0, r12 + b 2f + + /* is kernel addr */ +1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ + ori r12, r12, (MAS3_SX | MAS3_SR) + +#ifdef CONFIG_PTE_64BIT +2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ + rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ + mtspr SPRN_MAS3, r12 +BEGIN_FTR_SECTION + srwi r10, r13, 8 /* grab RPN[8:31] */ + mtspr SPRN_MAS7, r10 +END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) +#else +2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ + mtspr SPRN_MAS3, r11 +#endif +#ifdef CONFIG_E200 + /* Round robin TLB1 entries assignment */ + mfspr r12, SPRN_MAS0 + + /* Extract TLB1CFG(NENTRY) */ + mfspr r11, SPRN_TLB1CFG + andi. r11, r11, 0xfff + + /* Extract MAS0(NV) */ + andi. r13, r12, 0xfff + addi r13, r13, 1 + cmpw 0, r13, r11 + addi r12, r12, 1 + + /* check if we need to wrap */ + blt 7f + + /* wrap back to first free tlbcam entry */ + lis r13, tlbcam_index@ha + lwz r13, tlbcam_index@l(r13) + rlwimi r12, r13, 0, 20, 31 +7: + mtspr SPRN_MAS0,r12 +#endif /* CONFIG_E200 */ + + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRN_SPRG7R + mtcr r11 + mfspr r13, SPRN_SPRG5R + mfspr r12, SPRN_SPRG4R + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + rfi /* Force context change */ + +#ifdef CONFIG_SPE +/* Note that the SPE support is closely modeled after the AltiVec + * support. Changes to one are likely to be applicable to the + * other! */ +load_up_spe: +/* + * Disable SPE for the task which had SPE previously, + * and save its SPE registers in its thread_struct. + * Enables SPE for use in the kernel on return. + * On SMP we know the SPE units are free, since we give it up every + * switch. -- Kumar + */ + mfmsr r5 + oris r5,r5,MSR_SPE@h + mtmsr r5 /* enable use of SPE now */ + isync +/* + * For SMP, we don't do lazy SPE switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_spe in switch_to. + */ +#ifndef CONFIG_SMP + lis r3,last_task_used_spe@ha + lwz r4,last_task_used_spe@l(r3) + cmpi 0,r4,0 + beq 1f + addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ + SAVE_32EVRS(0,r10,r4) + evxor evr10, evr10, evr10 /* clear out evr10 */ + evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ + li r5,THREAD_ACC + evstddx evr10, r4, r5 /* save off accumulator */ + lwz r5,PT_REGS(r4) + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r10,MSR_SPE@h + andc r4,r4,r10 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of SPE after return */ + oris r9,r9,MSR_SPE@h + mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ + li r4,1 + li r10,THREAD_ACC + stw r4,THREAD_USED_SPE(r5) + evlddx evr4,r10,r5 + evmra evr4,evr4 + REST_32EVRS(0,r10,r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + stw r4,last_task_used_spe@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ +2: REST_4GPRS(3, r11) + lwz r10,_CCR(r11) + REST_GPR(1, r11) + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 + REST_GPR(10, r11) + mtspr SPRN_SRR1,r9 + mtspr SPRN_SRR0,r12 + REST_GPR(9, r11) + REST_GPR(12, r11) + lwz r11,GPR11(r11) + SYNC + rfi + +/* + * SPE unavailable trap from kernel - print a message, but let + * the task use SPE in the kernel until it returns to user mode. 
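+ * (Mechanically: set MSR_SPE in the saved MSR image on the exception + * frame so the rfi on the return path comes back with SPE enabled, then + * printk the offending task and NIP below.)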
+ */ +KernelSPE: + lwz r3,_MSR(r1) + oris r3,r3,MSR_SPE@h + stw r3,_MSR(r1) /* enable use of SPE after return */ + lis r3,87f@h + ori r3,r3,87f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +87: .string "SPE used in kernel (task=%p, pc=%x) \n" + .align 4,0 + +#endif /* CONFIG_SPE */ + +/* + * Global functions + */ + +/* + * extern void loadcam_entry(unsigned int index) + * + * Load TLBCAM[index] entry in to the L2 CAM MMU + */ +_GLOBAL(loadcam_entry) + lis r4,TLBCAM@ha + addi r4,r4,TLBCAM@l + mulli r5,r3,20 + add r3,r5,r4 + lwz r4,0(r3) + mtspr SPRN_MAS0,r4 + lwz r4,4(r3) + mtspr SPRN_MAS1,r4 + lwz r4,8(r3) + mtspr SPRN_MAS2,r4 + lwz r4,12(r3) + mtspr SPRN_MAS3,r4 + tlbwe + isync + blr + +/* + * extern void giveup_altivec(struct task_struct *prev) + * + * The e500 core does not have an AltiVec unit. + */ +_GLOBAL(giveup_altivec) + blr + +#ifdef CONFIG_SPE +/* + * extern void giveup_spe(struct task_struct *prev) + * + */ +_GLOBAL(giveup_spe) + mfmsr r5 + oris r5,r5,MSR_SPE@h + SYNC + mtmsr r5 /* enable use of SPE now */ + isync + cmpi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpi 0,r5,0 + SAVE_32EVRS(0, r4, r3) + evxor evr6, evr6, evr6 /* clear out evr6 */ + evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ + li r4,THREAD_ACC + evstddx evr6, r4, r3 /* save off accumulator */ + mfspr r6,SPRN_SPEFSCR + stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r3,MSR_SPE@h + andc r4,r4,r3 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_spe@ha + stw r5,last_task_used_spe@l(r4) +#endif /* CONFIG_SMP */ + blr +#endif /* CONFIG_SPE */ + +/* + * extern void giveup_fpu(struct task_struct *prev) + * + * Not all FSL Book-E cores have an FPU + */ +#ifndef CONFIG_PPC_FPU +_GLOBAL(giveup_fpu) + blr +#endif + +/* + * extern void abort(void) + * + * At present, this routine just applies a system reset. + */ +_GLOBAL(abort) + li r13,0 + mtspr SPRN_DBCR0,r13 /* disable all debug events */ + mfmsr r13 + ori r13,r13,MSR_DE@l /* Enable Debug Events */ + mtmsr r13 + mfspr r13,SPRN_DBCR0 + lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r4, 0x4(r5) +#endif + mtspr SPRN_PID,r3 + isync /* Force context change */ + blr + +/* + * We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. + */ + .data +_GLOBAL(sdata) +_GLOBAL(empty_zero_page) + .space 4096 +_GLOBAL(swapper_pg_dir) + .space 4096 + +/* Reserved 4k for the critical exception stack & 4k for the machine + * check stack per CPU for kernel mode exceptions */ + .section .bss + .align 12 +exception_stack_bottom: + .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS +_GLOBAL(exception_stack_top) + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. + */ +_GLOBAL(cmd_line) + .space 512 + +/* + * Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. 
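+ * (Word 0 is the kernel pgdir, word 1 the current user pgdir: the boot + * code above stores swapper_pg_dir in word 0, and set_context() refreshes + * word 1 on every context switch for the Abatron BDI2000 firmware.)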
+ */ +abatron_pteptrs: + .space 8 + diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S new file mode 100644 index 000000000000..1a2194cf6828 --- /dev/null +++ b/arch/powerpc/kernel/idle_6xx.S @@ -0,0 +1,233 @@ +/* + * This file contains the power_save function for 6xx & 7xxx CPUs + * rewritten in assembler + * + * Warning ! This code assumes that if your machine has a 750fx + * it will have PLL 1 set to low speed mode (used during NAP/DOZE). + * if this is not the case some additional changes will have to + * be done to check a runtime var (a bit like powersave-nap) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/config.h> +#include <linux/threads.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/cputable.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +#undef DEBUG + + .text + +/* + * Init idle, called at early CPU setup time from head.S for each CPU + * Make sure no rest of NAP mode remains in HID0, save default + * values for some CPU specific registers. Called with r24 + * containing CPU number and r3 reloc offset + */ +_GLOBAL(init_idle_6xx) +BEGIN_FTR_SECTION + mfspr r4,SPRN_HID0 + rlwinm r4,r4,0,10,8 /* Clear NAP */ + mtspr SPRN_HID0, r4 + b 1f +END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) + blr +1: + slwi r5,r24,2 + add r5,r5,r3 +BEGIN_FTR_SECTION + mfspr r4,SPRN_MSSCR0 + addis r6,r5, nap_save_msscr0@ha + stw r4,nap_save_msscr0@l(r6) +END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) +BEGIN_FTR_SECTION + mfspr r4,SPRN_HID1 + addis r6,r5,nap_save_hid1@ha + stw r4,nap_save_hid1@l(r6) +END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) + blr + +/* + * Here is the power_save_6xx function. This could eventually be + * split into several functions & changing the function pointer + * depending on the various features. + */ +_GLOBAL(ppc6xx_idle) + /* Check if we can nap or doze, put HID0 mask in r3 + */ + lis r3, 0 +BEGIN_FTR_SECTION + lis r3,HID0_DOZE@h +END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) +BEGIN_FTR_SECTION + /* We must dynamically check for the NAP feature as it + * can be cleared by CPU init after the fixups are done + */ + lis r4,cur_cpu_spec@ha + lwz r4,cur_cpu_spec@l(r4) + lwz r4,CPU_SPEC_FEATURES(r4) + andi. r0,r4,CPU_FTR_CAN_NAP + beq 1f + /* Now check if user or arch enabled NAP mode */ + lis r4,powersave_nap@ha + lwz r4,powersave_nap@l(r4) + cmpwi 0,r4,0 + beq 1f + lis r3,HID0_NAP@h +1: +END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) + cmpwi 0,r3,0 + beqlr + + /* Clear MSR:EE */ + mfmsr r7 + rlwinm r0,r7,0,17,15 + mtmsr r0 + + /* Check current_thread_info()->flags */ + rlwinm r4,r1,0,0,18 + lwz r4,TI_FLAGS(r4) + andi. r0,r4,_TIF_NEED_RESCHED + beq 1f + mtmsr r7 /* out of line this ? */ + blr +1: + /* Some pre-nap cleanups needed on some CPUs */ + andis. r0,r3,HID0_NAP@h + beq 2f +BEGIN_FTR_SECTION + /* Disable L2 prefetch on some 745x and try to ensure + * L2 prefetch engines are idle. As explained by errata + * text, we can't be sure they are, we just hope very hard + * that well be enough (sic !). At least I noticed Apple + * doesn't even bother doing the dcbf's here... 
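+ * (The apparent idea: clear the prefetch enables in MSSCR0, then dcbf + * the same cacheable line a few times so any in-flight L2 prefetches + * have drained before we actually nap.)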
+ */ + mfspr r4,SPRN_MSSCR0 + rlwinm r4,r4,0,0,29 + sync + mtspr SPRN_MSSCR0,r4 + sync + isync + lis r4,KERNELBASE@h + dcbf 0,r4 + dcbf 0,r4 + dcbf 0,r4 + dcbf 0,r4 +END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) +#ifdef DEBUG + lis r6,nap_enter_count@ha + lwz r4,nap_enter_count@l(r6) + addi r4,r4,1 + stw r4,nap_enter_count@l(r6) +#endif +2: +BEGIN_FTR_SECTION + /* Go to low speed mode on some 750FX */ + lis r4,powersave_lowspeed@ha + lwz r4,powersave_lowspeed@l(r4) + cmpwi 0,r4,0 + beq 1f + mfspr r4,SPRN_HID1 + oris r4,r4,0x0001 + mtspr SPRN_HID1,r4 +1: +END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) + + /* Go to NAP or DOZE now */ + mfspr r4,SPRN_HID0 + lis r5,(HID0_NAP|HID0_SLEEP)@h +BEGIN_FTR_SECTION + oris r5,r5,HID0_DOZE@h +END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) + andc r4,r4,r5 + or r4,r4,r3 +BEGIN_FTR_SECTION + oris r4,r4,HID0_DPM@h /* that should be done once for all */ +END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) + mtspr SPRN_HID0,r4 +BEGIN_FTR_SECTION + DSSALL + sync +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + ori r7,r7,MSR_EE /* Could be ommited (already set) */ + oris r7,r7,MSR_POW@h + sync + isync + mtmsr r7 + isync + sync + blr + +/* + * Return from NAP/DOZE mode, restore some CPU specific registers, + * we are called with DR/IR still off and r2 containing physical + * address of current. + */ +_GLOBAL(power_save_6xx_restore) + mfspr r11,SPRN_HID0 + rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ + cror 4*cr1+eq,4*cr0+eq,4*cr0+eq +BEGIN_FTR_SECTION + rlwinm r11,r11,0,9,7 /* Clear DOZE */ +END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) + mtspr SPRN_HID0, r11 + +#ifdef DEBUG + beq cr1,1f + lis r11,(nap_return_count-KERNELBASE)@ha + lwz r9,nap_return_count@l(r11) + addi r9,r9,1 + stw r9,nap_return_count@l(r11) +1: +#endif + + rlwinm r9,r1,0,0,18 + tophys(r9,r9) + lwz r11,TI_CPU(r9) + slwi r11,r11,2 + /* Todo make sure all these are in the same page + * and load r22 (@ha part + CPU offset) only once + */ +BEGIN_FTR_SECTION + beq cr1,1f + addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha + lwz r9,nap_save_msscr0@l(r9) + mtspr SPRN_MSSCR0, r9 + sync + isync +1: +END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) +BEGIN_FTR_SECTION + addis r9,r11,(nap_save_hid1-KERNELBASE)@ha + lwz r9,nap_save_hid1@l(r9) + mtspr SPRN_HID1, r9 +END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) + b transfer_to_handler_cont + + .data + +_GLOBAL(nap_save_msscr0) + .space 4*NR_CPUS + +_GLOBAL(nap_save_hid1) + .space 4*NR_CPUS + +_GLOBAL(powersave_nap) + .long 0 +_GLOBAL(powersave_lowspeed) + .long 0 + +#ifdef DEBUG +_GLOBAL(nap_enter_count) + .space 4 +_GLOBAL(nap_return_count) + .space 4 +#endif diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c new file mode 100644 index 000000000000..f5a9d2a84fa1 --- /dev/null +++ b/arch/powerpc/kernel/process.c @@ -0,0 +1,724 @@ +/* + * arch/ppc/kernel/process.c + * + * Derived from "arch/i386/kernel/process.c" + * Copyright (C) 1995 Linus Torvalds + * + * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and + * Paul Mackerras (paulus@cs.anu.edu.au) + * + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/elf.h> +#include <linux/init.h> +#include <linux/prctl.h> +#include <linux/init_task.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/mqueue.h> +#include <linux/hardirq.h> + +#include <asm/pgtable.h> +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <asm/mmu.h> +#include <asm/prom.h> + +extern unsigned long _get_SP(void); + +#ifndef CONFIG_SMP +struct task_struct *last_task_used_math = NULL; +struct task_struct *last_task_used_altivec = NULL; +struct task_struct *last_task_used_spe = NULL; +#endif + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); +EXPORT_SYMBOL(init_mm); + +/* this is 8kB-aligned so we can get to the thread_info struct + at the base of it from the stack pointer with 1 integer instruction. */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = +{ INIT_THREAD_INFO(init_task) }; + +/* initial task structure */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); + +/* only used to get secondary processor up */ +struct task_struct *current_set[NR_CPUS] = {&init_task, }; + +/* + * Make sure the floating-point register state in the + * the thread_struct is up to date for task tsk. + */ +void flush_fp_to_thread(struct task_struct *tsk) +{ + if (tsk->thread.regs) { + /* + * We need to disable preemption here because if we didn't, + * another process could get scheduled after the regs->msr + * test but before we have finished saving the FP registers + * to the thread_struct. That process could take over the + * FPU, and then when we get scheduled again we would store + * bogus values for the remaining FP registers. + */ + preempt_disable(); + if (tsk->thread.regs->msr & MSR_FP) { +#ifdef CONFIG_SMP + /* + * This should only ever be called for current or + * for a stopped child process. Since we save away + * the FP register state on context switch on SMP, + * there is something wrong if a stopped child appears + * to still have its FP state in the CPU registers. 
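+ * + * In other words the SMP invariant is: a descheduled task never owns + * the FPU, because __switch_to() below does giveup_fpu(prev) whenever + * the outgoing thread has MSR_FP set; only current can legitimately + * reach this point with live FP state.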
+ */ + BUG_ON(tsk != current); +#endif + giveup_fpu(current); + } + preempt_enable(); + } +} + +void enable_kernel_fp(void) +{ + WARN_ON(preemptible()); + +#ifdef CONFIG_SMP + if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) + giveup_fpu(current); + else + giveup_fpu(NULL); /* just enables FP for kernel */ +#else + giveup_fpu(last_task_used_math); +#endif /* CONFIG_SMP */ +} +EXPORT_SYMBOL(enable_kernel_fp); + +int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) +{ + if (!tsk->thread.regs) + return 0; + flush_fp_to_thread(current); + + memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); + + return 1; +} + +#ifdef CONFIG_ALTIVEC +void enable_kernel_altivec(void) +{ + WARN_ON(preemptible()); + +#ifdef CONFIG_SMP + if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) + giveup_altivec(current); + else + giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ +#else + giveup_altivec(last_task_used_altivec); +#endif /* CONFIG_SMP */ +} +EXPORT_SYMBOL(enable_kernel_altivec); + +/* + * Make sure the VMX/Altivec register state in the + * the thread_struct is up to date for task tsk. + */ +void flush_altivec_to_thread(struct task_struct *tsk) +{ + if (tsk->thread.regs) { + preempt_disable(); + if (tsk->thread.regs->msr & MSR_VEC) { +#ifdef CONFIG_SMP + BUG_ON(tsk != current); +#endif + giveup_altivec(current); + } + preempt_enable(); + } +} + +int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) +{ + flush_altivec_to_thread(current); + memcpy(vrregs, ¤t->thread.vr[0], sizeof(*vrregs)); + return 1; +} +#endif /* CONFIG_ALTIVEC */ + +#ifdef CONFIG_SPE + +void enable_kernel_spe(void) +{ + WARN_ON(preemptible()); + +#ifdef CONFIG_SMP + if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) + giveup_spe(current); + else + giveup_spe(NULL); /* just enable SPE for kernel - force */ +#else + giveup_spe(last_task_used_spe); +#endif /* __SMP __ */ +} +EXPORT_SYMBOL(enable_kernel_spe); + +void flush_spe_to_thread(struct task_struct *tsk) +{ + if (tsk->thread.regs) { + preempt_disable(); + if (tsk->thread.regs->msr & MSR_SPE) { +#ifdef CONFIG_SMP + BUG_ON(tsk != current); +#endif + giveup_spe(current); + } + preempt_enable(); + } +} + +int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) +{ + flush_spe_to_thread(current); + /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ + memcpy(evrregs, ¤t->thread.evr[0], sizeof(u32) * 35); + return 1; +} +#endif /* CONFIG_SPE */ + +static void set_dabr_spr(unsigned long val) +{ + mtspr(SPRN_DABR, val); +} + +int set_dabr(unsigned long dabr) +{ + int ret = 0; + +#ifdef CONFIG_PPC64 + if (firmware_has_feature(FW_FEATURE_XDABR)) { + /* We want to catch accesses from kernel and userspace */ + unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER; + ret = plpar_set_xdabr(dabr, flags); + } else if (firmware_has_feature(FW_FEATURE_DABR)) { + ret = plpar_set_dabr(dabr); + } else +#endif + set_dabr_spr(dabr); + + return ret; +} + +static DEFINE_PER_CPU(unsigned long, current_dabr); + +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *new) +{ + struct thread_struct *new_thread, *old_thread; + unsigned long flags; + struct task_struct *last; + +#ifdef CONFIG_SMP + /* avoid complexity of lazy save/restore of fpu + * by just saving it every time we switch out if + * this task used the fpu during the last quantum. + * + * If it tries to use the fpu again, it'll trap and + * reload its fp regs. So we don't have to do a restore + * every switch, just a save. 
+ * -- Cort + */ + if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) + giveup_fpu(prev); +#ifdef CONFIG_ALTIVEC + /* + * If the previous thread used altivec in the last quantum + * (thus changing altivec regs) then save them. + * We used to check the VRSAVE register but not all apps + * set it, so we don't rely on it now (and in fact we need + * to save & restore VSCR even if VRSAVE == 0). -- paulus + * + * On SMP we always save/restore altivec regs just to avoid the + * complexity of changing processors. + * -- Cort + */ + if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) + giveup_altivec(prev); + /* Avoid the trap. On smp this this never happens since + * we don't set last_task_used_altivec -- Cort + */ + if (new->thread.regs && last_task_used_altivec == new) + new->thread.regs->msr |= MSR_VEC; +#endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_SPE + /* + * If the previous thread used spe in the last quantum + * (thus changing spe regs) then save them. + * + * On SMP we always save/restore spe regs just to avoid the + * complexity of changing processors. + */ + if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) + giveup_spe(prev); + /* Avoid the trap. On smp this this never happens since + * we don't set last_task_used_spe + */ + if (new->thread.regs && last_task_used_spe == new) + new->thread.regs->msr |= MSR_SPE; +#endif /* CONFIG_SPE */ +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_PPC64 /* for now */ + if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) { + set_dabr(new->thread.dabr); + __get_cpu_var(current_dabr) = new->thread.dabr; + } +#endif + + new_thread = &new->thread; + old_thread = ¤t->thread; + local_irq_save(flags); + last = _switch(old_thread, new_thread); + + local_irq_restore(flags); + + return last; +} + +void show_regs(struct pt_regs * regs) +{ + int i, trap; + + printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n", + regs->nip, regs->link, regs->gpr[1], regs, regs->trap, + print_tainted()); + printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n", + regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, + regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, + regs->msr&MSR_IR ? 1 : 0, + regs->msr&MSR_DR ? 
+
+void show_regs(struct pt_regs * regs)
+{
+	int i, trap;
+
+	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
+	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
+	       print_tainted());
+	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
+	       regs->msr, regs->msr & MSR_EE ? 1 : 0, regs->msr & MSR_PR ? 1 : 0,
+	       regs->msr & MSR_FP ? 1 : 0, regs->msr & MSR_ME ? 1 : 0,
+	       regs->msr & MSR_IR ? 1 : 0,
+	       regs->msr & MSR_DR ? 1 : 0);
+	trap = TRAP(regs);
+	if (trap == 0x300 || trap == 0x600)
+		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
+	printk("TASK = %p[%d] '%s' THREAD: %p\n",
+	       current, current->pid, current->comm, current->thread_info);
+	printk("Last syscall: %ld ", current->thread.last_syscall);
+
+#ifdef CONFIG_SMP
+	printk(" CPU: %d", smp_processor_id());
+#endif /* CONFIG_SMP */
+
+	for (i = 0; i < 32; i++) {
+		long r;
+		if ((i % 8) == 0)
+			printk("\n" KERN_INFO "GPR%02d: ", i);
+		if (__get_user(r, &regs->gpr[i]))
+			break;
+		printk("%08lX ", r);
+		if (i == 12 && !FULL_REGS(regs))
+			break;
+	}
+	printk("\n");
+#ifdef CONFIG_KALLSYMS
+	/*
+	 * Lookup NIP late so we have the best chance of getting the
+	 * above info out without failing
+	 */
+	printk("NIP [%08lx] ", regs->nip);
+	print_symbol("%s\n", regs->nip);
+	printk("LR [%08lx] ", regs->link);
+	print_symbol("%s\n", regs->link);
+#endif
+	show_stack(current, (unsigned long *) regs->gpr[1]);
+}
+
+void exit_thread(void)
+{
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+}
+
+void flush_thread(void)
+{
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64	/* for now */
+	if (current->thread.dabr) {
+		current->thread.dabr = 0;
+		set_dabr(0);
+	}
+#endif
+}
+
+void
+release_thread(struct task_struct *t)
+{
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
+	flush_spe_to_thread(current);
+}
+
+/*
+ * Copy a thread.
+ */
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+	    unsigned long unused,
+	    struct task_struct *p, struct pt_regs *regs)
+{
+	struct pt_regs *childregs, *kregs;
+	extern void ret_from_fork(void);
+	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
+	unsigned long childframe;
+
+	CHECK_FULL_REGS(regs);
+	/* Copy registers */
+	sp -= sizeof(struct pt_regs);
+	childregs = (struct pt_regs *) sp;
+	*childregs = *regs;
+	if ((childregs->msr & MSR_PR) == 0) {
+		/* for kernel thread, set `current' and stackptr in new task */
+		childregs->gpr[1] = sp + sizeof(struct pt_regs);
+		childregs->gpr[2] = (unsigned long) p;
+		p->thread.regs = NULL;	/* no user register state */
+	} else {
+		childregs->gpr[1] = usp;
+		p->thread.regs = childregs;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->gpr[2] = childregs->gpr[6];
+	}
+	childregs->gpr[3] = 0;	/* Result from fork() */
+	sp -= STACK_FRAME_OVERHEAD;
+	childframe = sp;
+
+	/*
+	 * The way this works is that at some point in the future
+	 * some task will call _switch to switch to the new task.
+	 * That will pop off the stack frame created below and start
+	 * the new task running at ret_from_fork.  The new task will
+	 * do some housekeeping and then return from the fork or clone
+	 * system call, using the stack frame created above.
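+	 *
+	 * The child's kernel stack as built here, from the top of the
+	 * stack page downwards (sketch):
+	 *
+	 *	p->thread_info + THREAD_SIZE
+	 *	  [ struct pt_regs       ]  <- childregs (copy of *regs)
+	 *	  [ STACK_FRAME_OVERHEAD ]  <- childframe
+	 *	  [ struct pt_regs       ]  <- kregs, nip = ret_from_fork
+	 *	  [ STACK_FRAME_OVERHEAD ]  <- p->thread.ksp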
+	 */
+	sp -= sizeof(struct pt_regs);
+	kregs = (struct pt_regs *) sp;
+	sp -= STACK_FRAME_OVERHEAD;
+	p->thread.ksp = sp;
+	kregs->nip = (unsigned long)ret_from_fork;
+
+	p->thread.last_syscall = -1;
+
+	return 0;
+}
+
+/*
+ * Set up a thread for executing a new program
+ */
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
+{
+	set_fs(USER_DS);
+	memset(regs->gpr, 0, sizeof(regs->gpr));
+	regs->ctr = 0;
+	regs->link = 0;
+	regs->xer = 0;
+	regs->ccr = 0;
+	regs->mq = 0;
+	regs->nip = nip;
+	regs->gpr[1] = sp;
+	regs->msr = MSR_USER;
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
+	current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+	memset(current->thread.vr, 0, sizeof(current->thread.vr));
+	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
+	current->thread.vrsave = 0;
+	current->thread.used_vr = 0;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	memset(current->thread.evr, 0, sizeof(current->thread.evr));
+	current->thread.acc = 0;
+	current->thread.spefscr = 0;
+	current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
+}
+
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+		| PR_FP_EXC_RES | PR_FP_EXC_INV)
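+
+/*
+ * Userspace selects these exception modes via prctl(2).  For example
+ * (an illustrative sketch; error handling omitted):
+ *
+ *	#include <sys/prctl.h>
+ *	#include <linux/prctl.h>
+ *
+ *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);		precise IEEE traps
+ *	prctl(PR_SET_FPEXC, PR_FP_EXC_SW_ENABLE | PR_FP_EXC_DIV);
+ *						SPE: software handling of
+ *						divide-by-zero only
+ */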
+
+int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
+{
+	struct pt_regs *regs = tsk->thread.regs;
+
+	/* This is a bit hairy.  If we are an SPE enabled processor
+	 * (have embedded fp) we store the IEEE exception enable flags in
+	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
+	 * mode (asynchronous, precise, disabled) for 'Classic' FP. */
+	if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+		tsk->thread.fpexc_mode = val &
+			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
+		return -EINVAL;
+#endif
+	} else {
+		/* on a CONFIG_SPE this does not hurt us.  The bits that
+		 * __pack_fe01 use do not overlap with bits used for
+		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
+		 * on CONFIG_SPE implementations are reserved so writing to
+		 * them does not change anything */
+		if (val > PR_FP_EXC_PRECISE)
+			return -EINVAL;
+		tsk->thread.fpexc_mode = __pack_fe01(val);
+		if (regs != NULL && (regs->msr & MSR_FP) != 0)
+			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+				| tsk->thread.fpexc_mode;
+	}
+	return 0;
+}
+
+int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
+{
+	unsigned int val;
+
+	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+		val = tsk->thread.fpexc_mode;
+#else
+		return -EINVAL;
+#endif
+	else
+		val = __unpack_fe01(tsk->thread.fpexc_mode);
+	return put_user(val, (unsigned int __user *) adr);
+}
+
+int sys_clone(unsigned long clone_flags, unsigned long usp,
+	      int __user *parent_tidp, void __user *child_threadptr,
+	      int __user *child_tidp, int p6,
+	      struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	if (usp == 0)
+		usp = regs->gpr[1];	/* stack pointer for child */
+	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
+}
+
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+	     unsigned long p4, unsigned long p5, unsigned long p6,
+	     struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
+}
+
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+	      unsigned long p4, unsigned long p5, unsigned long p6,
+	      struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
+			regs, 0, NULL, NULL);
+}
+
+int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+	       unsigned long a3, unsigned long a4, unsigned long a5,
+	       struct pt_regs *regs)
+{
+	int error;
+	char *filename;
+
+	filename = getname((char __user *) a0);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
+	flush_spe_to_thread(current);
+	error = do_execve(filename, (char __user * __user *) a1,
+			  (char __user * __user *) a2, regs);
+	if (error == 0) {
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+	}
+	putname(filename);
+out:
+	return error;
+}
+
+static int validate_sp(unsigned long sp, struct task_struct *p,
+		       unsigned long nbytes)
+{
+	unsigned long stack_page = (unsigned long)p->thread_info;
+
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+#ifdef CONFIG_IRQSTACKS
+	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+#endif
+
+	return 0;
+}
+
+void dump_stack(void)
+{
+	show_stack(current, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
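+
+/*
+ * show_stack() and get_wchan() below walk the kernel stack by
+ * following the standard ppc32 frame layout (sketch):
+ *
+ *	sp -->	[ back chain: caller's sp ]	 0(sp)
+ *		[ LR save word            ]	 4(sp)
+ *		[ locals / register saves ]
+ *
+ * so *(sp) advances to the next frame and *(sp + 4) is the saved
+ * return address examined at each step.
+ */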
printk("\n"); +#endif + } else { + if (next_exc) { + ret = next_exc; + next_exc = 0; + } else + ret = *(unsigned long *)(sp + 4); + printk(" [%08lx] ", ret); +#ifdef CONFIG_KALLSYMS + print_symbol("%s", ret); + printk("\n"); +#endif + if (ret == (unsigned long) &ret_from_except + || ret == (unsigned long) &ret_from_except_full + || ret == (unsigned long) &ret_from_syscall) { + /* sp + 16 points to an exception frame */ + regs = (struct pt_regs *) (sp + 16); + if (sp + 16 + sizeof(*regs) <= stack_top) + next_exc = regs->nip; + } + } + ++count; + sp = *(unsigned long *)sp; + } +#ifndef CONFIG_KALLSYMS + if (count > 0) + printk("\n"); +#endif +} + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long ip, sp; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + sp = p->thread.ksp; + if (!validate_sp(sp, p, 16)) + return 0; + + do { + sp = *(unsigned long *)sp; + if (!validate_sp(sp, p, 16)) + return 0; + if (count > 0) { + ip = *(unsigned long *)(sp + 4); + if (!in_sched_functions(ip)) + return ip; + } + } while (count++ < 16); + return 0; +} +EXPORT_SYMBOL(get_wchan); diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c new file mode 100644 index 000000000000..2f8c3c951394 --- /dev/null +++ b/arch/powerpc/kernel/semaphore.c @@ -0,0 +1,135 @@ +/* + * PowerPC-specific semaphore code. + * + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> + * to eliminate the SMP races in the old version between the updates + * of `count' and `waking'. Now we use negative `count' values to + * indicate that some process(es) are waiting for the semaphore. + */ + +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/module.h> + +#include <asm/atomic.h> +#include <asm/semaphore.h> +#include <asm/errno.h> + +/* + * Atomically update sem->count. + * This does the equivalent of the following: + * + * old_count = sem->count; + * tmp = MAX(old_count, 0) + incr; + * sem->count = tmp; + * return old_count; + */ +static inline int __sem_update_count(struct semaphore *sem, int incr) +{ + int old_count, tmp; + + __asm__ __volatile__("\n" +"1: lwarx %0,0,%3\n" +" srawi %1,%0,31\n" +" andc %1,%0,%1\n" +" add %1,%1,%4\n" + PPC405_ERR77(0,%3) +" stwcx. %1,0,%3\n" +" bne 1b" + : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) + : "r" (&sem->count), "r" (incr), "m" (sem->count) + : "cc"); + + return old_count; +} + +void __up(struct semaphore *sem) +{ + /* + * Note that we incremented count in up() before we came here, + * but that was ineffective since the result was <= 0, and + * any negative value of count is equivalent to 0. + * This ends up setting count to 1, unless count is now > 0 + * (i.e. because some other cpu has called up() in the meantime), + * in which case we just increment count. + */ + __sem_update_count(sem, 1); + wake_up(&sem->wait); +} +EXPORT_SYMBOL(__up); + +/* + * Note that when we come in to __down or __down_interruptible, + * we have already decremented count, but that decrement was + * ineffective since the result was < 0, and any negative value + * of count is equivalent to 0. + * Thus it is only when we decrement count from some value > 0 + * that we have actually got the semaphore. 
+ */
+void __sched __down(struct semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	/*
+	 * Try to get the semaphore.  If the count is > 0, then we've
+	 * got the semaphore; we decrement count and exit the loop.
+	 * If the count is 0 or negative, we set it to -1, indicating
+	 * that we are asleep, and then sleep.
+	 */
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+
+	/*
+	 * If there are any more sleepers, wake one of them up so
+	 * that it can either get the semaphore, or set count to -1
+	 * indicating that there are still processes sleeping.
+	 */
+	wake_up(&sem->wait);
+}
+EXPORT_SYMBOL(__down);
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_INTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/*
+			 * A signal is pending - give up trying.
+			 * Set sem->count to 0 if it is negative,
+			 * since we are no longer sleeping.
+			 */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		set_task_state(tsk, TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+
+	wake_up(&sem->wait);
+	return retval;
+}
+EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..c7afbbba0f36
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1047 @@
+/*
+ * arch/powerpc/kernel/traps.c
+ *
+ * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras (paulus@samba.org)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of hardware exceptions
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/prctl.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <asm/kdebug.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/reg.h>
+#include <asm/xmon.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+#include <asm/perfmon.h>
+
+#ifdef CONFIG_DEBUGGER
+int (*__debugger)(struct pt_regs *regs);
+int (*__debugger_ipi)(struct pt_regs *regs);
+int (*__debugger_bpt)(struct pt_regs *regs);
+int (*__debugger_sstep)(struct pt_regs *regs);
+int (*__debugger_iabr_match)(struct pt_regs *regs);
+int (*__debugger_dabr_match)(struct pt_regs *regs);
+int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+EXPORT_SYMBOL(__debugger);
+EXPORT_SYMBOL(__debugger_ipi);
+EXPORT_SYMBOL(__debugger_bpt);
+EXPORT_SYMBOL(__debugger_sstep);
+EXPORT_SYMBOL(__debugger_iabr_match);
+EXPORT_SYMBOL(__debugger_dabr_match);
+EXPORT_SYMBOL(__debugger_fault_handler);
+#endif
+
+struct notifier_block *powerpc_die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	int err = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&die_notifier_lock, flags);
+	err = notifier_chain_register(&powerpc_die_chain, nb);
+	spin_unlock_irqrestore(&die_notifier_lock, flags);
+	return err;
+}
+
+/*
+ * Trap & Exception support
+ */
+
+static DEFINE_SPINLOCK(die_lock);
+
+int die(const char *str, struct pt_regs *regs, long err)
+{
+	static int die_counter;
+	int nl = 0;
+
+	if (debugger(regs))
+		return 1;
+
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_PMAC_BACKLIGHT
+	if (_machine == _MACH_Pmac) {
+		set_backlight_enable(1);
+		set_backlight_level(BACKLIGHT_MAX);
+	}
+#endif
+	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+	printk("PREEMPT ");
+	nl = 1;
+#endif
+#ifdef CONFIG_SMP
+	printk("SMP NR_CPUS=%d ", NR_CPUS);
+	nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	printk("DEBUG_PAGEALLOC ");
+	nl = 1;
+#endif
+#ifdef CONFIG_NUMA
+	printk("NUMA ");
+	nl = 1;
+#endif
+#ifdef CONFIG_PPC64
+	switch (systemcfg->platform) {
+	case PLATFORM_PSERIES:
+		printk("PSERIES ");
+		nl = 1;
+		break;
+	case PLATFORM_PSERIES_LPAR:
+		printk("PSERIES LPAR ");
+		nl = 1;
+		break;
+	case PLATFORM_ISERIES_LPAR:
+		printk("ISERIES LPAR ");
+		nl = 1;
+		break;
+	case PLATFORM_POWERMAC:
+		printk("POWERMAC ");
+		nl = 1;
+		break;
+	case PLATFORM_BPA:
+		printk("BPA ");
+		nl = 1;
+		break;
+	}
+#endif
+	if (nl)
+		printk("\n");
+	print_modules();
+	show_regs(regs);
+	bust_spinlocks(0);
+	spin_unlock_irq(&die_lock);
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops) {
+		panic("Fatal exception");
+	}
+	do_exit(err);
+
+	return 0;
+}
+
+void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
+{
+	siginfo_t info;
+
+	if (!user_mode(regs)) {
+		if (die("Exception in kernel mode", regs, signr))
+			return;
+	}
+
+	memset(&info, 0, sizeof(info));
+	info.si_signo = signr;
+	info.si_code = code;
+	info.si_addr = (void __user *) addr;
+	force_sig_info(signr, &info, current);
+
+	/*
+	 * Init gets no signals that it doesn't have a handler for.
+	 * That's all very well, but if it has caused a synchronous
+	 * exception and we ignore the resulting signal, it will just
+	 * generate the same exception over and over again and we get
+	 * nowhere.  Better to kill it and let the kernel panic.
+	 */
+	if (current->pid == 1) {
+		__sighandler_t handler;
+
+		spin_lock_irq(&current->sighand->siglock);
+		handler = current->sighand->action[signr-1].sa.sa_handler;
+		spin_unlock_irq(&current->sighand->siglock);
+		if (handler == SIG_DFL) {
+			/* init has generated a synchronous exception
+			   and it doesn't have a handler for the signal */
+			printk(KERN_CRIT "init has generated signal %d "
+			       "but has no handler for it\n", signr);
+			do_exit(signr);
+		}
+	}
+}
+
+#ifdef CONFIG_PPC64
+void system_reset_exception(struct pt_regs *regs)
+{
+	/* See if any machine dependent calls */
+	if (ppc_md.system_reset_exception)
+		ppc_md.system_reset_exception(regs);
+
+	die("System Reset", regs, SIGABRT);
+
+	/* Must die if the interrupt is not recoverable */
+	if (!(regs->msr & MSR_RI))
+		panic("Unrecoverable System Reset");
+
+	/* What should we do here? We could issue a shutdown or hard reset. */
+}
+#endif
+
+/*
+ * I/O accesses can cause machine checks on powermacs.
+ * Check if the NIP corresponds to the address of a sync
+ * instruction for which there is an entry in the exception
+ * table.
+ * Note that the 601 only takes a machine check on TEA
+ * (transfer error ack) signal assertion, and does not
+ * set any of the top 16 bits of SRR1.
+ *  -- paulus.
+ */
+static inline int check_io_access(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_PMAC
+	unsigned long msr = regs->msr;
+	const struct exception_table_entry *entry;
+	unsigned int *nip = (unsigned int *)regs->nip;
+
+	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
+	    && (entry = search_exception_tables(regs->nip)) != NULL) {
+		/*
+		 * Check that it's a sync instruction, or somewhere
+		 * in the twi; isync; nop sequence that inb/inw/inl uses.
+		 * As the address is in the exception table
+		 * we should be able to read the instr there.
+		 * For the debug message, we look at the preceding
+		 * load or store.
+		 */
+		if (*nip == 0x60000000)		/* nop */
+			nip -= 2;
+		else if (*nip == 0x4c00012c)	/* isync */
+			--nip;
+		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
+			/* sync or twi */
+			unsigned int rb;
+
+			--nip;
+			rb = (*nip >> 11) & 0x1f;
+			printk(KERN_DEBUG "%s bad port %lx at %p\n",
+			       (*nip & 0x100)? "OUT to": "IN from",
+			       regs->gpr[rb] - _IO_BASE, nip);
+			regs->msr |= MSR_RI;
+			regs->nip = entry->fixup;
+			return 1;
+		}
+	}
+#endif /* CONFIG_PPC_PMAC */
+	return 0;
+}
+
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+/* On 4xx, the reason for the machine check or program exception
+   is in the ESR. */
+#define get_reason(regs)	((regs)->dsisr)
+#ifndef CONFIG_FSL_BOOKE
+#define get_mc_reason(regs)	((regs)->dsisr)
+#else
+#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
+#endif
+#define REASON_FP		ESR_FP
+#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
+#define REASON_PRIVILEGED	ESR_PPR
+#define REASON_TRAP		ESR_PTR
+
+/* single-step stuff */
+#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
+
+#else
+/* On non-4xx, the reason for the machine check or program
+   exception is in the MSR. */
+#define get_reason(regs)	((regs)->msr)
+#define get_mc_reason(regs)	((regs)->msr)
+#define REASON_FP		0x100000
+#define REASON_ILLEGAL		0x80000
+#define REASON_PRIVILEGED	0x40000
+#define REASON_TRAP		0x20000
+
+#define single_stepping(regs)	((regs)->msr & MSR_SE)
+#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
+#endif
+
+/*
+ * This is "fall-back" implementation for configurations
+ * which don't provide platform-specific machine check info
+ */
+void __attribute__ ((weak))
+platform_machine_check(struct pt_regs *regs)
+{
+}
+
+void MachineCheckException(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC64
+	int recover = 0;
+
+	/* See if any machine dependent calls */
+	if (ppc_md.machine_check_exception)
+		recover = ppc_md.machine_check_exception(regs);
+
+	if (recover)
+		return;
+#else
+	unsigned long reason = get_mc_reason(regs);
+
+	if (user_mode(regs)) {
+		regs->msr |= MSR_RI;
+		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
+		return;
+	}
+
+#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+	/* the qspan pci read routines can cause machine checks -- Cort */
+	bad_page_fault(regs, regs->dar, SIGBUS);
+	return;
+#endif
+
+	if (debugger_fault_handler(regs)) {
+		regs->msr |= MSR_RI;
+		return;
+	}
+
+	if (check_io_access(regs))
+		return;
+
+#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
+	if (reason & ESR_IMCP) {
+		printk("Instruction");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+	} else
+		printk("Data");
+	printk(" machine check in kernel mode.\n");
+#elif defined(CONFIG_440A)
+	printk("Machine check in kernel mode.\n");
+	if (reason & ESR_IMCP){
+		printk("Instruction Synchronous Machine Check exception\n");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+	}
+	else {
+		u32 mcsr = mfspr(SPRN_MCSR);
+		if (mcsr & MCSR_IB)
+			printk("Instruction Read PLB Error\n");
+		if (mcsr & MCSR_DRB)
+			printk("Data Read PLB Error\n");
+		if (mcsr & MCSR_DWB)
+			printk("Data Write PLB Error\n");
+		if (mcsr & MCSR_TLBP)
+			printk("TLB Parity Error\n");
+		if (mcsr & MCSR_ICP){
+			flush_instruction_cache();
+			printk("I-Cache Parity Error\n");
+		}
+		if (mcsr & MCSR_DCSP)
+			printk("D-Cache Search Parity Error\n");
+		if (mcsr & MCSR_DCFP)
+			printk("D-Cache Flush Parity Error\n");
+		if (mcsr & MCSR_IMPE)
+			printk("Machine Check exception is imprecise\n");
+
+		/* Clear MCSR */
+		mtspr(SPRN_MCSR, mcsr);
+	}
+#elif defined (CONFIG_E500)
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from MCSR=%lx): ", reason);
+
+	if (reason & MCSR_MCP)
+		printk("Machine Check Signal\n");
+	if (reason & MCSR_ICPERR)
+		printk("Instruction Cache Parity Error\n");
+	if (reason & MCSR_DCP_PERR)
+		printk("Data Cache Push Parity Error\n");
+	if (reason & MCSR_DCPERR)
+		printk("Data Cache Parity Error\n");
+	if (reason & MCSR_GL_CI)
+		printk("Guarded Load or Cache-Inhibited stwcx.\n");
+	if (reason & MCSR_BUS_IAERR)
+		printk("Bus - Instruction Address Error\n");
+	if (reason & MCSR_BUS_RAERR)
+		printk("Bus - Read Address Error\n");
+	if (reason & MCSR_BUS_WAERR)
+		printk("Bus - Write Address Error\n");
+	if (reason & MCSR_BUS_IBERR)
+		printk("Bus - Instruction Data Error\n");
+	if (reason & MCSR_BUS_RBERR)
+		printk("Bus - Read Data Bus Error\n");
+	if (reason & MCSR_BUS_WBERR)
+		printk("Bus - Write Data Bus Error\n");
+	if (reason & MCSR_BUS_IPERR)
+		printk("Bus - Instruction Parity Error\n");
+	if (reason & MCSR_BUS_RPERR)
+		printk("Bus - Read Parity Error\n");
+#elif defined (CONFIG_E200)
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from MCSR=%lx): ", reason);
+
+	if (reason & MCSR_MCP)
+		printk("Machine Check Signal\n");
+	if (reason & MCSR_CP_PERR)
+		printk("Cache Push Parity Error\n");
+	if (reason & MCSR_CPERR)
+		printk("Cache Parity Error\n");
+	if (reason & MCSR_EXCP_ERR)
+		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
+	if (reason & MCSR_BUS_IRERR)
+		printk("Bus - Read Bus Error on instruction fetch\n");
+	if (reason & MCSR_BUS_DRERR)
+		printk("Bus - Read Bus Error on data load\n");
+	if (reason & MCSR_BUS_WRERR)
+		printk("Bus - Write Bus Error on buffered store or cache line push\n");
+#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from SRR1=%lx): ", reason);
+	switch (reason & 0x601F0000) {
+	case 0x80000:
+		printk("Machine check signal\n");
+		break;
+	case 0:		/* for 601 */
+	case 0x40000:
+	case 0x140000:	/* 7450 MSS error and TEA */
+		printk("Transfer error ack signal\n");
+		break;
+	case 0x20000:
+		printk("Data parity error signal\n");
+		break;
+	case 0x10000:
+		printk("Address parity error signal\n");
+		break;
+	case 0x20000000:
+		printk("L1 Data Cache error\n");
+		break;
+	case 0x40000000:
+		printk("L1 Instruction Cache error\n");
+		break;
+	case 0x00100000:
+		printk("L2 data cache parity error\n");
+		break;
+	default:
+		printk("Unknown values in msr\n");
+	}
+#endif /* CONFIG_4xx */
+
+	/*
+	 * Optional platform-provided routine to print out
+	 * additional info, e.g. bus error registers.
+	 */
+	platform_machine_check(regs);
+#endif /* CONFIG_PPC64 */
+
+	if (debugger_fault_handler(regs))
+		return;
+	die("Machine check", regs, SIGBUS);
+
+	/* Must die if the interrupt is not recoverable */
+	if (!(regs->msr & MSR_RI))
+		panic("Unrecoverable Machine check");
+}
+
+void SMIException(struct pt_regs *regs)
+{
+	die("System Management Interrupt", regs, SIGABRT);
+}
+
+void UnknownException(struct pt_regs *regs)
+{
+	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+	       regs->nip, regs->msr, regs->trap);
+
+	_exception(SIGTRAP, regs, 0, 0);
+}
+
+void InstructionBreakpoint(struct pt_regs *regs)
+{
+	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
+			5, SIGTRAP) == NOTIFY_STOP)
+		return;
+	if (debugger_iabr_match(regs))
+		return;
+	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+}
+
+void RunModeException(struct pt_regs *regs)
+{
+	_exception(SIGTRAP, regs, 0, 0);
+}
+
+void SingleStepException(struct pt_regs *regs)
+{
+	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
+
+	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
+			5, SIGTRAP) == NOTIFY_STOP)
+		return;
+	if (debugger_sstep(regs))
+		return;
+
+	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+}
+
+/*
+ * After we have successfully emulated an instruction, we have to
+ * check if the instruction was being single-stepped, and if so,
+ * pretend we got a single-step exception.  This was pointed out
+ * by Kumar Gala.  -- paulus
+ */
+static void emulate_single_step(struct pt_regs *regs)
+{
+	if (single_stepping(regs)) {
+		clear_single_step(regs);
+		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
+	}
+}
+
+/* Illegal instruction emulation support.  Originally written to
+ * provide the PVR to user applications using the mfspr rd, PVR.
+ * Return non-zero if we can't emulate, or -EFAULT if the associated
+ * memory access caused an access fault.  Return zero on success.
+ *
+ * There are a couple of ways to do this, either "decode" the instruction
+ * or directly match lots of bits.  In this case, matching lots of
+ * bits is faster and easier.
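+ *
+ * For example, "mfspr rD,PVR" encodes as 0x7c1f42a6 | (rD << 21):
+ * primary opcode 31, SPR field 287 (the PVR), extended opcode 339.
+ * INST_MFSPR_PVR_MASK below simply clears the rD bits (21-25), so a
+ * single compare matches the instruction for any destination register.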
+ *
+ */
+#define INST_MFSPR_PVR		0x7c1f42a6
+#define INST_MFSPR_PVR_MASK	0xfc1fffff
+
+#define INST_DCBA		0x7c0005ec
+#define INST_DCBA_MASK		0x7c0007fe
+
+#define INST_MCRXR		0x7c000400
+#define INST_MCRXR_MASK		0x7c0007fe
+
+#define INST_STRING		0x7c00042a
+#define INST_STRING_MASK	0x7c0007fe
+#define INST_STRING_GEN_MASK	0x7c00067e
+#define INST_LSWI		0x7c0004aa
+#define INST_LSWX		0x7c00042a
+#define INST_STSWI		0x7c0005aa
+#define INST_STSWX		0x7c00052a
+
+static int emulate_string_inst(struct pt_regs *regs, u32 instword)
+{
+	u8 rT = (instword >> 21) & 0x1f;
+	u8 rA = (instword >> 16) & 0x1f;
+	u8 NB_RB = (instword >> 11) & 0x1f;
+	u32 num_bytes;
+	unsigned long EA;
+	int pos = 0;
+
+	/* Early out if we are an invalid form of lswx */
+	if ((instword & INST_STRING_MASK) == INST_LSWX)
+		if ((rT == rA) || (rT == NB_RB))
+			return -EINVAL;
+
+	EA = (rA == 0) ? 0 : regs->gpr[rA];
+
+	switch (instword & INST_STRING_MASK) {
+	case INST_LSWX:
+	case INST_STSWX:
+		/* indexed form: EA = (rA|0) + contents of rB */
+		EA += regs->gpr[NB_RB];
+		num_bytes = regs->xer & 0x7f;
+		break;
+	case INST_LSWI:
+	case INST_STSWI:
+		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	while (num_bytes != 0)
+	{
+		u8 val;
+		u32 shift = 8 * (3 - (pos & 0x3));
+
+		switch ((instword & INST_STRING_MASK)) {
+		case INST_LSWX:
+		case INST_LSWI:
+			if (get_user(val, (u8 __user *)EA))
+				return -EFAULT;
+			/* first time updating this reg,
+			 * zero it out */
+			if (pos == 0)
+				regs->gpr[rT] = 0;
+			regs->gpr[rT] |= val << shift;
+			break;
+		case INST_STSWI:
+		case INST_STSWX:
+			val = regs->gpr[rT] >> shift;
+			if (put_user(val, (u8 __user *)EA))
+				return -EFAULT;
+			break;
+		}
+		/* move EA to next address */
+		EA += 1;
+		num_bytes--;
+
+		/* manage our position within the register */
+		if (++pos == 4) {
+			pos = 0;
+			if (++rT == 32)
+				rT = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int emulate_instruction(struct pt_regs *regs)
+{
+	u32 instword;
+	u32 rd;
+
+	if (!user_mode(regs))
+		return -EINVAL;
+	CHECK_FULL_REGS(regs);
+
+	if (get_user(instword, (u32 __user *)(regs->nip)))
+		return -EFAULT;
+
+	/* Emulate the mfspr rD, PVR. */
+	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
+		rd = (instword >> 21) & 0x1f;
+		regs->gpr[rd] = mfspr(SPRN_PVR);
+		return 0;
+	}
+
+	/* Emulating the dcba insn is just a no-op. */
+	if ((instword & INST_DCBA_MASK) == INST_DCBA)
+		return 0;
+
+	/* Emulate the mcrxr insn. */
+	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
+		int shift = (instword >> 21) & 0x1c;
+		unsigned long msk = 0xf0000000UL >> shift;
+
+		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
+		regs->xer &= ~0xf0000000UL;
+		return 0;
+	}
+
+	/* Emulate load/store string insn. */
+	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
+		return emulate_string_inst(regs, instword);
+
+	return -EINVAL;
+}
+
+/*
+ * Look through the list of trap instructions that are used for BUG(),
+ * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
+ * that the exception was caused by a trap instruction of some kind.
+ * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
+ * otherwise.
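+ *
+ * (These trap sites are emitted by BUG()/WARN_ON(); on ppc32 they
+ * expand along these lines -- a sketch, see include/asm-ppc/bug.h for
+ * the real definition:
+ *
+ *	1:	twi 31,0,0
+ *		.section __bug_table, "a"
+ *		.long 1b, __LINE__, __FILE__, __FUNCTION__
+ *		.previous
+ *
+ * with WARN_ON() additionally or-ing BUG_WARNING_TRAP into the line
+ * field, which is what the BUG_WARNING_TRAP test below keys on.)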
+ */
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+#ifndef CONFIG_MODULES
+#define module_find_bug(x)	NULL
+#endif
+
+struct bug_entry *find_bug(unsigned long bugaddr)
+{
+	struct bug_entry *bug;
+
+	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+		if (bugaddr == bug->bug_addr)
+			return bug;
+	return module_find_bug(bugaddr);
+}
+
+int check_bug_trap(struct pt_regs *regs)
+{
+	struct bug_entry *bug;
+	unsigned long addr;
+
+	if (regs->msr & MSR_PR)
+		return 0;	/* not in kernel */
+	addr = regs->nip;	/* address of trap instruction */
+	if (addr < PAGE_OFFSET)
+		return 0;
+	bug = find_bug(regs->nip);
+	if (bug == NULL)
+		return 0;
+	if (bug->line & BUG_WARNING_TRAP) {
+		/* this is a WARN_ON rather than BUG/BUG_ON */
+#ifdef CONFIG_XMON
+		xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
+			    bug->function, bug->file,
+			    bug->line & ~BUG_WARNING_TRAP);
+#endif /* CONFIG_XMON */
+		printk(KERN_ERR "Badness in %s at %s:%d\n",
+		       bug->function, bug->file,
+		       bug->line & ~BUG_WARNING_TRAP);
+		dump_stack();
+		return 1;
+	}
+#ifdef CONFIG_XMON
+	xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+		    bug->function, bug->file, bug->line);
+	xmon(regs);
+#endif /* CONFIG_XMON */
+	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+	       bug->function, bug->file, bug->line);
+
+	return 0;
+}
+
+void ProgramCheckException(struct pt_regs *regs)
+{
+	unsigned int reason = get_reason(regs);
+	extern int do_mathemu(struct pt_regs *regs);
+
+#ifdef CONFIG_MATH_EMULATION
+	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
+	 * but there seems to be a hardware bug on the 405GP (RevD)
+	 * that means ESR is sometimes set incorrectly - either to
+	 * ESR_DST (!?) or 0.  In the process of chasing this with the
+	 * hardware people - not sure if it can happen on any illegal
+	 * instruction or only on FP instructions, whether there is a
+	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
+	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
+		emulate_single_step(regs);
+		return;
+	}
+#endif /* CONFIG_MATH_EMULATION */
+
+	if (reason & REASON_FP) {
+		/* IEEE FP exception */
+		int code = 0;
+		u32 fpscr;
+
+		/* We must make sure the FP state is consistent with
+		 * our MSR_FP in regs
+		 */
+		preempt_disable();
+		if (regs->msr & MSR_FP)
+			giveup_fpu(current);
+		preempt_enable();
+
+		fpscr = current->thread.fpscr;
+		/* Mask each exception summary bit with its enable bit:
+		 * shifting FPSCR left by 22 lines the VE/OE/UE/ZE/XE
+		 * enables up under the corresponding VX/OX/UX/ZX/XX
+		 * bits (e.g. OE at 0x40 moves to 0x10000000, which is OX).
+		 */
+		fpscr &= fpscr << 22;
+		if (fpscr & FPSCR_VX)
+			code = FPE_FLTINV;
+		else if (fpscr & FPSCR_OX)
+			code = FPE_FLTOVF;
+		else if (fpscr & FPSCR_UX)
+			code = FPE_FLTUND;
+		else if (fpscr & FPSCR_ZX)
+			code = FPE_FLTDIV;
+		else if (fpscr & FPSCR_XX)
+			code = FPE_FLTRES;
+		_exception(SIGFPE, regs, code, regs->nip);
+		return;
+	}
+
+	if (reason & REASON_TRAP) {
+		/* trap exception */
+		if (debugger_bpt(regs))
+			return;
+		if (check_bug_trap(regs)) {
+			regs->nip += 4;
+			return;
+		}
+		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
+		return;
+	}
+
+	/* Try to emulate it if we should. */
+	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
+		switch (emulate_instruction(regs)) {
+		case 0:
+			regs->nip += 4;
+			emulate_single_step(regs);
+			return;
+		case -EFAULT:
+			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+			return;
+		}
+	}
+
+	if (reason & REASON_PRIVILEGED)
+		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+	else
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+}
+
+void AlignmentException(struct pt_regs *regs)
+{
+	int fixed;
+
+	fixed = fix_alignment(regs);
+
+	if (fixed == 1) {
+		regs->nip += 4;	/* skip over emulated instruction */
+		emulate_single_step(regs);
+		return;
+	}
+
+	/* Operand address was bad */
+	if (fixed == -EFAULT) {
+		if (user_mode(regs))
+			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
+		else
+			/* Search exception table */
+			bad_page_fault(regs, regs->dar, SIGSEGV);
+		return;
+	}
+	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+}
+
+void StackOverflow(struct pt_regs *regs)
+{
+	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
+	       current, regs->gpr[1]);
+	debugger(regs);
+	show_regs(regs);
+	panic("kernel stack overflow");
+}
+
+void nonrecoverable_exception(struct pt_regs *regs)
+{
+	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
+	       regs->nip, regs->msr);
+	debugger(regs);
+	die("nonrecoverable exception", regs, SIGKILL);
+}
+
+void trace_syscall(struct pt_regs *regs)
+{
+	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld	%s\n",
+	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
+	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
+}
+
+#ifdef CONFIG_8xx
+void SoftwareEmulation(struct pt_regs *regs)
+{
+	extern int do_mathemu(struct pt_regs *);
+	extern int Soft_emulate_8xx(struct pt_regs *);
+	int errcode;
+
+	CHECK_FULL_REGS(regs);
+
+	if (!user_mode(regs)) {
+		debugger(regs);
+		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
+	}
+
+#ifdef CONFIG_MATH_EMULATION
+	errcode = do_mathemu(regs);
+#else
+	errcode = Soft_emulate_8xx(regs);
+#endif
+	if (errcode) {
+		if (errcode > 0)
+			_exception(SIGFPE, regs, 0, 0);
+		else if (errcode == -EFAULT)
+			_exception(SIGSEGV, regs, 0, 0);
+		else
+			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+	} else
+		emulate_single_step(regs);
+}
+#endif /* CONFIG_8xx */
+
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+
+void DebugException(struct pt_regs *regs, unsigned long debug_status)
+{
+	if (debug_status & DBSR_IC) {	/* instruction completion */
+		regs->msr &= ~MSR_DE;
+		if (user_mode(regs)) {
+			current->thread.dbcr0 &= ~DBCR0_IC;
+		} else {
+			/* Disable instruction completion */
+			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
+			/* Clear the instruction completion event */
+			mtspr(SPRN_DBSR, DBSR_IC);
+			if (debugger_sstep(regs))
+				return;
+		}
+		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
+	}
+}
+#endif /* CONFIG_40x || CONFIG_BOOKE */
+
+#if !defined(CONFIG_TAU_INT)
+void TAUException(struct pt_regs *regs)
+{
+	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx	%s\n",
+	       regs->nip, regs->msr, regs->trap, print_tainted());
+}
+#endif /* CONFIG_TAU_INT */
+
+void AltivecUnavailException(struct pt_regs *regs)
+{
+	static int kernel_altivec_count;
+
+#ifndef CONFIG_ALTIVEC
+	if (user_mode(regs)) {
+		/* A user program has executed an altivec instruction,
+		   but this kernel doesn't support altivec. */
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+		return;
+	}
+#endif
+	/* The kernel has executed an altivec instruction without
+	 * first enabling altivec.  Whinge but let it do it. */
+	if (++kernel_altivec_count < 10)
+		printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n",
+		       current, regs->nip);
+	regs->msr |= MSR_VEC;
+}
+
+#ifdef CONFIG_ALTIVEC
+void AltivecAssistException(struct pt_regs *regs)
+{
+	int err;
+
+	preempt_disable();
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	preempt_enable();
+	if (!user_mode(regs)) {
+		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
+		       " at %lx\n", regs->nip);
+		die("Kernel Altivec assist exception", regs, SIGILL);
+	}
+
+	err = emulate_altivec(regs);
+	if (err == 0) {
+		regs->nip += 4;		/* skip emulated instruction */
+		emulate_single_step(regs);
+		return;
+	}
+
+	if (err == -EFAULT) {
+		/* got an error reading the instruction */
+		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+	} else {
+		/* didn't recognize the instruction */
+		/* XXX quick hack for now: set the non-Java bit in the VSCR */
+		if (printk_ratelimit())
+			printk(KERN_ERR "Unrecognized altivec instruction "
+			       "in %s at %lx\n", current->comm, regs->nip);
+		current->thread.vscr.u[3] |= 0x10000;
+	}
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_E500
+void PerformanceMonitorException(struct pt_regs *regs)
+{
+	perf_irq(regs);
+}
+#endif
+
+#ifdef CONFIG_FSL_BOOKE
+void CacheLockingException(struct pt_regs *regs, unsigned long address,
+			   unsigned long error_code)
+{
+	/* We treat cache locking instructions from the user
+	 * as priv ops, in the future we could try to do
+	 * something smarter
+	 */
+	if (error_code & (ESR_DLK|ESR_ILK))
+		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+	return;
+}
+#endif /* CONFIG_FSL_BOOKE */
+
+#ifdef CONFIG_SPE
+void SPEFloatingPointException(struct pt_regs *regs)
+{
+	unsigned long spefscr;
+	int fpexc_mode;
+	int code = 0;
+
+	spefscr = current->thread.spefscr;
+	fpexc_mode = current->thread.fpexc_mode;
+
+	/* Hardware does not necessarily set sticky
+	 * underflow/overflow/invalid flags */
+	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
+		code = FPE_FLTOVF;
+		spefscr |= SPEFSCR_FOVFS;
+	}
+	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
+		code = FPE_FLTUND;
+		spefscr |= SPEFSCR_FUNFS;
+	}
+	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
+		code = FPE_FLTDIV;
+	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
+		code = FPE_FLTINV;
+		spefscr |= SPEFSCR_FINVS;
+	}
+	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
+		code = FPE_FLTRES;
+
+	current->thread.spefscr = spefscr;
+
+	_exception(SIGFPE, regs, code, regs->nip);
+	return;
+}
+#endif
+
+#ifdef CONFIG_BOOKE_WDT
+/*
+ * Default handler for a Watchdog exception,
+ * spins until a reboot occurs
+ */
+void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
+{
+	/* Generic WatchdogHandler, implement your own */
+	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
+	return;
+}
+
+void WatchdogException(struct pt_regs *regs)
+{
+	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
+	WatchdogHandler(regs);
+}
+#endif
+
+void __init trap_init(void)
+{
+}
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
new file mode 100644
index 000000000000..12cb90bc209c
--- /dev/null
+++ b/arch/powerpc/kernel/vector.S
@@ -0,0 +1,197 @@
+#include <linux/config.h>
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+/*
+ * The routines below are in assembler so we can closely control the
+ * usage of floating-point registers.  These routines must be called
+ * with preempt disabled.
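+ * (They run with MSR_FP turned on behind the scheduler's back and
+ * keep live values in fr0/fr1 between instructions, so a preemptive
+ * context switch in the middle would corrupt that state.  The AltiVec
+ * floating-point emulation code in vecemu.c is the intended caller.)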
+ */
+#ifdef CONFIG_PPC32
+	.data
+fpzero:
+	.long	0
+fpone:
+	.long	0x3f800000	/* 1.0 in single-precision FP */
+fphalf:
+	.long	0x3f000000	/* 0.5 in single-precision FP */
+
+#define LDCONST(fr, name)	\
+	lis	r11,name@ha;	\
+	lfs	fr,name@l(r11)
+#else
+
+	.section ".toc","aw"
+fpzero:
+	.tc	FD_0_0[TC],0
+fpone:
+	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
+fphalf:
+	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */
+
+#define LDCONST(fr, name)	\
+	lfd	fr,name@toc(r2)
+#endif
+
+	.text
+/*
+ * Internal routine to enable floating point and set FPSCR to 0.
+ * Don't call it from C; it doesn't use the normal calling convention.
+ */
+fpenable:
+#ifdef CONFIG_PPC32
+	stwu	r1,-64(r1)
+#else
+	stdu	r1,-64(r1)
+#endif
+	mfmsr	r10
+	ori	r11,r10,MSR_FP
+	mtmsr	r11
+	isync
+	stfd	fr0,24(r1)
+	stfd	fr1,16(r1)
+	stfd	fr31,8(r1)
+	LDCONST(fr1, fpzero)
+	mffs	fr31
+	mtfsf	0xff,fr1
+	blr
+
+fpdisable:
+	mtlr	r12
+	mtfsf	0xff,fr31
+	lfd	fr31,8(r1)
+	lfd	fr1,16(r1)
+	lfd	fr0,24(r1)
+	mtmsr	r10
+	isync
+	addi	r1,r1,64
+	blr
+
+/*
+ * Vector add, floating point.
+ */
+_GLOBAL(vaddfp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	lfsx	fr1,r5,r6
+	fadds	fr0,fr0,fr1
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
+/*
+ * Vector subtract, floating point.
+ */
+_GLOBAL(vsubfp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	lfsx	fr1,r5,r6
+	fsubs	fr0,fr0,fr1
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
+/*
+ * Vector multiply and add, floating point.
+ */
+_GLOBAL(vmaddfp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	li	r0,4
+	mtctr	r0
+	li	r7,0
+1:	lfsx	fr0,r4,r7
+	lfsx	fr1,r5,r7
+	lfsx	fr2,r6,r7
+	fmadds	fr0,fr0,fr2,fr1
+	stfsx	fr0,r3,r7
+	addi	r7,r7,4
+	bdnz	1b
+	lfd	fr2,32(r1)
+	b	fpdisable
+
+/*
+ * Vector negative multiply and subtract, floating point.
+ */
+_GLOBAL(vnmsubfp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	li	r0,4
+	mtctr	r0
+	li	r7,0
+1:	lfsx	fr0,r4,r7
+	lfsx	fr1,r5,r7
+	lfsx	fr2,r6,r7
+	fnmsubs	fr0,fr0,fr2,fr1
+	stfsx	fr0,r3,r7
+	addi	r7,r7,4
+	bdnz	1b
+	lfd	fr2,32(r1)
+	b	fpdisable
+
+/*
+ * Vector reciprocal estimate.  We just compute 1.0/x.
+ * r3 -> destination, r4 -> source.
+ */
+_GLOBAL(vrefp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	LDCONST(fr1, fpone)
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	fdivs	fr0,fr1,fr0
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
+/*
+ * Vector reciprocal square-root estimate, floating point.
+ * We use the frsqrte instruction for the initial estimate followed
+ * by 2 iterations of Newton-Raphson to get sufficient accuracy.
+ * r3 -> destination, r4 -> source.
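+ *
+ * (Each Newton-Raphson step for y = 1/sqrt(s) computes
+ *	r' = r + 0.5 * r * (1 - s * r * r)
+ * which is exactly the fmuls/fnmsubs/fmadds triple in the loop below.)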
+ */
+_GLOBAL(vrsqrtefp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	stfd	fr3,40(r1)
+	stfd	fr4,48(r1)
+	stfd	fr5,56(r1)
+	li	r0,4
+	LDCONST(fr4, fpone)
+	LDCONST(fr5, fphalf)
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	frsqrte	fr1,fr0		/* r = frsqrte(s) */
+	fmuls	fr3,fr1,fr0	/* r * s */
+	fmuls	fr2,fr1,fr5	/* r * 0.5 */
+	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
+	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
+	fmuls	fr3,fr1,fr0	/* r * s */
+	fmuls	fr2,fr1,fr5	/* r * 0.5 */
+	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
+	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
+	stfsx	fr1,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	lfd	fr5,56(r1)
+	lfd	fr4,48(r1)
+	lfd	fr3,40(r1)
+	lfd	fr2,32(r1)
+	b	fpdisable
diff --git a/arch/powerpc/kernel/vmlinux.lds b/arch/powerpc/kernel/vmlinux.lds
new file mode 100644
index 000000000000..d62c288a81d0
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds
@@ -0,0 +1,174 @@
+/* Align . to an 8 byte boundary, the maximum function alignment. */
+/* sched.text is aligned to function alignment to ensure we have the same
+ * address even at second ld pass when generating System.map */
+/* spinlock.text is aligned to function alignment to ensure we have the same
+ * address even at second ld pass when generating System.map */
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to
+     the beginning of the section so we begin them at 0. */
+  /* Stabs debugging sections.  */
+OUTPUT_ARCH(powerpc:common)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  . = + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash : { *(.hash) }
+  .dynsym : { *(.dynsym) }
+  .dynstr : { *(.dynstr) }
+  .rel.text : { *(.rel.text) }
+  .rela.text : { *(.rela.text) }
+  .rel.data : { *(.rel.data) }
+  .rela.data : { *(.rela.data) }
+  .rel.rodata : { *(.rel.rodata) }
+  .rela.rodata : { *(.rela.rodata) }
+  .rel.got : { *(.rel.got) }
+  .rela.got : { *(.rela.got) }
+  .rel.ctors : { *(.rel.ctors) }
+  .rela.ctors : { *(.rela.ctors) }
+  .rel.dtors : { *(.rel.dtors) }
+  .rela.dtors : { *(.rela.dtors) }
+  .rel.bss : { *(.rel.bss) }
+  .rela.bss : { *(.rela.bss) }
+  .rel.plt : { *(.rel.plt) }
+  .rela.plt : { *(.rela.plt) }
+/*  .init : { *(.init) } =0*/
+  .plt : { *(.plt) }
+  .text :
+  {
+    *(.text)
+    . = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .;
+    . = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .;
+    *(.fixup)
+    *(.got1)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+  .rodata : AT(ADDR(.rodata) - 0) { *(.rodata) *(.rodata.*) *(__vermagic) }
+  .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) }
+  .pci_fixup : AT(ADDR(.pci_fixup) - 0) {
+    __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .;
+    __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .;
+    __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .;
+    __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .;
+  }
+  __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(__ksymtab) __stop___ksymtab = .; }
+  __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(__ksymtab_gpl) __stop___ksymtab_gpl = .; }
+  __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(__kcrctab) __stop___kcrctab = .; }
+  __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(__kcrctab_gpl) __stop___kcrctab_gpl = .; }
+  __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) }
+  __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; }
+  .fini : { *(.fini) } =0
+  .ctors : { *(.ctors) }
+  .dtors : { *(.dtors) }
+  .fixup : { *(.fixup) }
+  __ex_table : {
+    __start___ex_table = .;
+    *(__ex_table)
+    __stop___ex_table = .;
+  }
+  __bug_table : {
+    __start___bug_table = .;
+    *(__bug_table)
+    __stop___bug_table = .;
+  }
+  /* Read-write section, merged into data segment: */
+  . = ALIGN(4096);
+  .data :
+  {
+    *(.data)
+    *(.data1)
+    *(.sdata)
+    *(.sdata2)
+    *(.got.plt) *(.got)
+    *(.dynamic)
+    CONSTRUCTORS
+  }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;
+  PROVIDE (edata = .);
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  __init_begin = .;
+  .init.text : {
+    _sinittext = .;
+    *(.init.text)
+    _einittext = .;
+  }
+  /* .exit.text is discarded at runtime, not link time,
+     to deal with references from __bug_table */
+  .exit.text : { *(.exit.text) }
+  .init.data : {
+    *(.init.data);
+    __vtop_table_begin = .;
+    *(.vtop_fixup);
+    __vtop_table_end = .;
+    __ptov_table_begin = .;
+    *(.ptov_fixup);
+    __ptov_table_end = .;
+  }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+    *(.initcall1.init)
+    *(.initcall2.init)
+    *(.initcall3.init)
+    *(.initcall4.init)
+    *(.initcall5.init)
+    *(.initcall6.init)
+    *(.initcall7.init)
+  }
+  __initcall_end = .;
+
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+
+  .security_initcall.init : AT(ADDR(.security_initcall.init) - 0) { __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; }
+
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
+
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+
+  . = ALIGN(4096);
+  __init_end = .;
+
+  . = ALIGN(4096);
+  _sextratext = .;
+  _eextratext = .;
+
+  __bss_start = .;
+  .bss :
+  {
+   *(.sbss) *(.scommon)
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  __bss_stop = .;
+
+  _end = . ;
+  PROVIDE (end = .);
+
+  /* Sections to be discarded. */
+  /DISCARD/ : {
+    *(.exitcall.exit)
+    *(.exit.data)
+  }
+}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..09c6525cfa61
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,172 @@
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(powerpc:common)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  . = + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash : { *(.hash) }
+  .dynsym : { *(.dynsym) }
+  .dynstr : { *(.dynstr) }
+  .rel.text : { *(.rel.text) }
+  .rela.text : { *(.rela.text) }
+  .rel.data : { *(.rel.data) }
+  .rela.data : { *(.rela.data) }
+  .rel.rodata : { *(.rel.rodata) }
+  .rela.rodata : { *(.rela.rodata) }
+  .rel.got : { *(.rel.got) }
+  .rela.got : { *(.rela.got) }
+  .rel.ctors : { *(.rel.ctors) }
+  .rela.ctors : { *(.rela.ctors) }
+  .rel.dtors : { *(.rel.dtors) }
+  .rela.dtors : { *(.rela.dtors) }
+  .rel.bss : { *(.rel.bss) }
+  .rela.bss : { *(.rela.bss) }
+  .rel.plt : { *(.rel.plt) }
+  .rela.plt : { *(.rela.plt) }
+/*  .init : { *(.init) } =0*/
+  .plt : { *(.plt) }
+  .text :
+  {
+    *(.text)
+    SCHED_TEXT
+    LOCK_TEXT
+    *(.fixup)
+    *(.got1)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+
+  RODATA
+  .fini : { *(.fini) } =0
+  .ctors : { *(.ctors) }
+  .dtors : { *(.dtors) }
+
+  .fixup : { *(.fixup) }
+
+  __ex_table : {
+    __start___ex_table = .;
+    *(__ex_table)
+    __stop___ex_table = .;
+  }
+
+  __bug_table : {
+    __start___bug_table = .;
+    *(__bug_table)
+    __stop___bug_table = .;
+  }
+
+  /* Read-write section, merged into data segment: */
+  . = ALIGN(4096);
+  .data :
+  {
+    *(.data)
+    *(.data1)
+    *(.sdata)
+    *(.sdata2)
+    *(.got.plt) *(.got)
+    *(.dynamic)
+    CONSTRUCTORS
+  }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;
+  PROVIDE (edata = .);
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  __init_begin = .;
+  .init.text : {
+    _sinittext = .;
+    *(.init.text)
+    _einittext = .;
+  }
+  /* .exit.text is discarded at runtime, not link time,
+     to deal with references from __bug_table */
+  .exit.text : { *(.exit.text) }
+  .init.data : {
+    *(.init.data);
+    __vtop_table_begin = .;
+    *(.vtop_fixup);
+    __vtop_table_end = .;
+    __ptov_table_begin = .;
+    *(.ptov_fixup);
+    __ptov_table_end = .;
+  }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+    *(.initcall1.init)
+    *(.initcall2.init)
+    *(.initcall3.init)
+    *(.initcall4.init)
+    *(.initcall5.init)
+    *(.initcall6.init)
+    *(.initcall7.init)
+  }
+  __initcall_end = .;
+
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+
+  SECURITY_INIT
+
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
+
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+
+  . = ALIGN(4096);
+  __init_end = .;
+
+  . = ALIGN(4096);
+  _sextratext = .;
+  _eextratext = .;
+
+  __bss_start = .;
+  .bss :
+  {
+   *(.sbss) *(.scommon)
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  __bss_stop = .;
+
+  _end = . ;
+  PROVIDE (end = .);
+
+  /* Sections to be discarded. */
+  /DISCARD/ : {
+    *(.exitcall.exit)
+    *(.exit.data)
+  }
+}