From 85d9d7a9206631abd1030cd69ebb1c45f4a01037 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 21 Sep 2012 17:38:15 +0100 Subject: metag: Boot Add boot code for metag. Due to the multi-threaded nature of Meta it is not uncommon for an RTOS or bare metal application to be started on other hardware threads by the bootloader. Since there is a single MMU switch which affects all threads, the MMU is traditionally configured by the bootloader prior to starting Linux. The bootloader passes a structure to Linux which among other things contains information about memory regions which have been mapped. Linux then assumes control of the local heap memory region. A kernel arguments string pointer or a flattened device tree pointer can be provided in the third argument. Signed-off-by: James Hogan --- arch/metag/kernel/head.S | 45 ++++ arch/metag/kernel/machines.c | 20 ++ arch/metag/kernel/setup.c | 556 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 621 insertions(+) create mode 100644 arch/metag/kernel/head.S create mode 100644 arch/metag/kernel/machines.c create mode 100644 arch/metag/kernel/setup.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S new file mode 100644 index 000000000000..8b1388663892 --- /dev/null +++ b/arch/metag/kernel/head.S @@ -0,0 +1,45 @@ + ! Copyright 2005,2006,2007,2009 Imagination Technologies + +#include +#include +#undef __exit + + __HEAD + ! Setup the stack and get going into _metag_start_kernel + .global __start + .type __start,function +__start: + ! D1Ar1 contains pTBI (ISTAT) + ! D0Ar2 contains pTBI + ! D1Ar3 contains __pTBISegs + ! D0Ar4 contains kernel arglist pointer + + MOVT D0Re0,#HI(___pTBIs) + ADD D0Re0,D0Re0,#LO(___pTBIs) + SETL [D0Re0],D0Ar2,D1Ar1 + MOVT D0Re0,#HI(___pTBISegs) + ADD D0Re0,D0Re0,#LO(___pTBISegs) + SETD [D0Re0],D1Ar3 + MOV A0FrP,#0 + MOV D0Re0,#0 + MOV D1Re0,#0 + MOV D1Ar3,#0 + MOV D1Ar1,D0Ar4 !Store kernel boot params + MOV D1Ar5,#0 + MOV D0Ar6,#0 +#ifdef CONFIG_METAG_DSP + MOV D0.8,#0 +#endif + MOVT A0StP,#HI(_init_thread_union) + ADD A0StP,A0StP,#LO(_init_thread_union) + ADD A0StP,A0StP,#THREAD_INFO_SIZE + MOVT D1RtP,#HI(_metag_start_kernel) + CALL D1RtP,#LO(_metag_start_kernel) + .size __start,.-__start + + !! Needed by TBX + .global __exit + .type __exit,function +__exit: + XOR TXENABLE,D0Re0,D0Re0 + .size __exit,.-__exit diff --git a/arch/metag/kernel/machines.c b/arch/metag/kernel/machines.c new file mode 100644 index 000000000000..1edf6ba193b1 --- /dev/null +++ b/arch/metag/kernel/machines.c @@ -0,0 +1,20 @@ +/* + * arch/metag/kernel/machines.c + * + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * Generic Meta Boards. + */ + +#include +#include +#include + +static const char *meta_boards_compat[] __initdata = { + "img,meta", + NULL, +}; + +MACHINE_START(META, "Generic Meta") + .dt_compat = meta_boards_compat, +MACHINE_END diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c new file mode 100644 index 000000000000..74e2c1f812a5 --- /dev/null +++ b/arch/metag/kernel/setup.c @@ -0,0 +1,556 @@ +/* + * Copyright (C) 2005-2012 Imagination Technologies Ltd. + * + * This file contains the architecture-dependant parts of system setup. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PRIV protect as many registers as possible. */ +#define DEFAULT_PRIV 0xff0f7f00 + +/* Enable unaligned access checking. */ +#define UNALIGNED_PRIV 0x00000010 + +#ifdef CONFIG_METAG_UNALIGNED +#define PRIV_BITS (DEFAULT_PRIV | UNALIGNED_PRIV) +#else +#define PRIV_BITS DEFAULT_PRIV +#endif + +extern char _heap_start[]; + +#ifdef CONFIG_METAG_BUILTIN_DTB +extern u32 __dtb_start[]; +#endif + +struct machine_desc *machine_desc __initdata; + +/* + * Map a Linux CPU number to a hardware thread ID + * In SMP this will be setup with the correct mapping at startup; in UP this + * will map to the HW thread on which we are running. + */ +u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID +}; + +/* + * Map a hardware thread ID to a Linux CPU number + * In SMP this will be fleshed out with the correct CPU ID for a particular + * hardware thread. In UP this will be initialised with the boot CPU ID. + */ +u8 hwthread_id_2_cpu[4] __read_mostly = { + [0 ... 3] = BAD_CPU_ID +}; + +/* The relative offset of the MMU mapped memory (from ldlk or bootloader) + * to the real physical memory. This is needed as we have to use the + * physical addresses in the MMU tables (pte entries), and not the virtual + * addresses. + * This variable is used in the __pa() and __va() macros, and should + * probably only be used via them. + */ +unsigned int meta_memoffset; + +static char __initdata *original_cmd_line; + +DEFINE_PER_CPU(PTBI, pTBI); + +/* + * Mapping are specified as "CPU_ID:HWTHREAD_ID", e.g. 
+ * + * "hwthread_map=0:1,1:2,2:3,3:0" + * + * Linux CPU ID HWTHREAD_ID + * --------------------------- + * 0 1 + * 1 2 + * 2 3 + * 3 0 + */ +static int __init parse_hwthread_map(char *p) +{ + int cpu; + + while (*p) { + cpu = (*p++) - '0'; + if (cpu < 0 || cpu > 9) + goto err_cpu; + + p++; /* skip semi-colon */ + cpu_2_hwthread_id[cpu] = (*p++) - '0'; + if (cpu_2_hwthread_id[cpu] >= 4) + goto err_thread; + hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu; + + if (*p == ',') + p++; /* skip comma */ + } + + return 0; +err_cpu: + pr_err("%s: hwthread_map cpu argument out of range\n", __func__); + return -EINVAL; +err_thread: + pr_err("%s: hwthread_map thread argument out of range\n", __func__); + return -EINVAL; +} +early_param("hwthread_map", parse_hwthread_map); + +void __init dump_machine_table(void) +{ + struct machine_desc *p; + const char **compat; + + pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n"); + for_each_machine_desc(p) { + pr_info("\t%s\t[", p->name); + for (compat = p->dt_compat; compat && *compat; ++compat) + printk(" '%s'", *compat); + printk(" ]\n"); + } + + pr_info("\nPlease check your kernel config and/or bootloader.\n"); + + hard_processor_halt(HALT_PANIC); +} + +#ifdef CONFIG_METAG_HALT_ON_PANIC +static int metag_panic_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + hard_processor_halt(HALT_PANIC); + return NOTIFY_DONE; +} + +static struct notifier_block metag_panic_block = { + metag_panic_event, + NULL, + 0 +}; +#endif + +void __init setup_arch(char **cmdline_p) +{ + unsigned long start_pfn; + unsigned long text_start = (unsigned long)(&_stext); + unsigned long cpu = smp_processor_id(); + unsigned long heap_start, heap_end; + unsigned long start_pte; + PTBI _pTBI; + PTBISEG p_heap; + int heap_id, i; + + metag_cache_probe(); + + /* try interpreting the argument as a device tree */ + machine_desc = setup_machine_fdt(original_cmd_line); + /* if it doesn't look like a device tree it must be a command line */ + if (!machine_desc) { +#ifdef CONFIG_METAG_BUILTIN_DTB + /* try the embedded device tree */ + machine_desc = setup_machine_fdt(__dtb_start); + if (!machine_desc) + panic("Invalid embedded device tree."); +#else + /* use the default machine description */ + machine_desc = default_machine_desc(); +#endif +#ifndef CONFIG_CMDLINE_FORCE + /* append the bootloader cmdline to any builtin fdt cmdline */ + if (boot_command_line[0] && original_cmd_line[0]) + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, original_cmd_line, + COMMAND_LINE_SIZE); +#endif + } + setup_meta_clocks(machine_desc->clocks); + + *cmdline_p = boot_command_line; + parse_early_param(); + + /* + * Make sure we don't alias in dcache or icache + */ + check_for_cache_aliasing(cpu); + + +#ifdef CONFIG_METAG_HALT_ON_PANIC + atomic_notifier_chain_register(&panic_notifier_list, + &metag_panic_block); +#endif + +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + + if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT)) + panic("Privilege must be enabled for this thread."); + + _pTBI = __TBI(TBID_ISTAT_BIT); + + per_cpu(pTBI, cpu) = _pTBI; + + if (!per_cpu(pTBI, cpu)) + panic("No TBI found!"); + + /* + * Initialize all interrupt vectors to our copy of __TBIUnExpXXX, + * rather than the version from the bootloader. This makes call + * stacks easier to understand and may allow us to unmap the + * bootloader at some point. + * + * We need to keep the LWK handler that TBI installed in order to + * be able to do inter-thread comms. 
+ */ + for (i = 0; i <= TBID_SIGNUM_MAX; i++) + if (i != TBID_SIGNUM_LWK) + _pTBI->fnSigs[i] = __TBIUnExpXXX; + + /* A Meta requirement is that the kernel is loaded (virtually) + * at the PAGE_OFFSET. + */ + if (PAGE_OFFSET != text_start) + panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.", + PAGE_OFFSET, text_start); + + start_pte = mmu_read_second_level_page(text_start); + + /* + * Kernel pages should have the PRIV bit set by the bootloader. + */ + if (!(start_pte & _PAGE_KERNEL)) + panic("kernel pte does not have PRIV set"); + + /* + * See __pa and __va in include/asm/page.h. + * This value is negative when running in local space but the + * calculations work anyway. + */ + meta_memoffset = text_start - (start_pte & PAGE_MASK); + + /* Now lets look at the heap space */ + heap_id = (__TBIThreadId() & TBID_THREAD_BITS) + + TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP); + + p_heap = __TBIFindSeg(NULL, heap_id); + + if (!p_heap) + panic("Could not find heap from TBI!"); + + /* The heap begins at the first full page after the kernel data. */ + heap_start = (unsigned long) &_heap_start; + + /* The heap ends at the end of the heap segment specified with + * ldlk. + */ + if (is_global_space(text_start)) { + pr_debug("WARNING: running in global space!\n"); + heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes; + } else { + heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes; + } + + ROOT_DEV = Root_RAM0; + + /* init_mm is the mm struct used for the first task. It is then + * cloned for all other tasks spawned from that task. + * + * Note - we are using the virtual addresses here. + */ + init_mm.start_code = (unsigned long)(&_stext); + init_mm.end_code = (unsigned long)(&_etext); + init_mm.end_data = (unsigned long)(&_edata); + init_mm.brk = (unsigned long)heap_start; + + min_low_pfn = PFN_UP(__pa(text_start)); + max_low_pfn = PFN_DOWN(__pa(heap_end)); + + pfn_base = min_low_pfn; + + /* Round max_pfn up to a 4Mb boundary. The free_bootmem_node() + * call later makes sure to keep the rounded up pages marked reserved. + */ + max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1); + max_pfn &= ~((1 << MAX_ORDER) - 1); + + start_pfn = PFN_UP(__pa(heap_start)); + + if (min_low_pfn & ((1 << MAX_ORDER) - 1)) { + /* Theoretically, we could expand the space that the + * bootmem allocator covers - much as we do for the + * 'high' address, and then tell the bootmem system + * that the lowest chunk is 'not available'. Right + * now it is just much easier to constrain the + * user to always MAX_ORDER align their kernel space. + */ + + panic("Kernel must be %d byte aligned, currently at %#lx.", + 1 << (MAX_ORDER + PAGE_SHIFT), + min_low_pfn << PAGE_SHIFT); + } + +#ifdef CONFIG_HIGHMEM + highstart_pfn = highend_pfn = max_pfn; + high_memory = (void *) __va(PFN_PHYS(highstart_pfn)); +#else + high_memory = (void *)__va(PFN_PHYS(max_pfn)); +#endif + + paging_init(heap_end); + + setup_txprivext(); + + /* Setup the boot cpu's mapping. The rest will be setup below. 
*/ + cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id(); + hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id(); + + unflatten_device_tree(); + +#ifdef CONFIG_SMP + smp_init_cpus(); +#endif + + if (machine_desc->init_early) + machine_desc->init_early(); +} + +static int __init customize_machine(void) +{ + /* customizes platform devices, or adds new ones */ + if (machine_desc->init_machine) + machine_desc->init_machine(); + return 0; +} +arch_initcall(customize_machine); + +static int __init init_machine_late(void) +{ + if (machine_desc->init_late) + machine_desc->init_late(); + return 0; +} +late_initcall(init_machine_late); + +#ifdef CONFIG_PROC_FS +/* + * Get CPU information for use by the procfs. + */ +static const char *get_cpu_capabilities(unsigned int txenable) +{ +#ifdef CONFIG_METAG_META21 + /* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */ + int coreid = metag_in32(METAC_CORE_ID); + unsigned int dsp_type = (coreid >> 3) & 7; + unsigned int fpu_type = (coreid >> 7) & 3; + + switch (dsp_type | fpu_type << 3) { + case (0x00): return "EDSP"; + case (0x01): return "DSP"; + case (0x08): return "EDSP+LFPU"; + case (0x09): return "DSP+LFPU"; + case (0x10): return "EDSP+FPU"; + case (0x11): return "DSP+FPU"; + } + return "UNKNOWN"; + +#else + if (!(txenable & TXENABLE_CLASS_BITS)) + return "DSP"; + else + return ""; +#endif +} + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + const char *cpu; + unsigned int txenable, thread_id, major, minor; + unsigned long clockfreq = get_coreclock(); +#ifdef CONFIG_SMP + int i; + unsigned long lpj; +#endif + + cpu = "META"; + + txenable = __core_reg_get(TXENABLE); + major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S; + minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S; + thread_id = (txenable >> 8) & 0x3; + +#ifdef CONFIG_SMP + for_each_online_cpu(i) { + lpj = per_cpu(cpu_data, i).loops_per_jiffy; + txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, + cpu_2_hwthread_id[i]); + + seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n" + "Clocking:\t%lu.%1luMHz\n" + "BogoMips:\t%lu.%02lu\n" + "Calibration:\t%lu loops\n" + "Capabilities:\t%s\n\n", + cpu, major, minor, i, + clockfreq / 1000000, (clockfreq / 100000) % 10, + lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100, + lpj, + get_cpu_capabilities(txenable)); + } +#else + seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n" + "Clocking:\t%lu.%1luMHz\n" + "BogoMips:\t%lu.%02lu\n" + "Calibration:\t%lu loops\n" + "Capabilities:\t%s\n", + cpu, major, minor, thread_id, + clockfreq / 1000000, (clockfreq / 100000) % 10, + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100, + loops_per_jiffy, + get_cpu_capabilities(txenable)); +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_METAG_L2C + if (meta_l2c_is_present()) { + seq_printf(m, "L2 cache:\t%s\n" + "L2 cache size:\t%d KB\n", + meta_l2c_is_enabled() ? "enabled" : "disabled", + meta_l2c_size() >> 10); + } +#endif + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return (void *)(*pos == 0); +} +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + return NULL; +} +static void c_stop(struct seq_file *m, void *v) +{ +} +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; +#endif /* CONFIG_PROC_FS */ + +void __init metag_start_kernel(char *args) +{ + /* Zero the timer register so timestamps are from the point at + * which the kernel started running. 
+ */ + __core_reg_set(TXTIMER, 0); + + /* Clear the bss. */ + memset(__bss_start, 0, + (unsigned long)__bss_stop - (unsigned long)__bss_start); + + /* Remember where these are for use in setup_arch */ + original_cmd_line = args; + + current_thread_info()->cpu = hard_processor_id(); + + start_kernel(); +} + +/* + * Setup TXPRIVEXT register to be prevent userland from touching our + * precious registers. + */ +void setup_txprivext(void) +{ + __core_reg_set(TXPRIVEXT, PRIV_BITS); +} + +PTBI pTBI_get(unsigned int cpu) +{ + return per_cpu(pTBI, cpu); +} + +#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU) +char capabilites[] = "dsp fpu"; +#elif defined(CONFIG_METAG_DSP) +char capabilites[] = "dsp"; +#elif defined(CONFIG_METAG_FPU) +char capabilites[] = "fpu"; +#else +char capabilites[] = ""; +#endif + +static struct ctl_table caps_kern_table[] = { + { + .procname = "capabilities", + .data = capabilites, + .maxlen = sizeof(capabilites), + .mode = 0444, + .proc_handler = proc_dostring, + }, + {} +}; + +static struct ctl_table caps_root_table[] = { + { + .procname = "kernel", + .mode = 0555, + .child = caps_kern_table, + }, + {} +}; + +static int __init capabilities_register_sysctl(void) +{ + struct ctl_table_header *caps_table_header; + + caps_table_header = register_sysctl_table(caps_root_table); + if (!caps_table_header) { + pr_err("Unable to register CAPABILITIES sysctl\n"); + return -ENOMEM; + } + + return 0; +} + +core_initcall(capabilities_register_sysctl); -- cgit v1.2.3 From c438b58e65462cfff172b396d03d6bc45c971fca Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:17 +0100 Subject: metag: TCM support Add some TCM support Signed-off-by: James Hogan --- arch/metag/include/asm/mmzone.h | 42 +++++++++++ arch/metag/include/asm/sparsemem.h | 13 ++++ arch/metag/include/asm/tcm.h | 30 ++++++++ arch/metag/kernel/tcm.c | 151 +++++++++++++++++++++++++++++++++++++ arch/metag/mm/numa.c | 81 ++++++++++++++++++++ 5 files changed, 317 insertions(+) create mode 100644 arch/metag/include/asm/mmzone.h create mode 100644 arch/metag/include/asm/sparsemem.h create mode 100644 arch/metag/include/asm/tcm.h create mode 100644 arch/metag/kernel/tcm.c create mode 100644 arch/metag/mm/numa.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/mmzone.h b/arch/metag/include/asm/mmzone.h new file mode 100644 index 000000000000..9c88a9c65f59 --- /dev/null +++ b/arch/metag/include/asm/mmzone.h @@ -0,0 +1,42 @@ +#ifndef __ASM_METAG_MMZONE_H +#define __ASM_METAG_MMZONE_H + +#ifdef CONFIG_NEED_MULTIPLE_NODES +#include + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) + +static inline int pfn_to_nid(unsigned long pfn) +{ + int nid; + + for (nid = 0; nid < MAX_NUMNODES; nid++) + if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid)) + break; + + return nid; +} + +static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn) +{ + return NODE_DATA(pfn_to_nid(pfn)); +} + +/* arch/metag/mm/numa.c */ +void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end); +#else +static inline void +setup_bootmem_node(int nid, unsigned long start, unsigned long end) +{ +} +#endif /* CONFIG_NEED_MULTIPLE_NODES */ + +#ifdef CONFIG_NUMA +/* SoC specific mem init */ +void __init soc_mem_setup(void); +#else +static inline void __init soc_mem_setup(void) {}; +#endif + +#endif /* __ASM_METAG_MMZONE_H */ diff --git a/arch/metag/include/asm/sparsemem.h b/arch/metag/include/asm/sparsemem.h new file mode 100644 index 000000000000..03fe255d697a 
--- /dev/null +++ b/arch/metag/include/asm/sparsemem.h @@ -0,0 +1,13 @@ +#ifndef __ASM_METAG_SPARSEMEM_H +#define __ASM_METAG_SPARSEMEM_H + +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSADDR_BITS 2^N: how much physical address space we have + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#define SECTION_SIZE_BITS 26 +#define MAX_PHYSADDR_BITS 32 +#define MAX_PHYSMEM_BITS 32 + +#endif /* __ASM_METAG_SPARSEMEM_H */ diff --git a/arch/metag/include/asm/tcm.h b/arch/metag/include/asm/tcm.h new file mode 100644 index 000000000000..7711c317b1d2 --- /dev/null +++ b/arch/metag/include/asm/tcm.h @@ -0,0 +1,30 @@ +#ifndef __ASM_TCM_H__ +#define __ASM_TCM_H__ + +#include +#include + +struct tcm_allocation { + struct list_head list; + unsigned int tag; + unsigned long addr; + unsigned long size; +}; + +/* + * TCM memory region descriptor. + */ +struct tcm_region { + unsigned int tag; + struct resource res; +}; + +#define TCM_INVALID_TAG 0xffffffff + +unsigned long tcm_alloc(unsigned int tag, size_t len); +void tcm_free(unsigned int tag, unsigned long addr, size_t len); +unsigned int tcm_lookup_tag(unsigned long p); + +int tcm_add_region(struct tcm_region *reg); + +#endif diff --git a/arch/metag/kernel/tcm.c b/arch/metag/kernel/tcm.c new file mode 100644 index 000000000000..5d102b31ce84 --- /dev/null +++ b/arch/metag/kernel/tcm.c @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2010 Imagination Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct tcm_pool { + struct list_head list; + unsigned int tag; + unsigned long start; + unsigned long end; + struct gen_pool *pool; +}; + +static LIST_HEAD(pool_list); + +static struct tcm_pool *find_pool(unsigned int tag) +{ + struct list_head *lh; + struct tcm_pool *pool; + + list_for_each(lh, &pool_list) { + pool = list_entry(lh, struct tcm_pool, list); + if (pool->tag == tag) + return pool; + } + + return NULL; +} + +/** + * tcm_alloc - allocate memory from a TCM pool + * @tag: tag of the pool to allocate memory from + * @len: number of bytes to be allocated + * + * Allocate the requested number of bytes from the pool matching + * the specified tag. Returns the address of the allocated memory + * or zero on failure. + */ +unsigned long tcm_alloc(unsigned int tag, size_t len) +{ + unsigned long vaddr; + struct tcm_pool *pool; + + pool = find_pool(tag); + if (!pool) + return 0; + + vaddr = gen_pool_alloc(pool->pool, len); + if (!vaddr) + return 0; + + return vaddr; +} + +/** + * tcm_free - free a block of memory to a TCM pool + * @tag: tag of the pool to free memory to + * @addr: address of the memory to be freed + * @len: number of bytes to be freed + * + * Free the requested number of bytes at a specific address to the + * pool matching the specified tag. + */ +void tcm_free(unsigned int tag, unsigned long addr, size_t len) +{ + struct tcm_pool *pool; + + pool = find_pool(tag); + if (!pool) + return; + gen_pool_free(pool->pool, addr, len); +} + +/** + * tcm_lookup_tag - find the tag matching an address + * @p: memory address to lookup the tag for + * + * Find the tag of the tcm memory region that contains the + * specified address. Returns %TCM_INVALID_TAG if no such + * memory region could be found. 
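+ *
+ * Illustrative use of the TCM API as a whole (a sketch, not part of
+ * this patch; assumes "tag" names a region previously registered with
+ * tcm_add_region()):
+ *
+ *	unsigned long addr = tcm_alloc(tag, 64);
+ *	if (addr && tcm_lookup_tag(addr) == tag)
+ *		tcm_free(tag, addr, 64);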
+ */ +unsigned int tcm_lookup_tag(unsigned long p) +{ + struct list_head *lh; + struct tcm_pool *pool; + unsigned long addr = (unsigned long) p; + + list_for_each(lh, &pool_list) { + pool = list_entry(lh, struct tcm_pool, list); + if (addr >= pool->start && addr < pool->end) + return pool->tag; + } + + return TCM_INVALID_TAG; +} + +/** + * tcm_add_region - add a memory region to TCM pool list + * @reg: descriptor of region to be added + * + * Add a region of memory to the TCM pool list. Returns 0 on success. + */ +int __init tcm_add_region(struct tcm_region *reg) +{ + struct tcm_pool *pool; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + pr_err("Failed to alloc memory for TCM pool!\n"); + return -ENOMEM; + } + + pool->tag = reg->tag; + pool->start = reg->res.start; + pool->end = reg->res.end; + + /* + * 2^3 = 8 bytes granularity to allow for 64bit access alignment. + * -1 = NUMA node specifier. + */ + pool->pool = gen_pool_create(3, -1); + + if (!pool->pool) { + pr_err("Failed to create TCM pool!\n"); + kfree(pool); + return -ENOMEM; + } + + if (gen_pool_add(pool->pool, reg->res.start, + reg->res.end - reg->res.start + 1, -1)) { + pr_err("Failed to add memory to TCM pool!\n"); + return -ENOMEM; + } + pr_info("Added %s TCM pool (%08x bytes @ %08x)\n", + reg->res.name, reg->res.end - reg->res.start + 1, + reg->res.start); + + list_add_tail(&pool->list, &pool_list); + + return 0; +} diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c new file mode 100644 index 000000000000..9ae578c9b620 --- /dev/null +++ b/arch/metag/mm/numa.c @@ -0,0 +1,81 @@ +/* + * Multiple memory node support for Meta machines + * + * Copyright (C) 2007 Paul Mundt + * Copyright (C) 2010 Imagination Technologies Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include + +struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL_GPL(node_data); + +extern char _heap_start[]; + +/* + * On Meta machines the conventional approach is to stash system RAM + * in node 0, and other memory blocks in to node 1 and up, ordered by + * latency. Each node's pgdat is node-local at the beginning of the node, + * immediately followed by the node mem map. 
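+ *
+ * Pictorially (an illustration only, not to scale):
+ *
+ *	node start                                          node end
+ *	+--------------+--------------+----------------------------+
+ *	| node pgdat   | node mem map | node-local memory ...      |
+ *	+--------------+--------------+----------------------------+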
+ */ +void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) +{ + unsigned long bootmap_pages, bootmem_paddr; + unsigned long start_pfn, end_pfn; + unsigned long pgdat_paddr; + + /* Don't allow bogus node assignment */ + BUG_ON(nid > MAX_NUMNODES || nid <= 0); + + start_pfn = start >> PAGE_SHIFT; + end_pfn = end >> PAGE_SHIFT; + + memblock_add(start, end - start); + + memblock_set_node(PFN_PHYS(start_pfn), + PFN_PHYS(end_pfn - start_pfn), nid); + + /* Node-local pgdat */ + pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data), + SMP_CACHE_BYTES, end); + NODE_DATA(nid) = __va(pgdat_paddr); + memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); + + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; + + /* Node-local bootmap */ + bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); + bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, + PAGE_SIZE, end); + init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, + start_pfn, end_pfn); + + free_bootmem_with_active_regions(nid, end_pfn); + + /* Reserve the pgdat and bootmap space with the bootmem allocator */ + reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK, + sizeof(struct pglist_data), BOOTMEM_DEFAULT); + reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, + bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); + + /* It's up */ + node_set_online(nid); + + /* Kick sparsemem */ + sparse_memory_present_with_active_regions(nid); +} + +void __init __weak soc_mem_setup(void) +{ +} -- cgit v1.2.3 From 262d96b0deb44ed58823447825d6efa5dddb4108 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:27 +0100 Subject: metag: Signal handling Add signal handling code for metag. Signed-off-by: James Hogan Cc: Al Viro --- arch/metag/include/uapi/asm/sigcontext.h | 31 +++ arch/metag/include/uapi/asm/siginfo.h | 8 + arch/metag/kernel/signal.c | 344 +++++++++++++++++++++++++++++++ 3 files changed, 383 insertions(+) create mode 100644 arch/metag/include/uapi/asm/sigcontext.h create mode 100644 arch/metag/include/uapi/asm/siginfo.h create mode 100644 arch/metag/kernel/signal.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/uapi/asm/sigcontext.h b/arch/metag/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..ef79a910c1c4 --- /dev/null +++ b/arch/metag/include/uapi/asm/sigcontext.h @@ -0,0 +1,31 @@ +#ifndef _ASM_METAG_SIGCONTEXT_H +#define _ASM_METAG_SIGCONTEXT_H + +#include + +/* + * In a sigcontext structure we need to store the active state of the + * user process so that it does not get trashed when we call the signal + * handler. That not really the same as a user context that we are + * going to store on syscall etc. + */ +struct sigcontext { + struct user_gp_regs regs; /* needs to be first */ + + /* + * Catch registers describing a memory fault. + * If USER_GP_REGS_STATUS_CATCH_BIT is set in regs.status then catch + * buffers have been saved and will be replayed on sigreturn. + * Clear that bit to discard the catch state instead of replaying it. + */ + struct user_cb_regs cb; + + /* + * Read pipeline state. This will get restored on sigreturn. 
+ */ + struct user_rp_state rp; + + unsigned long oldmask; +}; + +#endif diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h new file mode 100644 index 000000000000..b2e0c8b62aef --- /dev/null +++ b/arch/metag/include/uapi/asm/siginfo.h @@ -0,0 +1,8 @@ +#ifndef _METAG_SIGINFO_H +#define _METAG_SIGINFO_H + +#define __ARCH_SI_TRAPNO + +#include + +#endif diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c new file mode 100644 index 000000000000..3be61cf0b147 --- /dev/null +++ b/arch/metag/kernel/signal.c @@ -0,0 +1,344 @@ +/* + * Copyright (C) 1991,1992 Linus Torvalds + * Copyright (C) 2005-2012 Imagination Technologies Ltd. + * + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define REG_FLAGS ctx.SaveMask +#define REG_RETVAL ctx.DX[0].U0 +#define REG_SYSCALL ctx.DX[0].U1 +#define REG_SP ctx.AX[0].U0 +#define REG_ARG1 ctx.DX[3].U1 +#define REG_ARG2 ctx.DX[3].U0 +#define REG_ARG3 ctx.DX[2].U1 +#define REG_PC ctx.CurrPC +#define REG_RTP ctx.DX[4].U1 + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; + unsigned long retcode[2]; +}; + +static int restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) +{ + int err; + + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + + err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL, + &sc->regs); + if (!err) + err = metag_cb_regs_copyin(regs, 0, + sizeof(struct user_cb_regs), NULL, + &sc->cb); + if (!err) + err = metag_rp_state_copyin(regs, 0, + sizeof(struct user_rp_state), NULL, + &sc->rp); + + /* This is a user-mode context. */ + regs->REG_FLAGS |= TBICTX_PRIV_BIT; + + return err; +} + +long sys_rt_sigreturn(void) +{ + /* NOTE - Meta stack goes UPWARDS - so we wind the stack back */ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame; + sigset_t set; + + frame = (__force struct rt_sigframe __user *)(regs->REG_SP - + sizeof(*frame)); + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->REG_RETVAL; + +badframe: + force_sig(SIGSEGV, current); + + return 0; +} + +static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + int err; + + err = metag_gp_regs_copyout(regs, 0, sizeof(struct user_gp_regs), NULL, + &sc->regs); + + if (!err) + err = metag_cb_regs_copyout(regs, 0, + sizeof(struct user_cb_regs), NULL, + &sc->cb); + if (!err) + err = metag_rp_state_copyout(regs, 0, + sizeof(struct user_rp_state), NULL, + &sc->rp); + + /* OK, clear that cbuf flag in the old context, or our stored + * catch buffer will be restored when we go to call the signal + * handler. Also clear out the CBRP RA/RD pipe bit incase + * that is pending as well! + * Note that as we have already stored this context, these + * flags will get restored on sigreturn to their original + * state. 
+ */ + regs->REG_FLAGS &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT | + TBICTX_CBRP_BIT); + + /* Clear out the LSM_STEP bits in case we are in the middle of + * and MSET/MGET. + */ + regs->ctx.Flags &= ~TXSTATUS_LSM_STEP_BITS; + + err |= __put_user(mask, &sc->oldmask); + + return err; +} + +/* + * Determine which stack to use.. + */ +static void __user *get_sigframe(struct k_sigaction *ka, unsigned long sp, + size_t frame_size) +{ + /* Meta stacks grows upwards */ + if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) + sp = current->sas_ss_sp; + + sp = (sp + 7) & ~7; /* 8byte align stack */ + + return (void __user *)sp; +} + +static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = -EFAULT; + unsigned long code; + + frame = get_sigframe(ka, regs->REG_SP, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto out; + + err = copy_siginfo_to_user(&frame->info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, (unsigned long __user *)&frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->REG_SP); + err |= setup_sigcontext(&frame->uc.uc_mcontext, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (err) + goto out; + + /* Set up to return from userspace. */ + + /* MOV D1Re0 (D1.0), #__NR_rt_sigreturn */ + code = 0x03000004 | (__NR_rt_sigreturn << 3); + err |= __put_user(code, (unsigned long __user *)(&frame->retcode[0])); + + /* SWITCH #__METAG_SW_SYS */ + code = __METAG_SW_ENCODING(SYS); + err |= __put_user(code, (unsigned long __user *)(&frame->retcode[1])); + + if (err) + goto out; + + /* Set up registers for signal handler */ + regs->REG_RTP = (unsigned long) frame->retcode; + regs->REG_SP = (unsigned long) frame + sizeof(*frame); + regs->REG_ARG1 = sig; + regs->REG_ARG2 = (unsigned long) &frame->info; + regs->REG_ARG3 = (unsigned long) &frame->uc; + regs->REG_PC = (unsigned long) ka->sa.sa_handler; + + pr_debug("SIG deliver (%s:%d): sp=%p pc=%08x pr=%08x\n", + current->comm, current->pid, frame, regs->REG_PC, + regs->REG_RTP); + + /* Now pass size of 'new code' into sigtramp so we can do a more + * effective cache flush - directed rather than 'full flush'. + */ + flush_cache_sigtramp(regs->REG_RTP, sizeof(frame->retcode)); +out: + if (err) { + force_sigsegv(sig, current); + return -EFAULT; + } + return 0; +} + +static void handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + + /* Set up the stack frame */ + if (setup_rt_frame(sig, ka, info, oldset, regs)) + return; + + signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); +} + + /* + * Notes for Meta. + * We have moved from the old 2.4.9 SH way of using syscall_nr (in the stored + * context) to passing in the syscall flag on the stack. + * This is because having syscall_nr in our context does not fit with TBX, and + * corrupted the stack. + */ +static int do_signal(struct pt_regs *regs, int syscall) +{ + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; + struct k_sigaction ka; + siginfo_t info; + int signr; + int restart = 0; + + /* + * By the end of rt_sigreturn the context describes the point that the + * signal was taken (which may happen to be just before a syscall if + * it's already been restarted). 
This should *never* be mistaken for a + * system call in need of restarting. + */ + if (syscall == __NR_rt_sigreturn) + syscall = -1; + + /* Did we come from a system call? */ + if (syscall >= 0) { + continue_addr = regs->REG_PC; + restart_addr = continue_addr - 4; + retval = regs->REG_RETVAL; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PC. + */ + switch (retval) { + case -ERESTART_RESTARTBLOCK: + restart = -2; + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + ++restart; + regs->REG_PC = restart_addr; + break; + } + } + + /* + * Get the signal to deliver. When running under ptrace, at this point + * the debugger may change all our registers ... + */ + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + /* + * Depending on the signal settings we may need to revert the decision + * to restart the system call. But skip this if a debugger has chosen to + * restart at a different PC. + */ + if (regs->REG_PC != restart_addr) + restart = 0; + if (signr > 0) { + if (unlikely(restart)) { + if (retval == -ERESTARTNOHAND + || retval == -ERESTART_RESTARTBLOCK + || (retval == -ERESTARTSYS + && !(ka.sa.sa_flags & SA_RESTART))) { + regs->REG_RETVAL = -EINTR; + regs->REG_PC = continue_addr; + } + } + + /* Whee! Actually deliver the signal. */ + handle_signal(signr, &info, &ka, regs); + return 0; + } + + /* Handlerless -ERESTART_RESTARTBLOCK re-enters via restart_syscall */ + if (unlikely(restart < 0)) + regs->REG_SYSCALL = __NR_restart_syscall; + + /* + * If there's no signal to deliver, we just put the saved sigmask back. + */ + restore_saved_sigmask(); + + return restart; +} + +int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, + int syscall) +{ + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = -1; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; +} -- cgit v1.2.3 From 29dd78cf0b526d24063364a8c634b3e92514c1a2 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 16:02:12 +0100 Subject: metag: Device tree Add device tree files to arch/metag. 
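A board port is then expected to provide its own dts on top of the
skeleton. The following is a hypothetical example, not part of this
patch (the board compatible string and model name are invented):

	/dts-v1/;
	/include/ "skeleton.dtsi"

	/ {
		compatible = "img,example-board", "img,meta";
		model = "Example Meta board";
	};

setup_machine_fdt() matches the root compatible list against each
machine_desc, with "img,meta" acting as the generic fallback handled by
the MACHINE_START table added in the boot patch.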
Signed-off-by: James Hogan Reviewed-by: Vineet Gupta --- arch/metag/boot/dts/Makefile | 16 +++++++ arch/metag/boot/dts/skeleton.dts | 10 ++++ arch/metag/boot/dts/skeleton.dtsi | 14 ++++++ arch/metag/include/asm/prom.h | 23 ++++++++++ arch/metag/kernel/devtree.c | 97 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 160 insertions(+) create mode 100644 arch/metag/boot/dts/Makefile create mode 100644 arch/metag/boot/dts/skeleton.dts create mode 100644 arch/metag/boot/dts/skeleton.dtsi create mode 100644 arch/metag/include/asm/prom.h create mode 100644 arch/metag/kernel/devtree.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile new file mode 100644 index 000000000000..e0b5afd8bde8 --- /dev/null +++ b/arch/metag/boot/dts/Makefile @@ -0,0 +1,16 @@ +dtb-y += skeleton.dtb + +# Built-in dtb +builtindtb-y := skeleton + +ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"") + builtindtb-y := $(CONFIG_METAG_BUILTIN_DTB_NAME) +endif +obj-$(CONFIG_METAG_BUILTIN_DTB) += $(patsubst "%",%,$(builtindtb-y)).dtb.o + +targets += dtbs +targets += $(dtb-y) + +dtbs: $(addprefix $(obj)/, $(dtb-y)) + +clean-files += *.dtb diff --git a/arch/metag/boot/dts/skeleton.dts b/arch/metag/boot/dts/skeleton.dts new file mode 100644 index 000000000000..7244d1f0d555 --- /dev/null +++ b/arch/metag/boot/dts/skeleton.dts @@ -0,0 +1,10 @@ +/* + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +/dts-v1/; + +/include/ "skeleton.dtsi" diff --git a/arch/metag/boot/dts/skeleton.dtsi b/arch/metag/boot/dts/skeleton.dtsi new file mode 100644 index 000000000000..78229eacced7 --- /dev/null +++ b/arch/metag/boot/dts/skeleton.dtsi @@ -0,0 +1,14 @@ +/* + * Skeleton device tree; the bare minimum needed to boot; just include and + * add a compatible value. The bootloader will typically populate the memory + * node. + */ + +/ { + compatible = "img,meta"; + #address-cells = <1>; + #size-cells = <1>; + chosen { }; + aliases { }; + memory { device_type = "memory"; reg = <0 0>; }; +}; diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h new file mode 100644 index 000000000000..d881396c392c --- /dev/null +++ b/arch/metag/include/asm/prom.h @@ -0,0 +1,23 @@ +/* + * arch/metag/include/asm/prom.h + * + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * Based on ARM version: + * Copyright (C) 2009 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef __ASM_METAG_PROM_H +#define __ASM_METAG_PROM_H + +#include +#define HAVE_ARCH_DEVTREE_FIXUPS + +extern struct machine_desc *setup_machine_fdt(void *dt); +extern void metag_dt_memblock_reserve(void); + +#endif /* __ASM_METAG_PROM_H */ diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c new file mode 100644 index 000000000000..5b6b1d855492 --- /dev/null +++ b/arch/metag/kernel/devtree.c @@ -0,0 +1,97 @@ +/* + * linux/arch/metag/kernel/devtree.c + * + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * Based on ARM version: + * Copyright (C) 2009 Canonical Ltd. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+	pr_err("%s(%llx, %llx)\n",
+	       __func__, base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return alloc_bootmem_align(size, align);
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+	struct boot_param_header *devtree = dt;
+	struct machine_desc *mdesc, *mdesc_best = NULL;
+	unsigned int score, mdesc_score = ~1;
+	unsigned long dt_root;
+	const char *model;
+
+	/* check device tree validity */
+	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+		return NULL;
+
+	/* Search the mdescs for the 'best' compatible value match */
+	initial_boot_params = devtree;
+	dt_root = of_get_flat_dt_root();
+
+	for_each_machine_desc(mdesc) {
+		score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+		if (score > 0 && score < mdesc_score) {
+			mdesc_best = mdesc;
+			mdesc_score = score;
+		}
+	}
+	if (!mdesc_best) {
+		const char *prop;
+		long size;
+
+		pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ ");
+
+		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+		if (prop) {
+			while (size > 0) {
+				printk("'%s' ", prop);
+				size -= strlen(prop) + 1;
+				prop += strlen(prop) + 1;
+			}
+		}
+		printk("]\n\n");
+
+		dump_machine_table(); /* does not return */
+	}
+
+	model = of_get_flat_dt_prop(dt_root, "model", NULL);
+	if (!model)
+		model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+	if (!model)
+		model = "";
+	pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
+
+	/* Retrieve various information from the /chosen node */
+	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+	return mdesc_best;
+}
-- cgit v1.2.3

From bc3966bf1583a6c22b76397535174445c43952de Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Tue, 9 Oct 2012 10:54:36 +0100
Subject: metag: ptrace

The ptrace interface for metag provides access to some core register
sets using the PTRACE_GETREGSET and PTRACE_SETREGSET operations. The
details of the internal context structures are abstracted into user API
structures to both ease use and allow flexibility to change the
internal context layouts.

Copyin and copyout functions for these register sets are exposed to
allow signal handling code to use them to copy to and from the signal
context.

struct user_gp_regs (NT_PRSTATUS) provides access to the core general
purpose register context.

struct user_cb_regs (NT_METAG_CBUF) provides access to the TXCATCH*
registers which contain information about a memory fault, unaligned
access error or watchpoint. This can be modified to alter the way the
fault is replayed on resume ("catch replay"), or to prevent the replay
taking place.

struct user_rp_state (NT_METAG_RPIPE) provides access to the state of
the Meta read pipeline which can be used to hide memory latencies in
hand optimised data loops.

Extended DSP register state, DSP RAM, and hardware breakpoint registers
aren't yet exposed through ptrace.
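For example, a debugger can fetch a stopped tracee's catch buffer state
with PTRACE_GETREGSET and the NT_METAG_CBUF note type. The following is
an illustrative userspace sketch, not part of this patch (error
handling omitted; struct user_cb_regs comes from the new uapi ptrace.h,
and the note constants from elf.h once they are available there):

	#include <elf.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	static void read_catch_state(pid_t pid)
	{
		struct user_cb_regs cb;
		struct iovec iov = {
			.iov_base = &cb,
			.iov_len = sizeof(cb),
		};

		/* the tracee must already be ptrace-stopped */
		ptrace(PTRACE_GETREGSET, pid, (void *)NT_METAG_CBUF, &iov);

		/*
		 * cb.flags, cb.addr and cb.data now describe any pending
		 * catch state; write them back with PTRACE_SETREGSET to
		 * modify or cancel the replay.
		 */
	}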
Signed-off-by: James Hogan Cc: Andrew Morton Cc: Denys Vlasenko Cc: Arnd Bergmann Cc: Tony Lindgren Cc: "Paul E. McKenney" --- arch/metag/include/asm/ptrace.h | 60 ++++++ arch/metag/include/uapi/asm/ptrace.h | 113 +++++++++++ arch/metag/kernel/ptrace.c | 380 +++++++++++++++++++++++++++++++++++ include/uapi/linux/elf.h | 2 + 4 files changed, 555 insertions(+) create mode 100644 arch/metag/include/asm/ptrace.h create mode 100644 arch/metag/include/uapi/asm/ptrace.h create mode 100644 arch/metag/kernel/ptrace.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/ptrace.h b/arch/metag/include/asm/ptrace.h new file mode 100644 index 000000000000..fcabc18daf25 --- /dev/null +++ b/arch/metag/include/asm/ptrace.h @@ -0,0 +1,60 @@ +#ifndef _METAG_PTRACE_H +#define _METAG_PTRACE_H + +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +struct pt_regs { + TBICTX ctx; + TBICTXEXTCB0 extcb0[5]; +}; + +#define user_mode(regs) (((regs)->ctx.SaveMask & TBICTX_PRIV_BIT) > 0) + +#define instruction_pointer(regs) ((unsigned long)(regs)->ctx.CurrPC) +#define profile_pc(regs) instruction_pointer(regs) + +#define task_pt_regs(task) \ + ((struct pt_regs *)(task_stack_page(task) + \ + sizeof(struct thread_info))) + +#define current_pt_regs() \ + ((struct pt_regs *)((char *)current_thread_info() + \ + sizeof(struct thread_info))) + +int syscall_trace_enter(struct pt_regs *regs); +void syscall_trace_leave(struct pt_regs *regs); + +/* copy a struct user_gp_regs out to user */ +int metag_gp_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); +/* copy a struct user_gp_regs in from user */ +int metag_gp_regs_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); +/* copy a struct user_cb_regs out to user */ +int metag_cb_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); +/* copy a struct user_cb_regs in from user */ +int metag_cb_regs_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); +/* copy a struct user_rp_state out to user */ +int metag_rp_state_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); +/* copy a struct user_rp_state in from user */ +int metag_rp_state_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); + +#endif /* __ASSEMBLY__ */ +#endif /* _METAG_PTRACE_H */ diff --git a/arch/metag/include/uapi/asm/ptrace.h b/arch/metag/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..45d97809d33e --- /dev/null +++ b/arch/metag/include/uapi/asm/ptrace.h @@ -0,0 +1,113 @@ +#ifndef _UAPI_METAG_PTRACE_H +#define _UAPI_METAG_PTRACE_H + +#ifndef __ASSEMBLY__ + +/* + * These are the layouts of the regsets returned by the GETREGSET ptrace call + */ + +/* user_gp_regs::status */ + +/* CBMarker bit (indicates catch state / catch replay) */ +#define USER_GP_REGS_STATUS_CATCH_BIT (1 << 22) +#define USER_GP_REGS_STATUS_CATCH_S 22 +/* LSM_STEP field (load/store multiple step) */ +#define USER_GP_REGS_STATUS_LSM_STEP_BITS (0x7 << 8) +#define USER_GP_REGS_STATUS_LSM_STEP_S 8 +/* SCC bit (indicates split 16x16 condition flags) */ +#define USER_GP_REGS_STATUS_SCC_BIT (1 << 4) +#define USER_GP_REGS_STATUS_SCC_S 4 + +/* normal 
condition flags */ +/* CF_Z bit (Zero flag) */ +#define USER_GP_REGS_STATUS_CF_Z_BIT (1 << 3) +#define USER_GP_REGS_STATUS_CF_Z_S 3 +/* CF_N bit (Negative flag) */ +#define USER_GP_REGS_STATUS_CF_N_BIT (1 << 2) +#define USER_GP_REGS_STATUS_CF_N_S 2 +/* CF_V bit (oVerflow flag) */ +#define USER_GP_REGS_STATUS_CF_V_BIT (1 << 1) +#define USER_GP_REGS_STATUS_CF_V_S 1 +/* CF_C bit (Carry flag) */ +#define USER_GP_REGS_STATUS_CF_C_BIT (1 << 0) +#define USER_GP_REGS_STATUS_CF_C_S 0 + +/* split 16x16 condition flags */ +/* SCF_LZ bit (Low Zero flag) */ +#define USER_GP_REGS_STATUS_SCF_LZ_BIT (1 << 3) +#define USER_GP_REGS_STATUS_SCF_LZ_S 3 +/* SCF_HZ bit (High Zero flag) */ +#define USER_GP_REGS_STATUS_SCF_HZ_BIT (1 << 2) +#define USER_GP_REGS_STATUS_SCF_HZ_S 2 +/* SCF_HC bit (High Carry flag) */ +#define USER_GP_REGS_STATUS_SCF_HC_BIT (1 << 1) +#define USER_GP_REGS_STATUS_SCF_HC_S 1 +/* SCF_LC bit (Low Carry flag) */ +#define USER_GP_REGS_STATUS_SCF_LC_BIT (1 << 0) +#define USER_GP_REGS_STATUS_SCF_LC_S 0 + +/** + * struct user_gp_regs - User general purpose registers + * @dx: GP data unit regs (dx[reg][unit] = D{unit:0-1}.{reg:0-7}) + * @ax: GP address unit regs (ax[reg][unit] = A{unit:0-1}.{reg:0-3}) + * @pc: PC register + * @status: TXSTATUS register (condition flags, LSM_STEP etc) + * @rpt: TXRPT registers (branch repeat counter) + * @bpobits: TXBPOBITS register ("branch prediction other" bits) + * @mode: TXMODE register + * @_pad1: Reserved padding to make sizeof obviously 64bit aligned + * + * This is the user-visible general purpose register state structure. + * + * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS. + * + * It is also used in the signal context. + */ +struct user_gp_regs { + unsigned long dx[8][2]; + unsigned long ax[4][2]; + unsigned long pc; + unsigned long status; + unsigned long rpt; + unsigned long bpobits; + unsigned long mode; + unsigned long _pad1; +}; + +/** + * struct user_cb_regs - User catch buffer registers + * @flags: TXCATCH0 register (fault flags) + * @addr: TXCATCH1 register (fault address) + * @data: TXCATCH2 and TXCATCH3 registers (low and high data word) + * + * This is the user-visible catch buffer register state structure containing + * information about a failed memory access, and allowing the access to be + * modified and replayed. + * + * It can be accessed through PTRACE_GETREGSET with NT_METAG_CBUF. + */ +struct user_cb_regs { + unsigned long flags; + unsigned long addr; + unsigned long long data; +}; + +/** + * struct user_rp_state - User read pipeline state + * @entries: Read pipeline entries + * @mask: Mask of valid pipeline entries (RPMask from TXDIVTIME register) + * + * This is the user-visible read pipeline state structure containing the entries + * currently in the read pipeline and the mask of valid entries. + * + * It can be accessed through PTRACE_GETREGSET with NT_METAG_RPIPE. + */ +struct user_rp_state { + unsigned long long entries[6]; + unsigned long mask; +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_METAG_PTRACE_H */ diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c new file mode 100644 index 000000000000..47a8828615a5 --- /dev/null +++ b/arch/metag/kernel/ptrace.c @@ -0,0 +1,380 @@ +/* + * Copyright (C) 2005-2012 Imagination Technologies Ltd. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of + * this archive for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/* + * user_regset definitions. + */ + +int metag_gp_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const void *ptr; + unsigned long data; + int ret; + + /* D{0-1}.{0-7} */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->ctx.DX, 0, 4*16); + if (ret) + goto out; + /* A{0-1}.{0-1} */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->ctx.AX, 4*16, 4*20); + if (ret) + goto out; + /* A{0-1}.2 */ + if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) + ptr = regs->ctx.Ext.Ctx.pExt; + else + ptr = ®s->ctx.Ext.AX2; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ptr, 4*20, 4*22); + if (ret) + goto out; + /* A{0-1}.3 */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->ctx.AX3, 4*22, 4*24); + if (ret) + goto out; + /* PC */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->ctx.CurrPC, 4*24, 4*25); + if (ret) + goto out; + /* TXSTATUS */ + data = (unsigned long)regs->ctx.Flags; + if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) + data |= USER_GP_REGS_STATUS_CATCH_BIT; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) + goto out; + /* TXRPT, TXBPOBITS, TXMODE */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->ctx.CurrRPT, 4*26, 4*29); + if (ret) + goto out; + /* Padding */ + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + 4*29, 4*30); +out: + return ret; +} + +int metag_gp_regs_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + void *ptr; + unsigned long data; + int ret; + + /* D{0-1}.{0-7} */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->ctx.DX, 0, 4*16); + if (ret) + goto out; + /* A{0-1}.{0-1} */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->ctx.AX, 4*16, 4*20); + if (ret) + goto out; + /* A{0-1}.2 */ + if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) + ptr = regs->ctx.Ext.Ctx.pExt; + else + ptr = ®s->ctx.Ext.AX2; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ptr, 4*20, 4*22); + if (ret) + goto out; + /* A{0-1}.3 */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->ctx.AX3, 4*22, 4*24); + if (ret) + goto out; + /* PC */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->ctx.CurrPC, 4*24, 4*25); + if (ret) + goto out; + /* TXSTATUS */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) + goto out; + regs->ctx.Flags = data & 0xffff; + if (data & USER_GP_REGS_STATUS_CATCH_BIT) + regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBUF_BIT; + else + regs->ctx.SaveMask &= ~TBICTX_CBUF_BIT; + /* TXRPT, TXBPOBITS, TXMODE */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->ctx.CurrRPT, 4*26, 4*29); +out: + return ret; +} + +static int metag_gp_regs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + return metag_gp_regs_copyout(regs, pos, count, kbuf, ubuf); +} + +static int metag_gp_regs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + return metag_gp_regs_copyin(regs, pos, count, kbuf, ubuf); +} + +int 
metag_cb_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + /* TXCATCH{0-3} */ + if (regs->ctx.SaveMask & TBICTX_XCBF_BIT) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->extcb0, 0, 4*4); + else + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + 0, 4*4); + return ret; +} + +int metag_cb_regs_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + /* TXCATCH{0-3} */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->extcb0, 0, 4*4); + return ret; +} + +static int metag_cb_regs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + return metag_cb_regs_copyout(regs, pos, count, kbuf, ubuf); +} + +static int metag_cb_regs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + return metag_cb_regs_copyin(regs, pos, count, kbuf, ubuf); +} + +int metag_rp_state_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + unsigned long mask; + u64 *ptr; + int ret, i; + + /* Empty read pipeline */ + if (!(regs->ctx.SaveMask & TBICTX_CBRP_BIT)) { + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + 0, 4*13); + goto out; + } + + mask = (regs->ctx.CurrDIVTIME & TXDIVTIME_RPMASK_BITS) >> + TXDIVTIME_RPMASK_S; + + /* Read pipeline entries */ + ptr = (void *)®s->extcb0[1]; + for (i = 0; i < 6; ++i, ++ptr) { + if (mask & (1 << i)) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ptr, 8*i, 8*(i + 1)); + else + ret = user_regset_copyout_zero(&pos, &count, &kbuf, + &ubuf, 8*i, 8*(i + 1)); + if (ret) + goto out; + } + /* Mask of entries */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &mask, 4*12, 4*13); +out: + return ret; +} + +int metag_rp_state_copyin(struct pt_regs *regs, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_rp_state rp; + unsigned long long *ptr; + int ret, i; + + /* Read the entire pipeline before making any changes */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &rp, 0, 4*13); + if (ret) + goto out; + + /* Write pipeline entries */ + ptr = (void *)®s->extcb0[1]; + for (i = 0; i < 6; ++i, ++ptr) + if (rp.mask & (1 << i)) + *ptr = rp.entries[i]; + + /* Update RPMask in TXDIVTIME */ + regs->ctx.CurrDIVTIME &= ~TXDIVTIME_RPMASK_BITS; + regs->ctx.CurrDIVTIME |= (rp.mask << TXDIVTIME_RPMASK_S) + & TXDIVTIME_RPMASK_BITS; + + /* Set/clear flags to indicate catch/read pipeline state */ + if (rp.mask) + regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBRP_BIT; + else + regs->ctx.SaveMask &= ~TBICTX_CBRP_BIT; +out: + return ret; +} + +static int metag_rp_state_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + return metag_rp_state_copyout(regs, pos, count, kbuf, ubuf); +} + +static int metag_rp_state_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + return 
metag_rp_state_copyin(regs, pos, count, kbuf, ubuf); +} + +enum metag_regset { + REGSET_GENERAL, + REGSET_CBUF, + REGSET_READPIPE, +}; + +static const struct user_regset metag_regsets[] = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(long), + .align = sizeof(long long), + .get = metag_gp_regs_get, + .set = metag_gp_regs_set, + }, + [REGSET_CBUF] = { + .core_note_type = NT_METAG_CBUF, + .n = sizeof(struct user_cb_regs) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long long), + .get = metag_cb_regs_get, + .set = metag_cb_regs_set, + }, + [REGSET_READPIPE] = { + .core_note_type = NT_METAG_RPIPE, + .n = sizeof(struct user_rp_state) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long long), + .get = metag_rp_state_get, + .set = metag_rp_state_set, + }, +}; + +static const struct user_regset_view user_metag_view = { + .name = "metag", + .e_machine = EM_METAG, + .regsets = metag_regsets, + .n = ARRAY_SIZE(metag_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_metag_view; +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. + */ +void ptrace_disable(struct task_struct *child) +{ + /* nothing to do.. */ +} + +long arch_ptrace(struct task_struct *child, long request, unsigned long addr, + unsigned long data) +{ + int ret; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +int syscall_trace_enter(struct pt_regs *regs) +{ + int ret = 0; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->ctx.DX[0].U1); + + return ret ? -1 : regs->ctx.DX[0].U1; +} + +void syscall_trace_leave(struct pt_regs *regs) +{ + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->ctx.DX[0].U1); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); +} diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 126a8175e3e2..eb164a298b98 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -395,6 +395,8 @@ typedef struct elf64_shdr { #define NT_ARM_TLS 0x401 /* ARM TLS register */ #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ +#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ +#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ /* Note header in a PT_NOTE section */ -- cgit v1.2.3 From a2c5d4ed92bbc02ff4a37efc2adffe7d145abe4f Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:39 +0100 Subject: metag: Time keeping Add time keeping code for metag. Meta hardware threads have 2 timers. The background timer (TXTIMER) is used as a free-running time base, and the interrupt timer (TXTIMERI) is used for the timer interrupt. Both counters traditionally count at approximately 1MHz. 
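
For illustration only (a sketch using the __core_reg_get()/__core_reg_set()
accessors that the driver below uses; 'ticks' and 'delta' are placeholder
names, not driver code), both uses reduce to a single core register access:

	/* Read the free-running time base (used as the clocksource). */
	ticks = __core_reg_get(TXTIMER);

	/*
	 * Arm the interrupt timer: writing the negated delta makes it
	 * fire after roughly 'delta' ticks, as the driver's
	 * metag_timer_set_next_event() does.
	 */
	__core_reg_set(TXTIMERI, -delta);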
Signed-off-by: James Hogan Cc: John Stultz Cc: Thomas Gleixner --- MAINTAINERS | 1 + arch/metag/include/asm/clock.h | 51 ++++++++++ arch/metag/include/asm/delay.h | 29 ++++++ arch/metag/include/asm/mach/arch.h | 4 + arch/metag/kernel/clock.c | 53 ++++++++++ arch/metag/kernel/time.c | 15 +++ drivers/clocksource/Kconfig | 5 + drivers/clocksource/Makefile | 1 + drivers/clocksource/metag_generic.c | 198 ++++++++++++++++++++++++++++++++++++ include/clocksource/metag_generic.h | 21 ++++ 10 files changed, 378 insertions(+) create mode 100644 arch/metag/include/asm/clock.h create mode 100644 arch/metag/include/asm/delay.h create mode 100644 arch/metag/kernel/clock.c create mode 100644 arch/metag/kernel/time.c create mode 100644 drivers/clocksource/metag_generic.c create mode 100644 include/clocksource/metag_generic.h (limited to 'arch/metag/kernel') diff --git a/MAINTAINERS b/MAINTAINERS index 401145d7c5bb..749d76699ead 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5039,6 +5039,7 @@ S: Supported F: arch/metag/ F: Documentation/metag/ F: Documentation/devicetree/bindings/metag/ +F: drivers/clocksource/metag_generic.c MICROBLAZE ARCHITECTURE M: Michal Simek diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h new file mode 100644 index 000000000000..3e2915a280c7 --- /dev/null +++ b/arch/metag/include/asm/clock.h @@ -0,0 +1,51 @@ +/* + * arch/metag/include/asm/clock.h + * + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _METAG_CLOCK_H_ +#define _METAG_CLOCK_H_ + +#include + +/** + * struct meta_clock_desc - Meta Core clock callbacks. + * @get_core_freq: Get the frequency of the Meta core. If this is NULL, the + * core frequency will be determined like this: + * Meta 1: based on loops_per_jiffy. + * Meta 2: (EXPAND_TIMER_DIV + 1) MHz. + */ +struct meta_clock_desc { + unsigned long (*get_core_freq)(void); +}; + +extern struct meta_clock_desc _meta_clock; + +/* + * Set up the default clock, ensuring all callbacks are valid - only accessible + * during boot. + */ +void setup_meta_clocks(struct meta_clock_desc *desc); + +/** + * get_coreclock() - Get the frequency of the Meta core clock. + * + * Returns: The Meta core clock frequency in Hz. + */ +static inline unsigned long get_coreclock(void) +{ + /* + * Use the current clock callback. If set correctly this will provide + * the most accurate frequency as it can be calculated directly from the + * PLL configuration. otherwise a default callback will have been set + * instead. + */ + return _meta_clock.get_core_freq(); +} + +#endif /* _METAG_CLOCK_H_ */ diff --git a/arch/metag/include/asm/delay.h b/arch/metag/include/asm/delay.h new file mode 100644 index 000000000000..9c92f996957a --- /dev/null +++ b/arch/metag/include/asm/delay.h @@ -0,0 +1,29 @@ +#ifndef _METAG_DELAY_H +#define _METAG_DELAY_H + +/* + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines calling functions in arch/metag/lib/delay.c + */ + +/* Undefined functions to get compile-time errors */ +extern void __bad_udelay(void); +extern void __bad_ndelay(void); + +extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); +extern void __const_udelay(unsigned long xloops); +extern void __delay(unsigned long loops); + +/* 0x10c7 is 2**32 / 1000000 (rounded up) */ +#define udelay(n) (__builtin_constant_p(n) ? \ + ((n) > 20000 ? 
__bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ + __udelay(n)) + +/* 0x5 is 2**32 / 1000000000 (rounded up) */ +#define ndelay(n) (__builtin_constant_p(n) ? \ + ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ + __ndelay(n)) + +#endif /* _METAG_DELAY_H */ diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h index 6845d80857e4..12c5664fea6e 100644 --- a/arch/metag/include/asm/mach/arch.h +++ b/arch/metag/include/asm/mach/arch.h @@ -16,10 +16,13 @@ #include +#include + /** * struct machine_desc - Describes a board controlled by a Meta. * @name: Board/SoC name. * @dt_compat: Array of device tree 'compatible' strings. + * @clocks: Clock callbacks. * * @nr_irqs: Maximum number of IRQs. * If 0, defaults to NR_IRQS in asm-generic/irq.h. @@ -37,6 +40,7 @@ struct machine_desc { const char *name; const char **dt_compat; + struct meta_clock_desc *clocks; unsigned int nr_irqs; diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c new file mode 100644 index 000000000000..defc84056f18 --- /dev/null +++ b/arch/metag/kernel/clock.c @@ -0,0 +1,53 @@ +/* + * arch/metag/kernel/clock.c + * + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#include +#include + +struct meta_clock_desc _meta_clock; + +/* Default machine get_core_freq callback. */ +static unsigned long get_core_freq_default(void) +{ +#ifdef CONFIG_METAG_META21 + /* + * Meta 2 cores divide down the core clock for the Meta timers, so we + * can estimate the core clock from the divider. + */ + return (metag_in32(EXPAND_TIMER_DIV) + 1) * 1000000; +#else + /* + * On Meta 1 we don't know the core clock, but assuming the Meta timer + * is correct it can be estimated based on loops_per_jiffy. + */ + return (loops_per_jiffy * HZ * 5) >> 1; +#endif +} + +/** + * setup_meta_clocks() - Set up the Meta clock. + * @desc: Clock descriptor usually provided by machine description + * + * Ensures all callbacks are valid. + */ +void __init setup_meta_clocks(struct meta_clock_desc *desc) +{ + /* copy callbacks */ + if (desc) + _meta_clock = *desc; + + /* set fallback functions */ + if (!_meta_clock.get_core_freq) + _meta_clock.get_core_freq = get_core_freq_default; +} + diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c new file mode 100644 index 000000000000..17dc10733b2f --- /dev/null +++ b/arch/metag/kernel/time.c @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2005-2013 Imagination Technologies Ltd. + * + * This file contains the Meta-specific time handling details. + * + */ + +#include + +#include + +void __init time_init(void) +{ + metag_generic_timer_init(); +} diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 7fdcbd3f4da5..75bc7520ace5 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -58,3 +58,8 @@ config CLKSRC_ARM_GENERIC def_bool y if ARM64 help This option enables support for the ARM generic timer. + +config CLKSRC_METAG_GENERIC + def_bool y if METAG + help + This option enables support for the Meta per-thread timers. 
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index f93453d01673..09dcd49b7e31 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o +obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c new file mode 100644 index 000000000000..ade7513a11d1 --- /dev/null +++ b/drivers/clocksource/metag_generic.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2005-2013 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * Support for Meta per-thread timers. + * + * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used + * as a free-running time base (hz clocksource), and the interrupt timer + * (TXTIMERI) is used for the timer interrupt (clock event). Both counters + * traditionally count at approximately 1MHz. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define HARDWARE_FREQ 1000000 /* 1MHz */ +#define HARDWARE_DIV 1 /* divide by 1 = 1MHz clock */ +#define HARDWARE_TO_NS_SHIFT 10 /* convert ticks to ns */ + +static unsigned int hwtimer_freq = HARDWARE_FREQ; +static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); +static DEFINE_PER_CPU(char [11], local_clockevent_name); + +static int metag_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + __core_reg_set(TXTIMERI, -delta); + return 0; +} + +static void metag_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_RESUME: + break; + + case CLOCK_EVT_MODE_SHUTDOWN: + /* We should disable the IRQ here */ + break; + + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_UNUSED: + WARN_ON(1); + break; + }; +} + +static cycle_t metag_clocksource_read(struct clocksource *cs) +{ + return __core_reg_get(TXTIMER); +} + +static struct clocksource clocksource_metag = { + .name = "META", + .rating = 200, + .mask = CLOCKSOURCE_MASK(32), + .read = metag_clocksource_read, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static irqreturn_t metag_timer_interrupt(int irq, void *dummy) +{ + struct clock_event_device *evt = &__get_cpu_var(local_clockevent); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction metag_timer_irq = { + .name = "META core timer", + .handler = metag_timer_interrupt, + .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU, +}; + +unsigned long long sched_clock(void) +{ + unsigned long long ticks = __core_reg_get(TXTIMER); + return ticks << HARDWARE_TO_NS_SHIFT; +} + +static void __cpuinit arch_timer_setup(unsigned int cpu) +{ + unsigned int txdivtime; + struct clock_event_device *clk = &per_cpu(local_clockevent, 
cpu);
+	char *name = per_cpu(local_clockevent_name, cpu);
+
+	txdivtime = __core_reg_get(TXDIVTIME);
+
+	txdivtime &= ~TXDIVTIME_DIV_BITS;
+	txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS);
+
+	__core_reg_set(TXDIVTIME, txdivtime);
+
+	sprintf(name, "META %d", cpu);
+	clk->name = name;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+
+	clk->rating = 200;
+	clk->shift = 12;
+	clk->irq = tbisig_map(TBID_SIGNUM_TRT);
+	clk->set_mode = metag_timer_set_mode;
+	clk->set_next_event = metag_timer_set_next_event;
+
+	clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+	clk->cpumask = cpumask_of(cpu);
+
+	clockevents_register_device(clk);
+
+	/*
+	 * For all non-boot CPUs we need to synchronize our free
+	 * running clock (TXTIMER) with the boot CPU's clock.
+	 *
+	 * While this won't be accurate, it should be close enough.
+	 */
+	if (cpu) {
+		unsigned int thread0 = cpu_2_hwthread_id[0];
+		unsigned long val;
+
+		val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
+		__core_reg_set(TXTIMER, val);
+	}
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+					   unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		arch_timer_setup(cpu);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
+	.notifier_call = arch_timer_cpu_notify,
+};
+
+int __init metag_generic_timer_init(void)
+{
+	/*
+	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
+	 * Meta core clock speed divided by an integer, so it is only
+	 * approximately 1MHz. Calculating the real frequency here drastically
+	 * reduces clock skew on these SoCs.
+	 */
+#ifdef CONFIG_METAG_META21
+	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
+#endif
+	clocksource_register_hz(&clocksource_metag, hwtimer_freq);
+
+	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
+
+	/* Configure timer on boot CPU */
+	arch_timer_setup(smp_processor_id());
+
+	/* Hook cpu boot to configure other CPUs' timers */
+	register_cpu_notifier(&arch_timer_cpu_nb);
+
+	return 0;
+}
diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h
new file mode 100644
index 000000000000..ac17e7d06cfb
--- /dev/null
+++ b/include/clocksource/metag_generic.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __CLKSOURCE_METAG_GENERIC_H
+#define __CLKSOURCE_METAG_GENERIC_H
+
+extern int metag_generic_timer_init(void);
+
+#endif /* __CLKSOURCE_METAG_GENERIC_H */
-- 
cgit v1.2.3

From ac919f0883e53d7785745566692c8a0620abd7ea Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Tue, 9 Oct 2012 10:54:43 +0100
Subject: metag: Traps

Add trap code for metag.
At the lowest level Meta traps (and the return from interrupt instruction -
RTI) simply swap the PC and PCX registers and optionally toggle the
interrupt status bit (ISTAT). Low level TBX code in tbipcx.S handles the
core context save, determines the TBX signal number based on the core
trigger that fired (using the TXSTATI status register), and calls TBX
signal handlers (mostly in traps.c) via a vector table.

Signed-off-by: James Hogan
Cc: Al Viro
---
 arch/metag/include/asm/switch.h |  21 +
 arch/metag/include/asm/traps.h  |  48 ++
 arch/metag/kernel/kick.c        |  98 ++++
 arch/metag/kernel/tbiunexp.S    |  22 +
 arch/metag/kernel/traps.c       | 978 ++++++++++++++++++++++++++++++++++++++++
 5 files changed, 1167 insertions(+)
 create mode 100644 arch/metag/include/asm/switch.h
 create mode 100644 arch/metag/include/asm/traps.h
 create mode 100644 arch/metag/kernel/kick.c
 create mode 100644 arch/metag/kernel/tbiunexp.S
 create mode 100644 arch/metag/kernel/traps.c
(limited to 'arch/metag/kernel')

diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h
new file mode 100644
index 000000000000..1fd6a587c844
--- /dev/null
+++ b/arch/metag/include/asm/switch.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_METAG_SWITCH_H
+#define _ASM_METAG_SWITCH_H
+
+/* metag SWITCH codes */
+#define __METAG_SW_PERM_BREAK	0x400002	/* compiled in breakpoint */
+#define __METAG_SW_SYS_LEGACY	0x440000	/* legacy system calls */
+#define __METAG_SW_SYS		0x440001	/* system calls */
+
+/* metag SWITCH instruction encoding */
+#define __METAG_SW_ENCODING(TYPE)	(0xaf000000 | (__METAG_SW_##TYPE))
+
+#endif /* _ASM_METAG_SWITCH_H */
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
new file mode 100644
index 000000000000..ac808740bd84
--- /dev/null
+++ b/arch/metag/include/asm/traps.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2005,2008 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */ + +#ifndef _METAG_TBIVECTORS_H +#define _METAG_TBIVECTORS_H + +#ifndef __ASSEMBLY__ + +#include + +typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *); + +extern TBIRES kick_handler(TBIRES, int, int, int, PTBI); +struct kick_irq_handler { + struct list_head list; + kick_irq_func_t func; +}; + +extern void kick_register_func(struct kick_irq_handler *); +extern void kick_unregister_func(struct kick_irq_handler *); + +extern void head_end(TBIRES, unsigned long); +extern void restart_critical_section(TBIRES State); +extern TBIRES tail_end_sys(TBIRES, int, int *); +static inline TBIRES tail_end(TBIRES state) +{ + return tail_end_sys(state, -1, NULL); +} + +DECLARE_PER_CPU(PTBI, pTBI); +extern PTBI pTBI_get(unsigned int); + +extern int ret_from_fork(TBIRES arg); + +extern int do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned int write_access, unsigned int trapno); + +extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst, + PTBI pTBI); + +#endif + +#endif /* _METAG_TBIVECTORS_H */ diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c new file mode 100644 index 000000000000..c3090962c45b --- /dev/null +++ b/arch/metag/kernel/kick.c @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2009 Imagination Technologies + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * The Meta KICK interrupt mechanism is generally a useful feature, so + * we provide an interface for registering multiple interrupt + * handlers. All the registered interrupt handlers are "chained". When + * a KICK interrupt is received the first function in the list is + * called. If that interrupt handler cannot handle the KICK the next + * one is called, then the next until someone handles it (or we run + * out of functions). As soon as one function handles the interrupt no + * other handlers are called. + * + * The only downside of chaining interrupt handlers is that each + * handler must be able to detect whether the KICK was intended for it + * or not. For example, when the IPI handler runs and it sees that + * there are no IPI messages it must not signal that the KICK was + * handled, thereby giving the other handlers a chance to run. + * + * The reason that we provide our own interface for calling KICK + * handlers instead of using the generic kernel infrastructure is that + * the KICK handlers require access to a CPU's pTBI structure. So we + * pass it as an argument. + */ +#include +#include +#include + +#include + +/* + * All accesses/manipulations of kick_handlers_list should be + * performed while holding kick_handlers_lock. 
+ */ +static DEFINE_SPINLOCK(kick_handlers_lock); +static LIST_HEAD(kick_handlers_list); + +void kick_register_func(struct kick_irq_handler *kh) +{ + unsigned long flags; + + spin_lock_irqsave(&kick_handlers_lock, flags); + + list_add_tail(&kh->list, &kick_handlers_list); + + spin_unlock_irqrestore(&kick_handlers_lock, flags); +} + +void kick_unregister_func(struct kick_irq_handler *kh) +{ + unsigned long flags; + + spin_lock_irqsave(&kick_handlers_lock, flags); + + list_del(&kh->list); + + spin_unlock_irqrestore(&kick_handlers_lock, flags); +} + +TBIRES +kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) +{ + struct kick_irq_handler *kh; + struct list_head *lh; + int handled = 0; + TBIRES ret; + + head_end(State, ~INTS_OFF_MASK); + + /* If we interrupted user code handle any critical sections. */ + if (State.Sig.SaveMask & TBICTX_PRIV_BIT) + restart_critical_section(State); + + trace_hardirqs_off(); + + /* + * There is no need to disable interrupts here because we + * can't nest KICK interrupts in a KICK interrupt handler. + */ + spin_lock(&kick_handlers_lock); + + list_for_each(lh, &kick_handlers_list) { + kh = list_entry(lh, struct kick_irq_handler, list); + + ret = kh->func(State, SigNum, Triggers, Inst, pTBI, &handled); + if (handled) + break; + } + + spin_unlock(&kick_handlers_lock); + + WARN_ON(!handled); + + return tail_end(ret); +} diff --git a/arch/metag/kernel/tbiunexp.S b/arch/metag/kernel/tbiunexp.S new file mode 100644 index 000000000000..907bbe0b2e68 --- /dev/null +++ b/arch/metag/kernel/tbiunexp.S @@ -0,0 +1,22 @@ +/* Pass a breakpoint through to Codescape */ + +#include + + .text + .global ___TBIUnExpXXX + .type ___TBIUnExpXXX,function +___TBIUnExpXXX: + TSTT D0Ar2,#TBICTX_CRIT_BIT ! Result of nestable int call? + BZ $LTBINormCase ! UnExpXXX at background level + MOV D0Re0,TXMASKI ! Read TXMASKI + XOR TXMASKI,D1Re0,D1Re0 ! Turn off BGNDHALT handling! + OR D0Ar2,D0Ar2,D0Re0 ! Preserve bits cleared +$LTBINormCase: + MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 ! Save args on stack + SETL [A0StP++],D0Ar2,D1Ar1 ! Init area for returned values + SWITCH #0xC20208 ! Total stack frame size 8 Dwords + ! write back size 2 Dwords + GETL D0Re0,D1Re0,[--A0StP] ! Get result + SUB A0StP,A0StP,#(8*3) ! Recover stack frame + MOV PC,D1RtP + .size ___TBIUnExpXXX,.-___TBIUnExpXXX diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c new file mode 100644 index 000000000000..1ad363ce1ee9 --- /dev/null +++ b/arch/metag/kernel/traps.c @@ -0,0 +1,978 @@ +/* + * Meta exception handling. + * + * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Passing syscall arguments as long long is quicker. */ +typedef unsigned int (*LPSYSCALL) (unsigned long long, + unsigned long long, + unsigned long long); + +/* + * Users of LNKSET should compare the bus error bits obtained from DEFR + * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between + * different cores revisions. + */ +#define TXDEFR_LNKSET_SUCCESS 0x02000000 +#define TXDEFR_LNKSET_FAILURE 0x04000000 + +/* + * Our global TBI handle. 
Initialised from setup.c/setup_arch. + */ +DECLARE_PER_CPU(PTBI, pTBI); + +#ifdef CONFIG_SMP +static DEFINE_PER_CPU(unsigned int, trigger_mask); +#else +unsigned int global_trigger_mask; +#endif + +unsigned long per_cpu__stack_save[NR_CPUS]; + +static const char * const trap_names[] = { + [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault", + [TBIXXF_SIGNUM_PGF] = "Privilege violation", + [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault", + [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure", + [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault", + [TBIXXF_SIGNUM_IPF] = "Code fetch page fault", + [TBIXXF_SIGNUM_DPF] = "Data access page fault", + [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint", + [TBIXXF_SIGNUM_DWF] = "Read-only data access fault", +}; + +const char *trap_name(int trapno) +{ + if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names) + && trap_names[trapno]) + return trap_names[trapno]; + return "Unknown fault"; +} + +static DEFINE_SPINLOCK(die_lock); + +void die(const char *str, struct pt_regs *regs, long err, + unsigned long addr) +{ + static int die_counter; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff, + trap_name(err & 0xffff), addr, ++die_counter); + + print_modules(); + show_regs(regs); + + pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm, + task_pid_nr(current), task_stack_page(current) + THREAD_SIZE); + + bust_spinlocks(0); + add_taint(TAINT_DIE); + if (kexec_should_crash(current)) + crash_kexec(regs); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + spin_unlock_irq(&die_lock); + oops_exit(); + do_exit(SIGSEGV); +} + +#ifdef CONFIG_METAG_DSP +/* + * The ECH encoding specifies the size of a DSPRAM as, + * + * "slots" / 4 + * + * A "slot" is the size of two DSPRAM bank entries; an entry from + * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank + * entry is 4 bytes. + */ +#define SLOT_SZ 8 +static inline unsigned int decode_dspram_size(unsigned int size) +{ + unsigned int _sz = size & 0x7f; + + return _sz * SLOT_SZ * 4; +} + +static void dspram_save(struct meta_ext_context *dsp_ctx, + unsigned int ramA_sz, unsigned int ramB_sz) +{ + unsigned int ram_sz[2]; + int i; + + ram_sz[0] = ramA_sz; + ram_sz[1] = ramB_sz; + + for (i = 0; i < 2; i++) { + if (ram_sz[i] != 0) { + unsigned int sz; + + if (i == 0) + sz = decode_dspram_size(ram_sz[i] >> 8); + else + sz = decode_dspram_size(ram_sz[i]); + + if (dsp_ctx->ram[i] == NULL) { + dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL); + + if (dsp_ctx->ram[i] == NULL) + panic("couldn't save DSP context"); + } else { + if (ram_sz[i] > dsp_ctx->ram_sz[i]) { + kfree(dsp_ctx->ram[i]); + + dsp_ctx->ram[i] = kmalloc(sz, + GFP_KERNEL); + + if (dsp_ctx->ram[i] == NULL) + panic("couldn't save DSP context"); + } + } + + if (i == 0) + __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]); + else + __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]); + + dsp_ctx->ram_sz[i] = ram_sz[i]; + } + } +} +#endif /* CONFIG_METAG_DSP */ + +/* + * Allow interrupts to be nested and save any "extended" register + * context state, e.g. DSP regs and RAMs. + */ +static void nest_interrupts(TBIRES State, unsigned long mask) +{ +#ifdef CONFIG_METAG_DSP + struct meta_ext_context *dsp_ctx; + unsigned int D0_8; + + /* + * D0.8 may contain an ECH encoding. The upper 16 bits + * tell us what DSP resources the current process is + * using. 
OR the bits into the SaveMask so that + * __TBINestInts() knows what resources to save as + * part of this context. + * + * Don't save the context if we're nesting interrupts in the + * kernel because the kernel doesn't use DSP hardware. + */ + D0_8 = __core_reg_get(D0.8); + + if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) { + State.Sig.SaveMask |= (D0_8 >> 16); + + dsp_ctx = current->thread.dsp_context; + if (dsp_ctx == NULL) { + dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL); + if (dsp_ctx == NULL) + panic("couldn't save DSP context: ENOMEM"); + + current->thread.dsp_context = dsp_ctx; + } + + current->thread.user_flags |= (D0_8 & 0xffff0000); + __TBINestInts(State, &dsp_ctx->regs, mask); + dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f); + } else + __TBINestInts(State, NULL, mask); +#else + __TBINestInts(State, NULL, mask); +#endif +} + +void head_end(TBIRES State, unsigned long mask) +{ + unsigned int savemask = (unsigned short)State.Sig.SaveMask; + unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask; + + if (savemask & TBICTX_PRIV_BIT) { + ctx_savemask |= TBICTX_PRIV_BIT; + current->thread.user_flags = savemask; + } + + /* Always undo the sleep bit */ + ctx_savemask &= ~TBICTX_WAIT_BIT; + + /* Always save the catch buffer and RD pipe if they are dirty */ + savemask |= TBICTX_XCBF_BIT; + + /* Only save the catch and RD if we have not already done so. + * Note - the RD bits are in the pCtx only, and not in the + * State.SaveMask. + */ + if ((savemask & TBICTX_CBUF_BIT) || + (ctx_savemask & TBICTX_CBRP_BIT)) { + /* Have we already saved the buffers though? + * - See TestTrack 5071 */ + if (ctx_savemask & TBICTX_XCBF_BIT) { + /* Strip off the bits so the call to __TBINestInts + * won't save the buffers again. */ + savemask &= ~TBICTX_CBUF_BIT; + ctx_savemask &= ~TBICTX_CBRP_BIT; + } + } + +#ifdef CONFIG_METAG_META21 + { + unsigned int depth, txdefr; + + /* + * Save TXDEFR state. + * + * The process may have been interrupted after a LNKSET, but + * before it could read the DEFR state, so we mustn't lose that + * state or it could end up retrying an atomic operation that + * succeeded. + * + * All interrupts are disabled at this point so we + * don't need to perform any locking. We must do this + * dance before we use LNKGET or LNKSET. + */ + BUG_ON(current->thread.int_depth > HARDIRQ_BITS); + + depth = current->thread.int_depth++; + + txdefr = __core_reg_get(TXDEFR); + + txdefr &= TXDEFR_BUS_STATE_BITS; + if (txdefr & TXDEFR_LNKSET_SUCCESS) + current->thread.txdefr_failure &= ~(1 << depth); + else + current->thread.txdefr_failure |= (1 << depth); + } +#endif + + State.Sig.SaveMask = savemask; + State.Sig.pCtx->SaveMask = ctx_savemask; + + nest_interrupts(State, mask); + +#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS + /* Poison the catch registers. This shows up any mistakes we have + * made in their handling MUCH quicker. 
+ */ + __core_reg_set(TXCATCH0, 0x87650021); + __core_reg_set(TXCATCH1, 0x87654322); + __core_reg_set(TXCATCH2, 0x87654323); + __core_reg_set(TXCATCH3, 0x87654324); +#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */ +} + +TBIRES tail_end_sys(TBIRES State, int syscall, int *restart) +{ + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned long flags; + + local_irq_disable(); + + if (user_mode(regs)) { + flags = current_thread_info()->flags; + if (flags & _TIF_WORK_MASK && + do_work_pending(regs, flags, syscall)) { + *restart = 1; + return State; + } + +#ifdef CONFIG_METAG_FPU + if (current->thread.fpu_context && + current->thread.fpu_context->needs_restore) { + __TBICtxFPURestore(State, current->thread.fpu_context); + /* + * Clearing this bit ensures the FP unit is not made + * active again unless it is used. + */ + State.Sig.SaveMask &= ~TBICTX_FPAC_BIT; + current->thread.fpu_context->needs_restore = false; + } + State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR); +#endif + } + + /* TBI will turn interrupts back on at some point. */ + if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask)) + trace_hardirqs_on(); + +#ifdef CONFIG_METAG_DSP + /* + * If we previously saved an extended context then restore it + * now. Otherwise, clear D0.8 because this process is not + * using DSP hardware. + */ + if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) { + unsigned int D0_8; + struct meta_ext_context *dsp_ctx = current->thread.dsp_context; + + /* Make sure we're going to return to userland. */ + BUG_ON(current->thread.int_depth != 1); + + if (dsp_ctx->ram_sz[0] > 0) + __TBIDspramRestoreA(dsp_ctx->ram_sz[0], + dsp_ctx->ram[0]); + if (dsp_ctx->ram_sz[1] > 0) + __TBIDspramRestoreB(dsp_ctx->ram_sz[1], + dsp_ctx->ram[1]); + + State.Sig.SaveMask |= State.Sig.pCtx->SaveMask; + __TBICtxRestore(State, current->thread.dsp_context); + D0_8 = __core_reg_get(D0.8); + D0_8 |= current->thread.user_flags & 0xffff0000; + D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff; + __core_reg_set(D0.8, D0_8); + } else + __core_reg_set(D0.8, 0); +#endif /* CONFIG_METAG_DSP */ + +#ifdef CONFIG_METAG_META21 + { + unsigned int depth, txdefr; + + /* + * If there hasn't been a LNKSET since the last LNKGET then the + * link flag will be set, causing the next LNKSET to succeed if + * the addresses match. The two LNK operations may not be a pair + * (e.g. see atomic_read()), so the LNKSET should fail. + * We use a conditional-never LNKSET to clear the link flag + * without side effects. + */ + asm volatile("LNKSETDNV [D0Re0],D0Re0"); + + depth = --current->thread.int_depth; + + BUG_ON(user_mode(regs) && depth); + + txdefr = __core_reg_get(TXDEFR); + + txdefr &= ~TXDEFR_BUS_STATE_BITS; + + /* Do we need to restore a failure code into TXDEFR? */ + if (current->thread.txdefr_failure & (1 << depth)) + txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT); + else + txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT); + + __core_reg_set(TXDEFR, txdefr); + } +#endif + return State; +} + +#ifdef CONFIG_SMP +/* + * If we took an interrupt in the middle of __kuser_get_tls then we need + * to rewind the PC to the start of the function in case the process + * gets migrated to another thread (SMP only) and it reads the wrong tls + * data. 
+ */ +static inline void _restart_critical_section(TBIRES State) +{ + unsigned long get_tls_start; + unsigned long get_tls_end; + + get_tls_start = (unsigned long)__kuser_get_tls - + (unsigned long)&__user_gateway_start; + + get_tls_start += USER_GATEWAY_PAGE; + + get_tls_end = (unsigned long)__kuser_get_tls_end - + (unsigned long)&__user_gateway_start; + + get_tls_end += USER_GATEWAY_PAGE; + + if ((State.Sig.pCtx->CurrPC >= get_tls_start) && + (State.Sig.pCtx->CurrPC < get_tls_end)) + State.Sig.pCtx->CurrPC = get_tls_start; +} +#else +/* + * If we took an interrupt in the middle of + * __kuser_cmpxchg then we need to rewind the PC to the + * start of the function. + */ +static inline void _restart_critical_section(TBIRES State) +{ + unsigned long cmpxchg_start; + unsigned long cmpxchg_end; + + cmpxchg_start = (unsigned long)__kuser_cmpxchg - + (unsigned long)&__user_gateway_start; + + cmpxchg_start += USER_GATEWAY_PAGE; + + cmpxchg_end = (unsigned long)__kuser_cmpxchg_end - + (unsigned long)&__user_gateway_start; + + cmpxchg_end += USER_GATEWAY_PAGE; + + if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) && + (State.Sig.pCtx->CurrPC < cmpxchg_end)) + State.Sig.pCtx->CurrPC = cmpxchg_start; +} +#endif + +/* Used by kick_handler() */ +void restart_critical_section(TBIRES State) +{ + _restart_critical_section(State); +} + +TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst, + PTBI pTBI) +{ + head_end(State, ~INTS_OFF_MASK); + + /* If we interrupted user code handle any critical sections. */ + if (State.Sig.SaveMask & TBICTX_PRIV_BIT) + _restart_critical_section(State); + + trace_hardirqs_off(); + + do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx); + + return tail_end(State); +} + +static unsigned int load_fault(PTBICTXEXTCB0 pbuf) +{ + return pbuf->CBFlags & TXCATCH0_READ_BIT; +} + +static unsigned long fault_address(PTBICTXEXTCB0 pbuf) +{ + return pbuf->CBAddr; +} + +static void unhandled_fault(struct pt_regs *regs, unsigned long addr, + int signo, int code, int trapno) +{ + if (user_mode(regs)) { + siginfo_t info; + + if (show_unhandled_signals && unhandled_signal(current, signo) + && printk_ratelimit()) { + + pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n", + current->pid, regs->ctx.CurrPC, addr, + trapno, trap_name(trapno)); + print_vma_addr(" in ", regs->ctx.CurrPC); + print_vma_addr(" rtp in ", regs->ctx.DX[4].U1); + printk("\n"); + show_regs(regs); + } + + info.si_signo = signo; + info.si_errno = 0; + info.si_code = code; + info.si_addr = (__force void __user *)addr; + info.si_trapno = trapno; + force_sig_info(signo, &info, current); + } else { + die("Oops", regs, trapno, addr); + } +} + +static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs, + unsigned int data_address, int trapno) +{ + int ret; + + ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno); + + return ret; +} + +static unsigned long get_inst_fault_address(struct pt_regs *regs) +{ + return regs->ctx.CurrPC; +} + +TBIRES fault_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) +{ + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)®s->extcb0; + unsigned long data_address; + + head_end(State, ~INTS_OFF_MASK); + + /* Hardware breakpoint or data watch */ + if ((SigNum == TBIXXF_SIGNUM_IHF) || + ((SigNum == TBIXXF_SIGNUM_DHF) && + (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT | + TXCATCH0_WATCH0_BIT)))) { + State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, + pTBI); + return tail_end(State); + 
} + + local_irq_enable(); + + data_address = fault_address(pcbuf); + + switch (SigNum) { + case TBIXXF_SIGNUM_IGF: + /* 1st-level entry invalid (instruction fetch) */ + case TBIXXF_SIGNUM_IPF: { + /* 2nd-level entry invalid (instruction fetch) */ + unsigned long addr = get_inst_fault_address(regs); + do_page_fault(regs, addr, 0, SigNum); + break; + } + + case TBIXXF_SIGNUM_DGF: + /* 1st-level entry invalid (data access) */ + case TBIXXF_SIGNUM_DPF: + /* 2nd-level entry invalid (data access) */ + case TBIXXF_SIGNUM_DWF: + /* Write to read only page */ + handle_data_fault(pcbuf, regs, data_address, SigNum); + break; + + case TBIXXF_SIGNUM_IIF: + /* Illegal instruction */ + unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC, + SigNum); + break; + + case TBIXXF_SIGNUM_DHF: + /* Unaligned access */ + unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN, + SigNum); + break; + case TBIXXF_SIGNUM_PGF: + /* Privilege violation */ + unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR, + SigNum); + break; + default: + BUG(); + break; + } + + return tail_end(State); +} + +static bool switch_is_syscall(unsigned int inst) +{ + return inst == __METAG_SW_ENCODING(SYS); +} + +static bool switch_is_legacy_syscall(unsigned int inst) +{ + return inst == __METAG_SW_ENCODING(SYS_LEGACY); +} + +static inline void step_over_switch(struct pt_regs *regs, unsigned int inst) +{ + regs->ctx.CurrPC += 4; +} + +static inline int test_syscall_work(void) +{ + return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK; +} + +TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) +{ + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned int sysnumber; + unsigned long long a1_a2, a3_a4, a5_a6; + LPSYSCALL syscall_entry; + int restart; + + head_end(State, ~INTS_OFF_MASK); + + /* + * If this is not a syscall SWITCH it could be a breakpoint. + */ + if (!switch_is_syscall(Inst)) { + /* + * Alert the user if they're trying to use legacy system + * calls. This suggests they need to update their C + * library and build against up to date kernel headers. + */ + if (switch_is_legacy_syscall(Inst)) + pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n"); + /* + * We don't know how to handle the SWITCH and cannot + * safely ignore it, so treat all unknown switches + * (including breakpoints) as traps. + */ + force_sig(SIGTRAP, current); + return tail_end(State); + } + + local_irq_enable(); + +restart_syscall: + restart = 0; + sysnumber = regs->ctx.DX[0].U1; + + if (test_syscall_work()) + sysnumber = syscall_trace_enter(regs); + + /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */ + step_over_switch(regs, Inst); + + if (sysnumber >= __NR_syscalls) { + pr_debug("unknown syscall number: %d\n", sysnumber); + syscall_entry = (LPSYSCALL) sys_ni_syscall; + } else { + syscall_entry = (LPSYSCALL) sys_call_table[sysnumber]; + } + + /* Use 64bit loads for speed. 
*/ + a5_a6 = *(unsigned long long *)®s->ctx.DX[1]; + a3_a4 = *(unsigned long long *)®s->ctx.DX[2]; + a1_a2 = *(unsigned long long *)®s->ctx.DX[3]; + + /* here is the actual call to the syscall handler functions */ + regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6); + + if (test_syscall_work()) + syscall_trace_leave(regs); + + State = tail_end_sys(State, sysnumber, &restart); + /* Handlerless restarts shouldn't go via userland */ + if (restart) + goto restart_syscall; + return State; +} + +TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) +{ + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + + /* + * This can be caused by any user process simply executing an unusual + * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the + * thread to stop, so signal a SIGTRAP instead. + */ + head_end(State, ~INTS_OFF_MASK); + if (user_mode(regs)) + force_sig(SIGTRAP, current); + else + State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI); + return tail_end(State); +} + +#ifdef CONFIG_METAG_META21 +TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) +{ + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned int error_state = Triggers; + siginfo_t info; + + head_end(State, ~INTS_OFF_MASK); + + local_irq_enable(); + + info.si_signo = SIGFPE; + + if (error_state & TXSTAT_FPE_INVALID_BIT) + info.si_code = FPE_FLTINV; + else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT) + info.si_code = FPE_FLTDIV; + else if (error_state & TXSTAT_FPE_OVERFLOW_BIT) + info.si_code = FPE_FLTOVF; + else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT) + info.si_code = FPE_FLTUND; + else if (error_state & TXSTAT_FPE_INEXACT_BIT) + info.si_code = FPE_FLTRES; + else + info.si_code = 0; + info.si_errno = 0; + info.si_addr = (__force void __user *)regs->ctx.CurrPC; + force_sig_info(SIGFPE, &info, current); + + return tail_end(State); +} +#endif + +#ifdef CONFIG_METAG_SUSPEND_MEM +struct traps_context { + PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1]; +}; + +static struct traps_context *metag_traps_context; + +int traps_save_context(void) +{ + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + struct traps_context *context; + + context = kzalloc(sizeof(*context), GFP_ATOMIC); + if (!context) + return -ENOMEM; + + memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs)); + + metag_traps_context = context; + return 0; +} + +int traps_restore_context(void) +{ + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + struct traps_context *context = metag_traps_context; + + metag_traps_context = NULL; + + memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs)); + + kfree(context); + return 0; +} +#endif + +#ifdef CONFIG_SMP +unsigned int get_trigger_mask(void) +{ + unsigned long cpu = smp_processor_id(); + return per_cpu(trigger_mask, cpu); +} + +static void set_trigger_mask(unsigned int mask) +{ + unsigned long cpu = smp_processor_id(); + per_cpu(trigger_mask, cpu) = mask; +} +#else +static void set_trigger_mask(unsigned int mask) +{ + global_trigger_mask = mask; +} +#endif + +void __cpuinit per_cpu_trap_init(unsigned long cpu) +{ + TBIRES int_context; + unsigned int thread = cpu_2_hwthread_id[cpu]; + + set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */ + TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */ + TBI_TRIG_BIT(TBID_SIGNUM_SW1) | + TBI_TRIG_BIT(TBID_SIGNUM_SWS)); + + /* non-priv - use current stack */ + int_context.Sig.pCtx = NULL; + /* Start with 
interrupts off */ + int_context.Sig.TrigMask = INTS_OFF_MASK; + int_context.Sig.SaveMask = 0; + + /* And call __TBIASyncTrigger() */ + __TBIASyncTrigger(int_context); +} + +void __init trap_init(void) +{ + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler; + +#ifdef CONFIG_METAG_META21 + _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR; + _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler; +#endif + + per_cpu_trap_init(cpu); +} + +void tbi_startup_interrupt(int irq) +{ + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + BUG_ON(irq > TBID_SIGNUM_MAX); + + /* For TR1 and TR2, the thread id is encoded in the irq number */ + if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3) + cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4]; + + set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq)); + + _pTBI->fnSigs[irq] = trigger_handler; +} + +void tbi_shutdown_interrupt(int irq) +{ + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + BUG_ON(irq > TBID_SIGNUM_MAX); + + set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq)); + + _pTBI->fnSigs[irq] = __TBIUnExpXXX; +} + +int ret_from_fork(TBIRES arg) +{ + struct task_struct *prev = arg.Switch.pPara; + struct task_struct *tsk = current; + struct pt_regs *regs = task_pt_regs(tsk); + int (*fn)(void *); + TBIRES Next; + + schedule_tail(prev); + + if (tsk->flags & PF_KTHREAD) { + fn = (void *)regs->ctx.DX[4].U1; + BUG_ON(!fn); + + fn((void *)regs->ctx.DX[3].U1); + } + + if (test_syscall_work()) + syscall_trace_leave(regs); + + preempt_disable(); + + Next.Sig.TrigMask = get_trigger_mask(); + Next.Sig.SaveMask = 0; + Next.Sig.pCtx = ®s->ctx; + + set_gateway_tls(current->thread.tls_ptr); + + preempt_enable_no_resched(); + + /* And interrupts should come back on when we resume the real usermode + * code. Call __TBIASyncResume() + */ + __TBIASyncResume(tail_end(Next)); + /* ASyncResume should NEVER return */ + BUG(); + return 0; +} + +void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs) +{ + unsigned long addr; +#ifdef CONFIG_FRAME_POINTER + unsigned long fp, fpnew; + unsigned long stack; +#endif + + if (regs && user_mode(regs)) + return; + + printk("\nCall trace: "); +#ifdef CONFIG_KALLSYMS + printk("\n"); +#endif + + if (!tsk) + tsk = current; + +#ifdef CONFIG_FRAME_POINTER + if (regs) { + print_ip_sym(regs->ctx.CurrPC); + fp = regs->ctx.AX[1].U0; + } else { + fp = __core_reg_get(A0FrP); + } + + /* detect when the frame pointer has been used for other purposes and + * doesn't point to the stack (it may point completely elsewhere which + * kstack_end may not detect). 
+ */ + stack = (unsigned long)task_stack_page(tsk); + while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) { + addr = __raw_readl((unsigned long *)(fp + 4)) - 4; + if (kernel_text_address(addr)) + print_ip_sym(addr); + else + break; + /* stack grows up, so frame pointers must decrease */ + fpnew = __raw_readl((unsigned long *)(fp + 0)); + if (fpnew >= fp) + break; + fp = fpnew; + } +#else + while (!kstack_end(sp)) { + addr = (*sp--) - 4; + if (kernel_text_address(addr)) + print_ip_sym(addr); + } +#endif + + printk("\n"); + + debug_show_held_locks(tsk); +} + +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + if (!tsk) + tsk = current; + if (tsk == current) + sp = (unsigned long *)current_stack_pointer; + else + sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0; + + show_trace(tsk, sp, NULL); +} + +void dump_stack(void) +{ + show_stack(NULL, NULL); +} +EXPORT_SYMBOL(dump_stack); -- cgit v1.2.3 From 63047ea36070d11f902ab7d09a5a18aea037c0f7 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:47 +0100 Subject: metag: IRQ handling Add core IRQ handling for metag. The code in irq.c exposes the TBX signal numbers as Linux IRQs. Signed-off-by: James Hogan --- arch/metag/include/asm/irq.h | 32 ++++ arch/metag/include/asm/irqflags.h | 94 +++++++++++ arch/metag/kernel/irq.c | 318 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 444 insertions(+) create mode 100644 arch/metag/include/asm/irq.h create mode 100644 arch/metag/include/asm/irqflags.h create mode 100644 arch/metag/kernel/irq.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h new file mode 100644 index 000000000000..be0c8f3c5a5d --- /dev/null +++ b/arch/metag/include/asm/irq.h @@ -0,0 +1,32 @@ +#ifndef __ASM_METAG_IRQ_H +#define __ASM_METAG_IRQ_H + +#ifdef CONFIG_4KSTACKS +extern void irq_ctx_init(int cpu); +extern void irq_ctx_exit(int cpu); +# define __ARCH_HAS_DO_SOFTIRQ +#else +# define irq_ctx_init(cpu) do { } while (0) +# define irq_ctx_exit(cpu) do { } while (0) +#endif + +void tbi_startup_interrupt(int); +void tbi_shutdown_interrupt(int); + +struct pt_regs; + +int tbisig_map(unsigned int hw); +extern void do_IRQ(int irq, struct pt_regs *regs); + +#ifdef CONFIG_METAG_SUSPEND_MEM +int traps_save_context(void); +int traps_restore_context(void); +#endif + +#include + +#ifdef CONFIG_HOTPLUG_CPU +extern void migrate_irqs(void); +#endif + +#endif /* __ASM_METAG_IRQ_H */ diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h new file mode 100644 index 000000000000..cba5e135bc9a --- /dev/null +++ b/arch/metag/include/asm/irqflags.h @@ -0,0 +1,94 @@ +/* + * IRQ flags handling + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() functions from the lowlevel headers. 
+ */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT + +#ifdef CONFIG_SMP +extern unsigned int get_trigger_mask(void); +#else + +extern unsigned int global_trigger_mask; + +static inline unsigned int get_trigger_mask(void) +{ + return global_trigger_mask; +} +#endif + +static inline unsigned long arch_local_save_flags(void) +{ + return __core_reg_get(TXMASKI); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & ~INTS_OFF_MASK) == 0; +} + +static inline int arch_irqs_disabled(void) +{ + unsigned long flags = arch_local_save_flags(); + + return arch_irqs_disabled_flags(flags); +} + +static inline unsigned long __irqs_disabled(void) +{ + /* + * We shouldn't enable exceptions if they are not already + * enabled. This is required for chancalls to work correctly. + */ + return arch_local_save_flags() & INTS_OFF_MASK; +} + +/* + * For spinlocks, etc: + */ +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = __irqs_disabled(); + + asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags) + : "memory"); + + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory"); +} + +static inline void arch_local_irq_disable(void) +{ + unsigned long flags = __irqs_disabled(); + + asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + arch_local_irq_restore(get_trigger_mask()); + preempt_enable_no_resched(); +#else + arch_local_irq_restore(get_trigger_mask()); +#endif +} + +#endif /* (__ASSEMBLY__) */ + +#endif /* !(_ASM_IRQFLAGS_H) */ diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c new file mode 100644 index 000000000000..7c043491e1e3 --- /dev/null +++ b/arch/metag/kernel/irq.c @@ -0,0 +1,318 @@ +/* + * Linux/Meta general interrupt handling code + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_4KSTACKS +union irq_ctx { + struct thread_info tinfo; + u32 stack[THREAD_SIZE/sizeof(u32)]; +}; + +static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; +static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; +#endif + +struct irq_domain *root_domain; + +static unsigned int startup_meta_irq(struct irq_data *data) +{ + tbi_startup_interrupt(data->hwirq); + return 0; +} + +static void shutdown_meta_irq(struct irq_data *data) +{ + tbi_shutdown_interrupt(data->hwirq); +} + +void do_IRQ(int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); +#ifdef CONFIG_4KSTACKS + struct irq_desc *desc; + union irq_ctx *curctx, *irqctx; + u32 *isp; +#endif + + irq_enter(); + + irq = irq_linear_revmap(root_domain, irq); + +#ifdef CONFIG_DEBUG_STACKOVERFLOW + /* Debugging check for stack overflow: is there less than 1KB free? */ + { + unsigned long sp; + + sp = __core_reg_get(A0StP); + sp &= THREAD_SIZE - 1; + + if (unlikely(sp > (THREAD_SIZE - 1024))) + pr_err("Stack overflow in do_IRQ: %ld\n", sp); + } +#endif + + +#ifdef CONFIG_4KSTACKS + curctx = (union irq_ctx *) current_thread_info(); + irqctx = hardirq_ctx[smp_processor_id()]; + + /* + * this is where we switch to the IRQ stack. 
However, if we are + * already using the IRQ stack (because we interrupted a hardirq + * handler) we can't do that and just have to keep using the + * current stack (which is the irq stack already after all) + */ + if (curctx != irqctx) { + /* build the stack frame on the IRQ stack */ + isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info)); + irqctx->tinfo.task = curctx->tinfo.task; + + /* + * Copy the softirq bits in preempt_count so that the + * softirq checks work in the hardirq context. + */ + irqctx->tinfo.preempt_count = + (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | + (curctx->tinfo.preempt_count & SOFTIRQ_MASK); + + desc = irq_to_desc(irq); + + asm volatile ( + "MOV D0.5,%0\n" + "MOV D1Ar1,%1\n" + "MOV D1RtP,%2\n" + "MOV D0Ar2,%3\n" + "SWAP A0StP,D0.5\n" + "SWAP PC,D1RtP\n" + "MOV A0StP,D0.5\n" + : + : "r" (isp), "r" (irq), "r" (desc->handle_irq), + "r" (desc) + : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", + "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", + "D0.5" + ); + } else +#endif + generic_handle_irq(irq); + + irq_exit(); + + set_irq_regs(old_regs); +} + +#ifdef CONFIG_4KSTACKS + +static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; + +static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; + +/* + * allocate per-cpu stacks for hardirq and for softirq processing + */ +void irq_ctx_init(int cpu) +{ + union irq_ctx *irqctx; + + if (hardirq_ctx[cpu]) + return; + + irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.exec_domain = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + hardirq_ctx[cpu] = irqctx; + + irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.exec_domain = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = 0; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + softirq_ctx[cpu] = irqctx; + + pr_info("CPU %u irqstacks, hard=%p soft=%p\n", + cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); +} + +void irq_ctx_exit(int cpu) +{ + hardirq_ctx[smp_processor_id()] = NULL; +} + +extern asmlinkage void __do_softirq(void); + +asmlinkage void do_softirq(void) +{ + unsigned long flags; + struct thread_info *curctx; + union irq_ctx *irqctx; + u32 *isp; + + if (in_interrupt()) + return; + + local_irq_save(flags); + + if (local_softirq_pending()) { + curctx = current_thread_info(); + irqctx = softirq_ctx[smp_processor_id()]; + irqctx->tinfo.task = curctx->task; + + /* build the stack frame on the softirq stack */ + isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info)); + + asm volatile ( + "MOV D0.5,%0\n" + "SWAP A0StP,D0.5\n" + "CALLR D1RtP,___do_softirq\n" + "MOV A0StP,D0.5\n" + : + : "r" (isp) + : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", + "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", + "D0.5" + ); + /* + * Shouldn't happen, we returned above if in_interrupt(): + */ + WARN_ON_ONCE(softirq_count()); + } + + local_irq_restore(flags); +} +#endif + +static struct irq_chip meta_irq_type = { + .name = "META-IRQ", + .irq_startup = startup_meta_irq, + .irq_shutdown = shutdown_meta_irq, +}; + +/** + * tbisig_map() - Map a TBI signal number to a virtual IRQ number. + * @hw: Number of the TBI signal. Must be in range. + * + * Returns: The virtual IRQ number of the TBI signal number IRQ specified by + * @hw. 
+ */ +int tbisig_map(unsigned int hw) +{ + return irq_create_mapping(root_domain, hw); +} + +/** + * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number + * @d: root irq domain + * @irq: virtual irq number + * @hw: hardware irq number (TBI signal number) + * + * This sets up a virtual irq for a specified TBI signal number. + */ +static int metag_tbisig_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ +#ifdef CONFIG_SMP + irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq); +#else + irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq); +#endif + return 0; +} + +static const struct irq_domain_ops metag_tbisig_domain_ops = { + .map = metag_tbisig_map, +}; + +/* + * void init_IRQ(void) + * + * Parameters: None + * + * Returns: Nothing + * + * This function should be called during kernel startup to initialize + * the IRQ handling routines. + */ +void __init init_IRQ(void) +{ + root_domain = irq_domain_add_linear(NULL, 32, + &metag_tbisig_domain_ops, NULL); + if (unlikely(!root_domain)) + panic("init_IRQ: cannot add root IRQ domain"); + + irq_ctx_init(smp_processor_id()); + + if (machine_desc->init_irq) + machine_desc->init_irq(); +} + +int __init arch_probe_nr_irqs(void) +{ + if (machine_desc->nr_irqs) + nr_irqs = machine_desc->nr_irqs; + return 0; +} + +#ifdef CONFIG_HOTPLUG_CPU +static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip = irq_data_get_irq_chip(data); + + raw_spin_lock_irq(&desc->lock); + if (chip->irq_set_affinity) + chip->irq_set_affinity(data, cpumask_of(cpu), false); + raw_spin_unlock_irq(&desc->lock); +} + +/* + * The CPU has been marked offline. Migrate IRQs off this CPU. If + * the affinity settings do not allow other CPUs, force them onto any + * available CPU. + */ +void migrate_irqs(void) +{ + unsigned int i, cpu = smp_processor_id(); + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_data *data = irq_desc_get_irq_data(desc); + unsigned int newcpu; + + if (irqd_is_per_cpu(data)) + continue; + + if (!cpumask_test_cpu(cpu, data->affinity)) + continue; + + newcpu = cpumask_any_and(data->affinity, cpu_online_mask); + + if (newcpu >= nr_cpu_ids) { + pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", + i, cpu); + + cpumask_setall(data->affinity); + newcpu = cpumask_any_and(data->affinity, + cpu_online_mask); + } + + route_irq(data, i, newcpu); + } +} +#endif /* CONFIG_HOTPLUG_CPU */ -- cgit v1.2.3 From 5698c50d9da4ab2f57d98c64ea97675dcaf2a608 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:47 +0100 Subject: metag: Internal and external irqchips Meta core internal interrupts (from HWSTATMETA and friends) are vectored onto the TR1 core trigger for the current thread. This is demultiplexed in irq-metag.c to individual Linux IRQs for each internal interrupt. External SoC interrupts (from HWSTATEXT and friends) are vectored onto the TR2 core trigger for the current thread. This is demultiplexed in irq-metag-ext.c to individual Linux IRQs for each external SoC interrupt. The external irqchip has devicetree bindings for configuring the number of irq banks and the type of masking available. 
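Both demultiplexers hang off the relevant core trigger using the same
chained handler pattern; a minimal sketch of the registration (mirroring
meta_intc_init_cpu() in this patch, error handling omitted):

    irq = tbisig_map(TBID_SIGNUM_TR2(thread));
    irq_set_chained_handler(irq, meta_intc_irq_demux);
    irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);

The demux handler reads the relevant HWSTAT register, masks it against
the set of IRQs Linux has unmasked, and calls generic_handle_irq() for
each pending source.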
Signed-off-by: James Hogan Cc: Arnd Bergmann Cc: Grant Likely Cc: Rob Herring Cc: Rob Landley Cc: Dom Cobley Cc: Simon Arlott Cc: Viresh Kumar Cc: Maxime Ripard Cc: devicetree-discuss@lists.ozlabs.org Cc: linux-doc@vger.kernel.org --- .../devicetree/bindings/metag/meta-intc.txt | 82 ++ MAINTAINERS | 2 + arch/metag/kernel/irq.c | 5 + drivers/irqchip/Makefile | 2 + drivers/irqchip/irq-metag-ext.c | 868 +++++++++++++++++++++ drivers/irqchip/irq-metag.c | 343 ++++++++ include/linux/irqchip/metag-ext.h | 33 + include/linux/irqchip/metag.h | 24 + 8 files changed, 1359 insertions(+) create mode 100644 Documentation/devicetree/bindings/metag/meta-intc.txt create mode 100644 drivers/irqchip/irq-metag-ext.c create mode 100644 drivers/irqchip/irq-metag.c create mode 100644 include/linux/irqchip/metag-ext.h create mode 100644 include/linux/irqchip/metag.h (limited to 'arch/metag/kernel') diff --git a/Documentation/devicetree/bindings/metag/meta-intc.txt b/Documentation/devicetree/bindings/metag/meta-intc.txt new file mode 100644 index 000000000000..8c47dcbfabc6 --- /dev/null +++ b/Documentation/devicetree/bindings/metag/meta-intc.txt @@ -0,0 +1,82 @@
+* Meta External Trigger Controller Binding
+
+This binding specifies what properties must be available in the device tree
+representation of a Meta external trigger controller.
+
+Required properties:
+
+ - compatible: Specifies the compatibility list for the interrupt controller.
+ The type shall be <string> and the value shall include "img,meta-intc".
+
+ - num-banks: Specifies the number of interrupt banks (each of which can
+ handle 32 interrupt sources).
+
+ - interrupt-controller: The presence of this property identifies the node
+ as an interrupt controller. No property value shall be defined.
+
+ - #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The type shall be a <u32> and the value shall be 2.
+
+ - #address-cells: Specifies the number of cells needed to encode an
+ address. The type shall be <u32> and the value shall be 0. As such,
+ 'interrupt-map' nodes do not have to specify a parent unit address.
+
+Optional properties:
+
+ - no-mask: The controller doesn't have any mask registers.
+
+* Interrupt Specifier Definition
+
+ Interrupt specifiers consist of 2 cells encoded as follows:
+
+ - <1st-cell>: The interrupt-number that identifies the interrupt source.
+
+ - <2nd-cell>: The Linux interrupt flags containing level-sense information,
+ encoded as follows:
+ 1 = edge triggered
+ 4 = level-sensitive
+
+* Examples
+
+Example 1:
+
+ /*
+ * Meta external trigger block
+ */
+ intc: intc {
+ // This is an interrupt controller node.
+ interrupt-controller;
+
+ // No address cells so that 'interrupt-map' nodes which
+ // reference this interrupt controller node do not need a parent
+ // address specifier.
+ #address-cells = <0>;
+
+ // Two cells to encode interrupt sources.
+ #interrupt-cells = <2>;
+
+ // Number of interrupt banks
+ num-banks = <2>;
+
+ // No HWMASKEXT is available (specify on Chorus2 and Comet ES1)
+ no-mask;
+
+ // Compatible with Meta hardware trigger block.
+ compatible = "img,meta-intc";
+ };
+
+Example 2:
+
+ /*
+ * An interrupt generating device that is wired to a Meta external
+ * trigger block.
+ */
+ uart1: uart@0x02004c00 {
+ // Interrupt source '5' that is level-sensitive.
+ // Note that there are only two cells as specified in the
+ // interrupt parent's '#interrupt-cells' property.
+ interrupts = <5 4 /* level */>;
+
+ // The interrupt controller that this device is wired to.
+ interrupt-parent = <&intc>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 749d76699ead..9b2b7699da4d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5040,6 +5040,8 @@ F: arch/metag/ F: Documentation/metag/ F: Documentation/devicetree/bindings/metag/ F: drivers/clocksource/metag_generic.c +F: drivers/irqchip/irq-metag.c +F: drivers/irqchip/irq-metag-ext.c MICROBLAZE ARCHITECTURE M: Michal Simek diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c index 7c043491e1e3..87707efeb0a3 100644 --- a/arch/metag/kernel/irq.c +++ b/arch/metag/kernel/irq.c @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include @@ -258,6 +260,9 @@ void __init init_IRQ(void) irq_ctx_init(smp_processor_id()); + init_internal_IRQ(); + init_external_IRQ(); + if (machine_desc->init_irq) machine_desc->init_irq(); } diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index bf4609a5bd9d..ff02f6b98863 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -1,4 +1,6 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o +obj-$(CONFIG_METAG) += irq-metag-ext.o +obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c new file mode 100644 index 000000000000..92c41ab4dbfd --- /dev/null +++ b/drivers/irqchip/irq-metag-ext.c @@ -0,0 +1,868 @@ +/* + * Meta External interrupt code. + * + * Copyright (C) 2005-2012 Imagination Technologies Ltd. + * + * External interrupts on Meta are configured at two-levels, in the CPU core and + * in the external trigger block. Interrupts from SoC peripherals are + * multiplexed onto a single Meta CPU "trigger" - traditionally it has always + * been trigger 2 (TR2). For info on how de-multiplexing happens check out + * meta_intc_irq_demux(). 
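+ *
+ * Roughly, the path an external interrupt takes is:
+ *
+ *   SoC block -> HWSTATEXT status bit -> HWVECnEXT vector -> core
+ *   trigger TR2 -> meta_intc_irq_demux() -> generic_handle_irq()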
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define HWSTAT_STRIDE 8 +#define HWVEC_BLK_STRIDE 0x1000 + +/** + * struct meta_intc_priv - private meta external interrupt data + * @nr_banks: Number of interrupt banks + * @domain: IRQ domain for all banks of external IRQs + * @unmasked: Record of unmasked IRQs + * @levels_altered: Record of altered level bits + */ +struct meta_intc_priv { + unsigned int nr_banks; + struct irq_domain *domain; + + unsigned long unmasked[4]; + +#ifdef CONFIG_METAG_SUSPEND_MEM + unsigned long levels_altered[4]; +#endif +}; + +/* Private data for the one and only external interrupt controller */ +static struct meta_intc_priv meta_intc_priv; + +/** + * meta_intc_offset() - Get the offset into the bank of a hardware IRQ number + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Bit offset into the IRQ's bank registers + */ +static unsigned int meta_intc_offset(irq_hw_number_t hw) +{ + return hw & 0x1f; +} + +/** + * meta_intc_bank() - Get the bank number of a hardware IRQ number + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Bank number indicating which register the IRQ's bits are + */ +static unsigned int meta_intc_bank(irq_hw_number_t hw) +{ + return hw >> 5; +} + +/** + * meta_intc_stat_addr() - Get the address of a HWSTATEXT register + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Address of a HWSTATEXT register containing the status bit for + * the specified hardware IRQ number + */ +static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw) +{ + return (void __iomem *)(HWSTATEXT + + HWSTAT_STRIDE * meta_intc_bank(hw)); +} + +/** + * meta_intc_level_addr() - Get the address of a HWLEVELEXT register + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Address of a HWLEVELEXT register containing the sense bit for + * the specified hardware IRQ number + */ +static void __iomem *meta_intc_level_addr(irq_hw_number_t hw) +{ + return (void __iomem *)(HWLEVELEXT + + HWSTAT_STRIDE * meta_intc_bank(hw)); +} + +/** + * meta_intc_mask_addr() - Get the address of a HWMASKEXT register + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Address of a HWMASKEXT register containing the mask bit for the + * specified hardware IRQ number + */ +static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw) +{ + return (void __iomem *)(HWMASKEXT + + HWSTAT_STRIDE * meta_intc_bank(hw)); +} + +/** + * meta_intc_vec_addr() - Get the vector address of a hardware interrupt + * @hw: Hardware IRQ number (within external trigger block) + * + * Returns: Address of a HWVECEXT register controlling the core trigger to + * vector the IRQ onto + */ +static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw) +{ + return (void __iomem *)(HWVEC0EXT + + HWVEC_BLK_STRIDE * meta_intc_bank(hw) + + HWVECnEXT_STRIDE * meta_intc_offset(hw)); +} + +/** + * meta_intc_startup_irq() - set up an external irq + * @data: data for the external irq to start up + * + * Multiplex interrupts for irq onto TR2. Clear any pending interrupts and + * unmask irq, both using the appropriate callbacks. + */ +static unsigned int meta_intc_startup_irq(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + void __iomem *vec_addr = meta_intc_vec_addr(hw); + int thread = hard_processor_id(); + + /* Perform any necessary acking. 
*/
+ if (data->chip->irq_ack)
+ data->chip->irq_ack(data);
+
+ /* Wire up this interrupt to the core with HWVECxEXT. */
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+
+ /* Perform any necessary unmasking. */
+ data->chip->irq_unmask(data);
+
+ return 0;
+}
+
+/**
+ * meta_intc_shutdown_irq() - turn off an external irq
+ * @data: data for the external irq to turn off
+ *
+ * Mask irq using the appropriate callback and stop muxing it onto TR2.
+ */
+static void meta_intc_shutdown_irq(struct irq_data *data)
+{
+ irq_hw_number_t hw = data->hwirq;
+ void __iomem *vec_addr = meta_intc_vec_addr(hw);
+
+ /* Mask the IRQ */
+ data->chip->irq_mask(data);
+
+ /*
+ * Disable the IRQ at the core by removing the interrupt from
+ * the HW vector mapping.
+ */
+ metag_out32(0, vec_addr);
+}
+
+/**
+ * meta_intc_ack_irq() - acknowledge an external irq
+ * @data: data for the external irq to ack
+ *
+ * Clear down an edge interrupt in the status register.
+ */
+static void meta_intc_ack_irq(struct irq_data *data)
+{
+ irq_hw_number_t hw = data->hwirq;
+ unsigned int bit = 1 << meta_intc_offset(hw);
+ void __iomem *stat_addr = meta_intc_stat_addr(hw);
+
+ /* Ack the int, if it is still 'on'.
+ * NOTE - this only works for edge triggered interrupts.
+ */
+ if (metag_in32(stat_addr) & bit)
+ metag_out32(bit, stat_addr);
+}
+
+/**
+ * record_irq_is_masked() - record the IRQ masked so it doesn't get handled
+ * @data: data for the external irq to record
+ *
+ * This should get called whenever an external IRQ is masked (by whichever
+ * callback is used). It records the IRQ masked so that it doesn't get handled
+ * if it still shows up in the status register.
+ */
+static void record_irq_is_masked(struct irq_data *data)
+{
+ struct meta_intc_priv *priv = &meta_intc_priv;
+ irq_hw_number_t hw = data->hwirq;
+
+ clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
+}
+
+/**
+ * record_irq_is_unmasked() - record the IRQ unmasked so it can be handled
+ * @data: data for the external irq to record
+ *
+ * This should get called whenever an external IRQ is unmasked (by whichever
+ * callback is used). It records the IRQ unmasked so that it gets handled if it
+ * shows up in the status register.
+ */
+static void record_irq_is_unmasked(struct irq_data *data)
+{
+ struct meta_intc_priv *priv = &meta_intc_priv;
+ irq_hw_number_t hw = data->hwirq;
+
+ set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
+}
+
+/*
+ * For use by wrapper IRQ drivers
+ */
+
+/**
+ * meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers
+ * @data: data for the external irq being masked
+ *
+ * This should be called by any wrapper IRQ driver mask functions. It doesn't
+ * do any masking but records the IRQ as masked so that the core code knows the
+ * mask has taken place. It is the caller's responsibility to ensure that the
+ * IRQ won't trigger an interrupt to the core.
+ */
+void meta_intc_mask_irq_simple(struct irq_data *data)
+{
+ record_irq_is_masked(data);
+}
+
+/**
+ * meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers
+ * @data: data for the external irq being unmasked
+ *
+ * This should be called by any wrapper IRQ driver unmask functions. It doesn't
+ * do any unmasking but records the IRQ as unmasked so that the core code knows
+ * the unmask has taken place. It is the caller's responsibility to ensure that
+ * the IRQ can now trigger an interrupt to the core.
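+ *
+ * A hypothetical SoC wrapper chip might pair SoC-level unmasking with
+ * this helper like so (soc_write_mask_bit() is illustrative only, not a
+ * real function):
+ *
+ *	static void soc_unmask_irq(struct irq_data *data)
+ *	{
+ *		soc_write_mask_bit(data->hwirq, 1);
+ *		meta_intc_unmask_irq_simple(data);
+ *	}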
+ */ +void meta_intc_unmask_irq_simple(struct irq_data *data) +{ + record_irq_is_unmasked(data); +} + + +/** + * meta_intc_mask_irq() - mask an external irq using HWMASKEXT + * @data: data for the external irq to mask + * + * This is a default implementation of a mask function which makes use of the + * HWMASKEXT registers available in newer versions. + * + * Earlier versions without these registers should use SoC level IRQ masking + * which call the meta_intc_*_simple() functions above, or if that isn't + * available should use the fallback meta_intc_*_nomask() functions below. + */ +static void meta_intc_mask_irq(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *mask_addr = meta_intc_mask_addr(hw); + unsigned long flags; + + record_irq_is_masked(data); + + /* update the interrupt mask */ + __global_lock2(flags); + metag_out32(metag_in32(mask_addr) & ~bit, mask_addr); + __global_unlock2(flags); +} + +/** + * meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT + * @data: data for the external irq to unmask + * + * This is a default implementation of an unmask function which makes use of the + * HWMASKEXT registers available on new versions. It should be paired with + * meta_intc_mask_irq() above. + */ +static void meta_intc_unmask_irq(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *mask_addr = meta_intc_mask_addr(hw); + unsigned long flags; + + record_irq_is_unmasked(data); + + /* update the interrupt mask */ + __global_lock2(flags); + metag_out32(metag_in32(mask_addr) | bit, mask_addr); + __global_unlock2(flags); +} + +/** + * meta_intc_mask_irq_nomask() - mask an external irq by unvectoring + * @data: data for the external irq to mask + * + * This is the version of the mask function for older versions which don't have + * HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is + * unvectored from the core and retriggered if necessary later. + */ +static void meta_intc_mask_irq_nomask(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + void __iomem *vec_addr = meta_intc_vec_addr(hw); + + record_irq_is_masked(data); + + /* there is no interrupt mask, so unvector the interrupt */ + metag_out32(0, vec_addr); +} + +/** + * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring + * @data: data for the external irq to unmask + * + * This is the version of the unmask function for older versions which don't + * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the + * IRQ is revectored back to the core and retriggered if necessary. + * + * The retriggering done by this function is specific to edge interrupts. + */ +static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *stat_addr = meta_intc_stat_addr(hw); + void __iomem *vec_addr = meta_intc_vec_addr(hw); + unsigned int thread = hard_processor_id(); + + record_irq_is_unmasked(data); + + /* there is no interrupt mask, so revector the interrupt */ + metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); + + /* + * Re-trigger interrupt + * + * Writing a 1 toggles, and a 0->1 transition triggers. We only + * retrigger if the status bit is already set, which means we + * need to clear it first. 
Retriggering is fundamentally racy + * because if the interrupt fires again after we clear it we + * could end up clearing it again and the interrupt handler + * thinking it hasn't fired. Therefore we need to keep trying to + * retrigger until the bit is set. + */ + if (metag_in32(stat_addr) & bit) { + metag_out32(bit, stat_addr); + while (!(metag_in32(stat_addr) & bit)) + metag_out32(bit, stat_addr); + } +} + +/** + * meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring + * @data: data for the external irq to unmask + * + * This is the version of the unmask function for older versions which don't + * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the + * IRQ is revectored back to the core and retriggered if necessary. + * + * The retriggering done by this function is specific to level interrupts. + */ +static void meta_intc_unmask_level_irq_nomask(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *stat_addr = meta_intc_stat_addr(hw); + void __iomem *vec_addr = meta_intc_vec_addr(hw); + unsigned int thread = hard_processor_id(); + + record_irq_is_unmasked(data); + + /* there is no interrupt mask, so revector the interrupt */ + metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); + + /* Re-trigger interrupt */ + /* Writing a 1 triggers interrupt */ + if (metag_in32(stat_addr) & bit) + metag_out32(bit, stat_addr); +} + +/** + * meta_intc_irq_set_type() - set the type of an external irq + * @data: data for the external irq to set the type of + * @flow_type: new irq flow type + * + * Set the flow type of an external interrupt. This updates the irq chip and irq + * handler depending on whether the irq is edge or level sensitive (the polarity + * is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows + * when to trigger. + */ +static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ +#ifdef CONFIG_METAG_SUSPEND_MEM + struct meta_intc_priv *priv = &meta_intc_priv; +#endif + unsigned int irq = data->irq; + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *level_addr = meta_intc_level_addr(hw); + unsigned long flags; + unsigned int level; + + /* update the chip/handler */ + if (flow_type & IRQ_TYPE_LEVEL_MASK) + __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip, + handle_level_irq, NULL); + else + __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip, + handle_edge_irq, NULL); + + /* and clear/set the bit in HWLEVELEXT */ + __global_lock2(flags); + level = metag_in32(level_addr); + if (flow_type & IRQ_TYPE_LEVEL_MASK) + level |= bit; + else + level &= ~bit; + metag_out32(level, level_addr); +#ifdef CONFIG_METAG_SUSPEND_MEM + priv->levels_altered[meta_intc_bank(hw)] |= bit; +#endif + __global_unlock2(flags); + + return 0; +} + +/** + * meta_intc_irq_demux() - external irq de-multiplexer + * @irq: the virtual interrupt number + * @desc: the interrupt description structure for this irq + * + * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is + * this function's job to demux this irq and figure out exactly which external + * irq needs servicing. + * + * Whilst using TR2 to detect external interrupts is a software convention it is + * (hopefully) unlikely to change. 
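+ *
+ * As a worked example: external hardware IRQ 37 lives in bank 1
+ * (37 >> 5) at bit offset 5 (37 & 0x1f), so its pending bit is read from
+ * the second HWSTATEXT register (HWSTAT_STRIDE bytes on) and, once
+ * demuxed, mapped back to a virtual IRQ with irq_linear_revmap().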
+ */ +static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) +{ + struct meta_intc_priv *priv = &meta_intc_priv; + irq_hw_number_t hw; + unsigned int bank, irq_no, status; + void __iomem *stat_addr = meta_intc_stat_addr(0); + + /* + * Locate which interrupt has caused our handler to run. + */ + for (bank = 0; bank < priv->nr_banks; ++bank) { + /* Which interrupts are currently pending in this bank? */ +recalculate: + status = metag_in32(stat_addr) & priv->unmasked[bank]; + + for (hw = bank*32; status; status >>= 1, ++hw) { + if (status & 0x1) { + /* + * Map the hardware IRQ number to a virtual + * Linux IRQ number. + */ + irq_no = irq_linear_revmap(priv->domain, hw); + + /* + * Only fire off external interrupts that are + * registered to be handled by the kernel. + * Other external interrupts are probably being + * handled by other Meta hardware threads. + */ + generic_handle_irq(irq_no); + + /* + * The handler may have re-enabled interrupts + * which could have caused a nested invocation + * of this code and make the copy of the + * status register we are using invalid. + */ + goto recalculate; + } + } + stat_addr += HWSTAT_STRIDE; + } +} + +#ifdef CONFIG_SMP +/** + * meta_intc_set_affinity() - set the affinity for an interrupt + * @data: data for the external irq to set the affinity of + * @cpumask: cpu mask representing cpus which can handle the interrupt + * @force: whether to force (ignored) + * + * Revector the specified external irq onto a specific cpu's TR2 trigger, so + * that that cpu tends to be the one who handles it. + */ +static int meta_intc_set_affinity(struct irq_data *data, + const struct cpumask *cpumask, bool force) +{ + irq_hw_number_t hw = data->hwirq; + void __iomem *vec_addr = meta_intc_vec_addr(hw); + unsigned int cpu, thread; + + /* + * Wire up this interrupt from HWVECxEXT to the Meta core. + * + * Note that we can't wire up HWVECxEXT to interrupt more than + * one cpu (the interrupt code doesn't support it), so we just + * pick the first cpu we find in 'cpumask'. + */ + cpu = cpumask_any(cpumask); + thread = cpu_2_hwthread_id[cpu]; + + metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); + + return 0; +} +#else +#define meta_intc_set_affinity NULL +#endif + +#ifdef CONFIG_PM_SLEEP +#define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \ + | IRQCHIP_SKIP_SET_WAKE) +#else +#define META_INTC_CHIP_FLAGS 0 +#endif + +/* public edge/level irq chips which SoCs can override */ + +struct irq_chip meta_intc_edge_chip = { + .irq_startup = meta_intc_startup_irq, + .irq_shutdown = meta_intc_shutdown_irq, + .irq_ack = meta_intc_ack_irq, + .irq_mask = meta_intc_mask_irq, + .irq_unmask = meta_intc_unmask_irq, + .irq_set_type = meta_intc_irq_set_type, + .irq_set_affinity = meta_intc_set_affinity, + .flags = META_INTC_CHIP_FLAGS, +}; + +struct irq_chip meta_intc_level_chip = { + .irq_startup = meta_intc_startup_irq, + .irq_shutdown = meta_intc_shutdown_irq, + .irq_set_type = meta_intc_irq_set_type, + .irq_mask = meta_intc_mask_irq, + .irq_unmask = meta_intc_unmask_irq, + .irq_set_affinity = meta_intc_set_affinity, + .flags = META_INTC_CHIP_FLAGS, +}; + +/** + * meta_intc_map() - map an external irq + * @d: irq domain of external trigger block + * @irq: virtual irq number + * @hw: hardware irq number within external trigger block + * + * This sets up a virtual irq for a specified hardware interrupt. The irq chip + * and handler is configured, using the HWLEVELEXT registers to determine + * edge/level flow type. 
These registers will have been set when the irq type is + * set (or set to a default at init time). + */ +static int meta_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + unsigned int bit = 1 << meta_intc_offset(hw); + void __iomem *level_addr = meta_intc_level_addr(hw); + + /* Go by the current sense in the HWLEVELEXT register */ + if (metag_in32(level_addr) & bit) + irq_set_chip_and_handler(irq, &meta_intc_level_chip, + handle_level_irq); + else + irq_set_chip_and_handler(irq, &meta_intc_edge_chip, + handle_edge_irq); + return 0; +} + +static const struct irq_domain_ops meta_intc_domain_ops = { + .map = meta_intc_map, + .xlate = irq_domain_xlate_twocell, +}; + +#ifdef CONFIG_METAG_SUSPEND_MEM + +/** + * struct meta_intc_context - suspend context + * @levels: State of HWLEVELEXT registers + * @masks: State of HWMASKEXT registers + * @vectors: State of HWVECEXT registers + * @txvecint: State of TxVECINT registers + * + * This structure stores the IRQ state across suspend. + */ +struct meta_intc_context { + u32 levels[4]; + u32 masks[4]; + u8 vectors[4*32]; + + u8 txvecint[4][4]; +}; + +/* suspend context */ +static struct meta_intc_context *meta_intc_context; + +/** + * meta_intc_suspend() - store irq state + * + * To avoid interfering with other threads we only save the IRQ state of IRQs in + * use by Linux. + */ +static int meta_intc_suspend(void) +{ + struct meta_intc_priv *priv = &meta_intc_priv; + int i, j; + irq_hw_number_t hw; + unsigned int bank; + unsigned long flags; + struct meta_intc_context *context; + void __iomem *level_addr, *mask_addr, *vec_addr; + u32 mask, bit; + + context = kzalloc(sizeof(*context), GFP_ATOMIC); + if (!context) + return -ENOMEM; + + hw = 0; + level_addr = meta_intc_level_addr(0); + mask_addr = meta_intc_mask_addr(0); + for (bank = 0; bank < priv->nr_banks; ++bank) { + vec_addr = meta_intc_vec_addr(hw); + + /* create mask of interrupts in use */ + mask = 0; + for (bit = 1; bit; bit <<= 1) { + i = irq_linear_revmap(priv->domain, hw); + /* save mapped irqs which are enabled or have actions */ + if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) || + irq_has_action(i))) { + mask |= bit; + + /* save trigger vector */ + context->vectors[hw] = metag_in32(vec_addr); + } + + ++hw; + vec_addr += HWVECnEXT_STRIDE; + } + + /* save level state if any IRQ levels altered */ + if (priv->levels_altered[bank]) + context->levels[bank] = metag_in32(level_addr); + /* save mask state if any IRQs in use */ + if (mask) + context->masks[bank] = metag_in32(mask_addr); + + level_addr += HWSTAT_STRIDE; + mask_addr += HWSTAT_STRIDE; + } + + /* save trigger matrixing */ + __global_lock2(flags); + for (i = 0; i < 4; ++i) + for (j = 0; j < 4; ++j) + context->txvecint[i][j] = metag_in32(T0VECINT_BHALT + + TnVECINT_STRIDE*i + + 8*j); + __global_unlock2(flags); + + meta_intc_context = context; + return 0; +} + +/** + * meta_intc_resume() - restore saved irq state + * + * Restore the saved IRQ state and drop it. 
+ */ +static void meta_intc_resume(void) +{ + struct meta_intc_priv *priv = &meta_intc_priv; + int i, j; + irq_hw_number_t hw; + unsigned int bank; + unsigned long flags; + struct meta_intc_context *context = meta_intc_context; + void __iomem *level_addr, *mask_addr, *vec_addr; + u32 mask, bit, tmp; + + meta_intc_context = NULL; + + hw = 0; + level_addr = meta_intc_level_addr(0); + mask_addr = meta_intc_mask_addr(0); + for (bank = 0; bank < priv->nr_banks; ++bank) { + vec_addr = meta_intc_vec_addr(hw); + + /* create mask of interrupts in use */ + mask = 0; + for (bit = 1; bit; bit <<= 1) { + i = irq_linear_revmap(priv->domain, hw); + /* restore mapped irqs, enabled or with actions */ + if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) || + irq_has_action(i))) { + mask |= bit; + + /* restore trigger vector */ + metag_out32(context->vectors[hw], vec_addr); + } + + ++hw; + vec_addr += HWVECnEXT_STRIDE; + } + + if (mask) { + /* restore mask state */ + __global_lock2(flags); + tmp = metag_in32(mask_addr); + tmp = (tmp & ~mask) | (context->masks[bank] & mask); + metag_out32(tmp, mask_addr); + __global_unlock2(flags); + } + + mask = priv->levels_altered[bank]; + if (mask) { + /* restore level state */ + __global_lock2(flags); + tmp = metag_in32(level_addr); + tmp = (tmp & ~mask) | (context->levels[bank] & mask); + metag_out32(tmp, level_addr); + __global_unlock2(flags); + } + + level_addr += HWSTAT_STRIDE; + mask_addr += HWSTAT_STRIDE; + } + + /* restore trigger matrixing */ + __global_lock2(flags); + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) { + metag_out32(context->txvecint[i][j], + T0VECINT_BHALT + + TnVECINT_STRIDE*i + + 8*j); + } + } + __global_unlock2(flags); + + kfree(context); +} + +static struct syscore_ops meta_intc_syscore_ops = { + .suspend = meta_intc_suspend, + .resume = meta_intc_resume, +}; + +static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv) +{ + register_syscore_ops(&meta_intc_syscore_ops); +} +#else +#define meta_intc_init_syscore_ops(priv) do {} while (0) +#endif + +/** + * meta_intc_init_cpu() - register with a Meta cpu + * @priv: private interrupt controller data + * @cpu: the CPU to register on + * + * Configure @cpu's TR2 irq so that we can demux external irqs. + */ +static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu) +{ + unsigned int thread = cpu_2_hwthread_id[cpu]; + unsigned int signum = TBID_SIGNUM_TR2(thread); + int irq = tbisig_map(signum); + + /* Register the multiplexed IRQ handler */ + irq_set_chained_handler(irq, meta_intc_irq_demux); + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); +} + +/** + * meta_intc_no_mask() - indicate lack of HWMASKEXT registers + * + * Called from SoC code (or init code below) to dynamically indicate the lack of + * HWMASKEXT registers (for example depending on some SoC revision register). + * This alters the irq mask and unmask callbacks to use the fallback + * unvectoring/retriggering technique instead of using HWMASKEXT registers. + */ +void __init meta_intc_no_mask(void) +{ + meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask; + meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask; + meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask; + meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask; +} + +/** + * init_external_IRQ() - initialise the external irq controller + * + * Set up the external irq controller using device tree properties. This is + * called from init_IRQ(). 
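+ *
+ * A minimal device tree node satisfying this probe (property values are
+ * illustrative) looks like:
+ *
+ *	intc: intc {
+ *		compatible = "img,meta-intc";
+ *		interrupt-controller;
+ *		#interrupt-cells = <2>;
+ *		#address-cells = <0>;
+ *		num-banks = <2>;
+ *	};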
+ */ +int __init init_external_IRQ(void) +{ + struct meta_intc_priv *priv = &meta_intc_priv; + struct device_node *node; + int ret, cpu; + u32 val; + bool no_masks = false; + + node = of_find_compatible_node(NULL, NULL, "img,meta-intc"); + if (!node) + return -ENOENT; + + /* Get number of banks */ + ret = of_property_read_u32(node, "num-banks", &val); + if (ret) { + pr_err("meta-intc: No num-banks property found\n"); + return ret; + } + if (val < 1 || val > 4) { + pr_err("meta-intc: num-banks (%u) out of range\n", val); + return -EINVAL; + } + priv->nr_banks = val; + + /* Are any mask registers present? */ + if (of_get_property(node, "no-mask", NULL)) + no_masks = true; + + /* No HWMASKEXT registers present? */ + if (no_masks) + meta_intc_no_mask(); + + /* Set up an IRQ domain */ + /* + * This is a legacy IRQ domain for now until all the platform setup code + * has been converted to devicetree. + */ + priv->domain = irq_domain_add_linear(node, priv->nr_banks*32, + &meta_intc_domain_ops, priv); + if (unlikely(!priv->domain)) { + pr_err("meta-intc: cannot add IRQ domain\n"); + return -ENOMEM; + } + + /* Setup TR2 for all cpus. */ + for_each_possible_cpu(cpu) + meta_intc_init_cpu(priv, cpu); + + /* Set up system suspend/resume callbacks */ + meta_intc_init_syscore_ops(priv); + + pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n", + priv->nr_banks*32); + + return 0; +} diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c new file mode 100644 index 000000000000..8e94d7a3b20d --- /dev/null +++ b/drivers/irqchip/irq-metag.c @@ -0,0 +1,343 @@ +/* + * Meta internal (HWSTATMETA) interrupt code. + * + * Copyright (C) 2011-2012 Imagination Technologies Ltd. + * + * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c + * The code base could be generalised/merged as a lot of the functionality is + * similar. Until this is done, we try to keep the code simple here. 
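+ *
+ * As a usage sketch: a performance counter driver can obtain a virtual
+ * IRQ for the PERF0 trigger (HWSTATMETA bit 16, see PERF0TRIG_OFFSET
+ * below) with internal_irq_map(16) and then request_irq() it as usual.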
+ */ + +#include +#include +#include + +#include +#include + +#define PERF0VECINT 0x04820580 +#define PERF1VECINT 0x04820588 +#define PERF0TRIG_OFFSET 16 +#define PERF1TRIG_OFFSET 17 + +/** + * struct metag_internal_irq_priv - private meta internal interrupt data + * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA) + * @unmasked: Record of unmasked IRQs + */ +struct metag_internal_irq_priv { + struct irq_domain *domain; + + unsigned long unmasked; +}; + +/* Private data for the one and only internal interrupt controller */ +static struct metag_internal_irq_priv metag_internal_irq_priv; + +static unsigned int metag_internal_irq_startup(struct irq_data *data); +static void metag_internal_irq_shutdown(struct irq_data *data); +static void metag_internal_irq_ack(struct irq_data *data); +static void metag_internal_irq_mask(struct irq_data *data); +static void metag_internal_irq_unmask(struct irq_data *data); +#ifdef CONFIG_SMP +static int metag_internal_irq_set_affinity(struct irq_data *data, + const struct cpumask *cpumask, bool force); +#endif + +static struct irq_chip internal_irq_edge_chip = { + .name = "HWSTATMETA-IRQ", + .irq_startup = metag_internal_irq_startup, + .irq_shutdown = metag_internal_irq_shutdown, + .irq_ack = metag_internal_irq_ack, + .irq_mask = metag_internal_irq_mask, + .irq_unmask = metag_internal_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = metag_internal_irq_set_affinity, +#endif +}; + +/* + * metag_hwvec_addr - get the address of *VECINT regs of irq + * + * This function is a table of supported triggers on HWSTATMETA + * Could do with a structure, but better keep it simple. Changes + * in this code should be rare. + */ +static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw) +{ + void __iomem *addr; + + switch (hw) { + case PERF0TRIG_OFFSET: + addr = (void __iomem *)PERF0VECINT; + break; + case PERF1TRIG_OFFSET: + addr = (void __iomem *)PERF1VECINT; + break; + default: + addr = NULL; + break; + } + return addr; +} + +/* + * metag_internal_startup - setup an internal irq + * @irq: the irq to startup + * + * Multiplex interrupts for @irq onto TR1. Clear any pending + * interrupts. + */ +static unsigned int metag_internal_irq_startup(struct irq_data *data) +{ + /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ + metag_internal_irq_ack(data); + + /* Enable the interrupt by unmasking it */ + metag_internal_irq_unmask(data); + + return 0; +} + +/* + * metag_internal_irq_shutdown - turn off the irq + * @irq: the irq number to turn off + * + * Mask @irq and clear any pending interrupts. + * Stop muxing @irq onto TR1. + */ +static void metag_internal_irq_shutdown(struct irq_data *data) +{ + /* Disable the IRQ at the core by masking it. */ + metag_internal_irq_mask(data); + + /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ + metag_internal_irq_ack(data); +} + +/* + * metag_internal_irq_ack - acknowledge irq + * @irq: the irq to ack + */ +static void metag_internal_irq_ack(struct irq_data *data) +{ + irq_hw_number_t hw = data->hwirq; + unsigned int bit = 1 << hw; + + if (metag_in32(HWSTATMETA) & bit) + metag_out32(bit, HWSTATMETA); +} + +/** + * metag_internal_irq_mask() - mask an internal irq by unvectoring + * @data: data for the internal irq to mask + * + * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core + * and retriggered if necessary later. 
+ */
+static void metag_internal_irq_mask(struct irq_data *data)
+{
+ struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+ irq_hw_number_t hw = data->hwirq;
+ void __iomem *vec_addr = metag_hwvec_addr(hw);
+
+ clear_bit(hw, &priv->unmasked);
+
+ /* there is no interrupt mask, so unvector the interrupt */
+ metag_out32(0, vec_addr);
+}
+
+/**
+ * metag_internal_irq_unmask() - unmask an internal irq by revectoring
+ * @data: data for the internal irq to unmask
+ *
+ * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the
+ * core and retriggered if necessary.
+ */
+static void metag_internal_irq_unmask(struct irq_data *data)
+{
+ struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+ irq_hw_number_t hw = data->hwirq;
+ unsigned int bit = 1 << hw;
+ void __iomem *vec_addr = metag_hwvec_addr(hw);
+ unsigned int thread = hard_processor_id();
+
+ set_bit(hw, &priv->unmasked);
+
+ /* there is no interrupt mask, so revector the interrupt */
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr);
+
+ /*
+ * Re-trigger interrupt
+ *
+ * Writing a 1 toggles, and a 0->1 transition triggers. We only
+ * retrigger if the status bit is already set, which means we
+ * need to clear it first. Retriggering is fundamentally racy
+ * because if the interrupt fires again after we clear it we
+ * could end up clearing it again and the interrupt handler
+ * thinking it hasn't fired. Therefore we need to keep trying to
+ * retrigger until the bit is set.
+ */
+ if (metag_in32(HWSTATMETA) & bit) {
+ metag_out32(bit, HWSTATMETA);
+ while (!(metag_in32(HWSTATMETA) & bit))
+ metag_out32(bit, HWSTATMETA);
+ }
+}
+
+#ifdef CONFIG_SMP
+/*
+ * metag_internal_irq_set_affinity - set the affinity for an interrupt
+ */
+static int metag_internal_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *cpumask, bool force)
+{
+ unsigned int cpu, thread;
+ irq_hw_number_t hw = data->hwirq;
+ /*
+ * Wire up this interrupt from *VECINT to the Meta core.
+ *
+ * Note that we can't wire up *VECINT to interrupt more than
+ * one cpu (the interrupt code doesn't support it), so we just
+ * pick the first cpu we find in 'cpumask'.
+ */
+ cpu = cpumask_any(cpumask);
+ thread = cpu_2_hwthread_id[cpu];
+
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
+ metag_hwvec_addr(hw));
+
+ return 0;
+}
+#endif
+
+/*
+ * metag_internal_irq_demux - irq de-multiplexer
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * The cpu receives an interrupt on TR1 when an interrupt has
+ * occurred. It is this function's job to demux this irq and
+ * figure out exactly which trigger needs servicing.
+ */
+static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
+ irq_hw_number_t hw;
+ unsigned int irq_no;
+ u32 status;
+
+recalculate:
+ status = metag_in32(HWSTATMETA) & priv->unmasked;
+
+ for (hw = 0; status != 0; status >>= 1, ++hw) {
+ if (status & 0x1) {
+ /*
+ * Map the hardware IRQ number to a virtual Linux IRQ
+ * number.
+ */
+ irq_no = irq_linear_revmap(priv->domain, hw);
+
+ /*
+ * Only fire off interrupts that are
+ * registered to be handled by the kernel.
+ * Other interrupts are probably being
+ * handled by other Meta hardware threads.
+ */ + generic_handle_irq(irq_no); + + /* + * The handler may have re-enabled interrupts + * which could have caused a nested invocation + * of this code and make the copy of the + * status register we are using invalid. + */ + goto recalculate; + } + } +} + +/** + * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number. + * @hw: Number of the internal IRQ. Must be in range. + * + * Returns: The virtual IRQ number of the Meta internal IRQ specified by + * @hw. + */ +int internal_irq_map(unsigned int hw) +{ + struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; + if (!priv->domain) + return -ENODEV; + return irq_create_mapping(priv->domain, hw); +} + +/** + * metag_internal_irq_init_cpu - regsister with the Meta cpu + * @cpu: the CPU to register on + * + * Configure @cpu's TR1 irq so that we can demux irqs. + */ +static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv, + int cpu) +{ + unsigned int thread = cpu_2_hwthread_id[cpu]; + unsigned int signum = TBID_SIGNUM_TR1(thread); + int irq = tbisig_map(signum); + + /* Register the multiplexed IRQ handler */ + irq_set_handler_data(irq, priv); + irq_set_chained_handler(irq, metag_internal_irq_demux); + irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); +} + +/** + * metag_internal_intc_map() - map an internal irq + * @d: irq domain of internal trigger block + * @irq: virtual irq number + * @hw: hardware irq number within internal trigger block + * + * This sets up a virtual irq for a specified hardware interrupt. The irq chip + * and handler is configured. + */ +static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + /* only register interrupt if it is mapped */ + if (!metag_hwvec_addr(hw)) + return -EINVAL; + + irq_set_chip_and_handler(irq, &internal_irq_edge_chip, + handle_edge_irq); + return 0; +} + +static const struct irq_domain_ops metag_internal_intc_domain_ops = { + .map = metag_internal_intc_map, +}; + +/** + * metag_internal_irq_register - register internal IRQs + * + * Register the irq chip and handler function for all internal IRQs + */ +int __init init_internal_IRQ(void) +{ + struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; + unsigned int cpu; + + /* Set up an IRQ domain */ + priv->domain = irq_domain_add_linear(NULL, 32, + &metag_internal_intc_domain_ops, + priv); + if (unlikely(!priv->domain)) { + pr_err("meta-internal-intc: cannot add IRQ domain\n"); + return -ENOMEM; + } + + /* Setup TR1 for all cpus. */ + for_each_possible_cpu(cpu) + metag_internal_irq_init_cpu(priv, cpu); + + return 0; +}; diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h new file mode 100644 index 000000000000..697af0fe7c5a --- /dev/null +++ b/include/linux/irqchip/metag-ext.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2012 Imagination Technologies + */ + +#ifndef _LINUX_IRQCHIP_METAG_EXT_H_ +#define _LINUX_IRQCHIP_METAG_EXT_H_ + +struct irq_data; +struct platform_device; + +/* called from core irq code at init */ +int init_external_IRQ(void); + +/* + * called from SoC init_irq() callback to dynamically indicate the lack of + * HWMASKEXT registers. + */ +void meta_intc_no_mask(void); + +/* + * These allow SoCs to specialise the interrupt controller from their init_irq + * callbacks. 
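+ *
+ * For example, an SoC's init_irq() callback might override the mask
+ * callbacks with its own (the my_soc_* names are purely illustrative):
+ *
+ *	meta_intc_edge_chip.irq_mask = my_soc_mask_irq;
+ *	meta_intc_edge_chip.irq_unmask = my_soc_unmask_irq;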
+ */ + +extern struct irq_chip meta_intc_edge_chip; +extern struct irq_chip meta_intc_level_chip; + +/* this should be called in the mask callback */ +void meta_intc_mask_irq_simple(struct irq_data *data); +/* this should be called in the unmask callback */ +void meta_intc_unmask_irq_simple(struct irq_data *data); + +#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */ diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h new file mode 100644 index 000000000000..4ebdfb3101ab --- /dev/null +++ b/include/linux/irqchip/metag.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2011 Imagination Technologies + */ + +#ifndef _LINUX_IRQCHIP_METAG_H_ +#define _LINUX_IRQCHIP_METAG_H_ + +#include + +#ifdef CONFIG_METAG_PERFCOUNTER_IRQS +extern int init_internal_IRQ(void); +extern int internal_irq_map(unsigned int hw); +#else +static inline int init_internal_IRQ(void) +{ + return 0; +} +static inline int internal_irq_map(unsigned int hw) +{ + return -EINVAL; +} +#endif + +#endif /* _LINUX_IRQCHIP_METAG_H_ */ -- cgit v1.2.3 From 26025bbfbba33a9425be1b89eccb4664ea4c17b6 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:51 +0100 Subject: metag: System Calls Add metag system call and gateway page interfaces. The metag architecture port uses the generic system call numbers from asm-generic/unistd.h, as well as a user gateway page mapped at 0x6ffff000 which contains fast atomic primitives (depending on SMP) and a fast method of accessing TLS data. System calls use the SWITCH instruction with the immediate 0x440001 to signal a system call. Signed-off-by: James Hogan --- arch/metag/include/asm/mman.h | 11 +++ arch/metag/include/asm/syscall.h | 104 ++++++++++++++++++++ arch/metag/include/asm/syscalls.h | 39 ++++++++ arch/metag/include/asm/unistd.h | 12 +++ arch/metag/include/asm/user_gateway.h | 44 +++++++++ arch/metag/include/uapi/asm/unistd.h | 21 ++++ arch/metag/kernel/sys_metag.c | 180 ++++++++++++++++++++++++++++++++++ arch/metag/kernel/user_gateway.S | 97 ++++++++++++++++++ 8 files changed, 508 insertions(+) create mode 100644 arch/metag/include/asm/mman.h create mode 100644 arch/metag/include/asm/syscall.h create mode 100644 arch/metag/include/asm/syscalls.h create mode 100644 arch/metag/include/asm/unistd.h create mode 100644 arch/metag/include/asm/user_gateway.h create mode 100644 arch/metag/include/uapi/asm/unistd.h create mode 100644 arch/metag/kernel/sys_metag.c create mode 100644 arch/metag/kernel/user_gateway.S (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/mman.h b/arch/metag/include/asm/mman.h new file mode 100644 index 000000000000..17999dba9275 --- /dev/null +++ b/arch/metag/include/asm/mman.h @@ -0,0 +1,11 @@ +#ifndef __METAG_MMAN_H__ +#define __METAG_MMAN_H__ + +#include + +#ifndef __ASSEMBLY__ +#define arch_mmap_check metag_mmap_check +int metag_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags); +#endif +#endif /* __METAG_MMAN_H__ */ diff --git a/arch/metag/include/asm/syscall.h b/arch/metag/include/asm/syscall.h new file mode 100644 index 000000000000..24fc97939f77 --- /dev/null +++ b/arch/metag/include/asm/syscall.h @@ -0,0 +1,104 @@ +/* + * Access to user system call parameters and results + * + * Copyright (C) 2008 Imagination Technologies Ltd. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * See asm-generic/syscall.h for descriptions of what we must do here. 
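+ *
+ * For reference, the mapping implemented by syscall_get_arguments()
+ * below works out as (Meta argument register -> saved context):
+ *
+ *	arg0: D1Ar1 -> regs->ctx.DX[3].U1
+ *	arg1: D0Ar2 -> regs->ctx.DX[3].U0
+ *	arg2: D1Ar3 -> regs->ctx.DX[2].U1
+ *	arg3: D0Ar4 -> regs->ctx.DX[2].U0
+ *	arg4: D1Ar5 -> regs->ctx.DX[1].U1
+ *	arg5: D0Ar6 -> regs->ctx.DX[1].U0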
+ */ + +#ifndef _ASM_METAG_SYSCALL_H +#define _ASM_METAG_SYSCALL_H + +#include +#include +#include + +#include + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long insn; + + /* + * FIXME there's no way to find out how we got here other than to + * examine the memory at the PC to see if it is a syscall + * SWITCH instruction. + */ + if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4))) + return -1; + + if (insn == __METAG_SW_ENCODING(SYS)) + return regs->ctx.DX[0].U1; + else + return -1L; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* do nothing */ +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->ctx.DX[0].U0; + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->ctx.DX[0].U0; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->ctx.DX[0].U0 = (long) error ?: val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + unsigned int reg, j; + BUG_ON(i + n > 6); + + for (j = i, reg = 6 - i; j < (i + n); j++, reg--) { + if (reg % 2) + args[j] = regs->ctx.DX[(reg + 1) / 2].U0; + else + args[j] = regs->ctx.DX[reg / 2].U1; + } +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + unsigned int reg; + BUG_ON(i + n > 6); + + for (reg = 6 - i; i < (i + n); i++, reg--) { + if (reg % 2) + regs->ctx.DX[(reg + 1) / 2].U0 = args[i]; + else + regs->ctx.DX[reg / 2].U1 = args[i]; + } +} + +#define NR_syscalls __NR_syscalls + +/* generic syscall table */ +extern const void *sys_call_table[]; + +#endif /* _ASM_METAG_SYSCALL_H */ diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h new file mode 100644 index 000000000000..a02b95556522 --- /dev/null +++ b/arch/metag/include/asm/syscalls.h @@ -0,0 +1,39 @@ +#ifndef _ASM_METAG_SYSCALLS_H +#define _ASM_METAG_SYSCALLS_H + +#include +#include +#include +#include + +/* kernel/signal.c */ +#define sys_rt_sigreturn sys_rt_sigreturn +asmlinkage long sys_rt_sigreturn(void); + +#include + +/* kernel/sys_metag.c */ +asmlinkage int sys_metag_setglobalbit(char __user *, int); +asmlinkage void sys_metag_set_fpu_flags(unsigned int); +asmlinkage int sys_metag_set_tls(void __user *); +asmlinkage void *sys_metag_get_tls(void); + +asmlinkage long sys_truncate64_metag(const char __user *, unsigned long, + unsigned long); +asmlinkage long sys_ftruncate64_metag(unsigned int, unsigned long, + unsigned long); +asmlinkage long sys_fadvise64_64_metag(int, unsigned long, unsigned long, + unsigned long, unsigned long, int); +asmlinkage long sys_readahead_metag(int, unsigned long, unsigned long, size_t); +asmlinkage ssize_t sys_pread64_metag(unsigned long, char __user *, size_t, + unsigned long, unsigned long); +asmlinkage ssize_t sys_pwrite64_metag(unsigned long, char __user *, size_t, + unsigned long, unsigned long); +asmlinkage long sys_sync_file_range_metag(int, unsigned long, unsigned long, + unsigned long, unsigned long, + unsigned int); + +int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, + int syscall); + +#endif /* _ASM_METAG_SYSCALLS_H */ diff --git 
a/arch/metag/include/asm/unistd.h b/arch/metag/include/asm/unistd.h new file mode 100644 index 000000000000..32955a18fb32 --- /dev/null +++ b/arch/metag/include/asm/unistd.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +#define __ARCH_WANT_SYS_CLONE diff --git a/arch/metag/include/asm/user_gateway.h b/arch/metag/include/asm/user_gateway.h new file mode 100644 index 000000000000..e404c09e3b74 --- /dev/null +++ b/arch/metag/include/asm/user_gateway.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2010 Imagination Technologies + */ + +#ifndef __ASM_METAG_USER_GATEWAY_H +#define __ASM_METAG_USER_GATEWAY_H + +#include + +/* Page of kernel code accessible to userspace. */ +#define USER_GATEWAY_PAGE 0x6ffff000 +/* Offset of TLS pointer array in gateway page. */ +#define USER_GATEWAY_TLS 0x100 + +#ifndef __ASSEMBLY__ + +extern char __user_gateway_start; +extern char __user_gateway_end; + +/* Kernel mapping of the gateway page. */ +extern void *gateway_page; + +static inline void set_gateway_tls(void __user *tls_ptr) +{ + void **gateway_tls = (void **)(gateway_page + USER_GATEWAY_TLS + + hard_processor_id() * 4); + + *gateway_tls = (__force void *)tls_ptr; +#ifdef CONFIG_METAG_META12 + /* Avoid cache aliases on virtually tagged cache. */ + __builtin_dcache_flush((void *)USER_GATEWAY_PAGE + USER_GATEWAY_TLS + + hard_processor_id() * sizeof(void *)); +#endif +} + +extern int __kuser_get_tls(void); +extern char *__kuser_get_tls_end[]; + +extern int __kuser_cmpxchg(int, int, unsigned long *); +extern char *__kuser_cmpxchg_end[]; + +#endif + +#endif diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..b80b8e899d22 --- /dev/null +++ b/arch/metag/include/uapi/asm/unistd.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2012 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* Use the standard ABI for syscalls. */ +#include + +/* metag-specific syscalls. */ +#define __NR_metag_setglobalbit (__NR_arch_specific_syscall + 1) +__SYSCALL(__NR_metag_setglobalbit, sys_metag_setglobalbit) +#define __NR_metag_set_fpu_flags (__NR_arch_specific_syscall + 2) +__SYSCALL(__NR_metag_set_fpu_flags, sys_metag_set_fpu_flags) +#define __NR_metag_set_tls (__NR_arch_specific_syscall + 3) +__SYSCALL(__NR_metag_set_tls, sys_metag_set_tls) +#define __NR_metag_get_tls (__NR_arch_specific_syscall + 4) +__SYSCALL(__NR_metag_get_tls, sys_metag_get_tls) diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c new file mode 100644 index 000000000000..efe833a452f7 --- /dev/null +++ b/arch/metag/kernel/sys_metag.c @@ -0,0 +1,180 @@ +/* + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/Meta + * platform. 
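+ *
+ * Most of the wrappers below simply reassemble 64-bit values that
+ * arrive from userspace as two 32-bit halves, e.g. (a worked example of
+ * the merge_64() macro defined below):
+ *
+ *	merge_64(0x1, 0x80000000) == 0x0000000180000000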
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \ + ((lo) & 0xffffffffUL)) + +int metag_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + /* We can't have people trying to write to the bottom of the + * memory map, there are mysterious unspecified things there that + * we don't want people trampling on. + */ + if ((flags & MAP_FIXED) && (addr < TASK_UNMAPPED_BASE)) + return -EINVAL; + + return 0; +} + +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + /* The shift for mmap2 is constant, regardless of PAGE_SIZE setting. */ + if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) + return -EINVAL; + + pgoff >>= PAGE_SHIFT - 12; + + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); +} + +asmlinkage int sys_metag_setglobalbit(char __user *addr, int mask) +{ + char tmp; + int ret = 0; + unsigned int flags; + + if (!((__force unsigned int)addr >= LINCORE_BASE)) + return -EFAULT; + + __global_lock2(flags); + + metag_data_cache_flush((__force void *)addr, sizeof(mask)); + + ret = __get_user(tmp, addr); + if (ret) + goto out; + tmp |= mask; + ret = __put_user(tmp, addr); + + metag_data_cache_flush((__force void *)addr, sizeof(mask)); + +out: + __global_unlock2(flags); + + return ret; +} + +#define TXDEFR_FPU_MASK ((0x1f << 16) | 0x1f) + +asmlinkage void sys_metag_set_fpu_flags(unsigned int flags) +{ + unsigned int temp; + + flags &= TXDEFR_FPU_MASK; + + temp = __core_reg_get(TXDEFR); + temp &= ~TXDEFR_FPU_MASK; + temp |= flags; + __core_reg_set(TXDEFR, temp); +} + +asmlinkage int sys_metag_set_tls(void __user *ptr) +{ + current->thread.tls_ptr = ptr; + set_gateway_tls(ptr); + + return 0; +} + +asmlinkage void *sys_metag_get_tls(void) +{ + return (__force void *)current->thread.tls_ptr; +} + +asmlinkage long sys_truncate64_metag(const char __user *path, unsigned long lo, + unsigned long hi) +{ + return sys_truncate64(path, merge_64(hi, lo)); +} + +asmlinkage long sys_ftruncate64_metag(unsigned int fd, unsigned long lo, + unsigned long hi) +{ + return sys_ftruncate64(fd, merge_64(hi, lo)); +} + +asmlinkage long sys_fadvise64_64_metag(int fd, unsigned long offs_lo, + unsigned long offs_hi, + unsigned long len_lo, + unsigned long len_hi, int advice) +{ + return sys_fadvise64_64(fd, merge_64(offs_hi, offs_lo), + merge_64(len_hi, len_lo), advice); +} + +asmlinkage long sys_readahead_metag(int fd, unsigned long lo, unsigned long hi, + size_t count) +{ + return sys_readahead(fd, merge_64(hi, lo), count); +} + +asmlinkage ssize_t sys_pread64_metag(unsigned long fd, char __user *buf, + size_t count, unsigned long lo, + unsigned long hi) +{ + return sys_pread64(fd, buf, count, merge_64(hi, lo)); +} + +asmlinkage ssize_t sys_pwrite64_metag(unsigned long fd, char __user *buf, + size_t count, unsigned long lo, + unsigned long hi) +{ + return sys_pwrite64(fd, buf, count, merge_64(hi, lo)); +} + +asmlinkage long sys_sync_file_range_metag(int fd, unsigned long offs_lo, + unsigned long offs_hi, + unsigned long len_lo, + unsigned long len_hi, + unsigned int flags) +{ + return sys_sync_file_range(fd, merge_64(offs_hi, offs_lo), + merge_64(len_hi, len_lo), flags); +} + +/* Provide the actual syscall number to call mapping. 
*/
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/*
+ * We need wrappers for anything with unaligned 64bit arguments
+ */
+#define sys_truncate64 sys_truncate64_metag
+#define sys_ftruncate64 sys_ftruncate64_metag
+#define sys_fadvise64_64 sys_fadvise64_64_metag
+#define sys_readahead sys_readahead_metag
+#define sys_pread64 sys_pread64_metag
+#define sys_pwrite64 sys_pwrite64_metag
+#define sys_sync_file_range sys_sync_file_range_metag
+
+/*
+ * Note that we can't include here since the header
+ * guard will defeat us; checks for __SYSCALL as well.
+ */
+const void *sys_call_table[__NR_syscalls] = {
+	[0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include
+};
diff --git a/arch/metag/kernel/user_gateway.S b/arch/metag/kernel/user_gateway.S
new file mode 100644
index 000000000000..7167f3e8db6b
--- /dev/null
+++ b/arch/metag/kernel/user_gateway.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ * This file contains code that can be accessed from userspace and can
+ * access certain kernel data structures without the overhead of a system
+ * call.
+ */
+
+#include
+#include
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory. They are used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native features and/or instructions in some Meta CPUs. The idea is for
+ * this code to be executed directly in user mode for best efficiency, but
+ * it is too intimate with the kernel counterpart to be left to user
+ * libraries. The kernel reserves the right to change this code as needed
+ * without warning. Only the entry points and their results are guaranteed
+ * to be stable.
+ *
+ * Each segment is 64-byte aligned. This mechanism should be used only
+ * for things that are really small and justified, and not be abused freely.
+ */
+	.text
+	.global	___user_gateway_start
+___user_gateway_start:
+
+	/* get_tls
+	 * Offset:	0
+	 * Description:	Get the TLS pointer for this process.
+	 */
+	.global	___kuser_get_tls
+	.type	___kuser_get_tls,function
+___kuser_get_tls:
+	MOVT	D1Ar1,#HI(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
+	ADD	D1Ar1,D1Ar1,#LO(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
+	MOV	D1Ar3,TXENABLE
+	AND	D1Ar3,D1Ar3,#(TXENABLE_THREAD_BITS)
+	LSR	D1Ar3,D1Ar3,#(TXENABLE_THREAD_S - 2)
+	GETD	D0Re0,[D1Ar1+D1Ar3]
+___kuser_get_tls_end:		/* Beyond this point the read will complete */
+	MOV	PC,D1RtP
+	.size	___kuser_get_tls,.-___kuser_get_tls
+	.global	___kuser_get_tls_end
+
+	/* cmpxchg
+	 * Offset:	64
+	 * Description:	Replace the value at 'ptr' with 'newval' if the current
+	 *		value is 'oldval'. Return zero if we succeeded,
+	 *		non-zero otherwise.
+	 *
+	 * Reference prototype:
+	 *
+	 *	int __kuser_cmpxchg(int oldval, int newval, unsigned long *ptr)
+	 *
+	 */
+	.balign	64
+	.global	___kuser_cmpxchg
+	.type	___kuser_cmpxchg,function
+___kuser_cmpxchg:
+#ifdef CONFIG_SMP
+	/*
+	 * We must use LNKGET/LNKSET with an SMP kernel because the other method
+	 * does not provide atomicity across multiple CPUs.
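+	 * LNKGETD/LNKSETDZ act as a load-linked/store-conditional pair:
+	 * the LNKSETDZ below only performs the store (predicated on the
+	 * Z flag from the CMP) while the link taken out by LNKGETD is
+	 * still intact. The DEFR/ANDT/CMPT sequence then inspects TXSTAT
+	 * to check whether the conditional store actually succeeded, and
+	 * retries from 0: if it did not.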
+ */ +0: LNKGETD D0Re0,[D1Ar3] + CMP D0Re0,D1Ar1 + LNKSETDZ [D1Ar3],D0Ar2 + BNZ 1f + DEFR D0Re0,TXSTAT + ANDT D0Re0,D0Re0,#HI(0x3f000000) + CMPT D0Re0,#HI(0x02000000) + BNE 0b +#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE + DCACHE [D1Ar3], D0Re0 +#endif +1: MOV D0Re0,#1 + XORZ D0Re0,D0Re0,D0Re0 + MOV PC,D1RtP +#else + GETD D0Re0,[D1Ar3] + CMP D0Re0,D1Ar1 + SETDZ [D1Ar3],D0Ar2 +___kuser_cmpxchg_end: /* Beyond this point the write will complete */ + MOV D0Re0,#1 + XORZ D0Re0,D0Re0,D0Re0 + MOV PC,D1RtP +#endif /* CONFIG_SMP */ + .size ___kuser_cmpxchg,.-___kuser_cmpxchg + .global ___kuser_cmpxchg_end + + .global ___user_gateway_end +___user_gateway_end: -- cgit v1.2.3 From 44dea393cf98a09b4b9f00dc3dd7e2c211f4b0e8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:54 +0100 Subject: metag: Scheduling/Process management Signed-off-by: James Hogan --- arch/metag/include/asm/thread_info.h | 155 ++++++++++++ arch/metag/kernel/process.c | 461 +++++++++++++++++++++++++++++++++++ 2 files changed, 616 insertions(+) create mode 100644 arch/metag/include/asm/thread_info.h create mode 100644 arch/metag/kernel/process.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h new file mode 100644 index 000000000000..0ecd34d8b5f6 --- /dev/null +++ b/arch/metag/include/asm/thread_info.h @@ -0,0 +1,155 @@ +/* thread_info.h: Meta low-level thread information + * + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds and Dave Miller + * + * Meta port by Imagination Technologies + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#include +#include + +#ifndef __ASSEMBLY__ +#include +#endif + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants must + * also be changed + */ +#ifndef __ASSEMBLY__ + +/* This must be 8 byte aligned so we can ensure stack alignment. 
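+ * It sits at the base of the kernel stack (which on Meta grows upwards),
+ * so current_thread_info() below can recover it simply by masking the
+ * A0StP stack pointer with ~(THREAD_SIZE - 1).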
*/ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + unsigned long status; /* thread-synchronous flags */ + u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + + mm_segment_t addr_limit; /* thread address space */ + struct restart_block restart_block; + + u8 supervisor_stack[0]; +}; + +#else /* !__ASSEMBLY__ */ + +#include + +#endif + +#define PREEMPT_ACTIVE 0x10000000 + +#ifdef CONFIG_4KSTACKS +#define THREAD_SHIFT 12 +#else +#define THREAD_SHIFT 13 +#endif + +#if THREAD_SHIFT >= PAGE_SHIFT +#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) +#else +#define THREAD_SIZE_ORDER 0 +#endif + +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + +#define STACK_WARN (THREAD_SIZE/8) +/* + * macros/functions for gaining access to the thread information structure + */ +#ifndef __ASSEMBLY__ + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* how to get the current stack pointer from C */ +register unsigned long current_stack_pointer asm("A0StP") __used; + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + return (struct thread_info *)(current_stack_pointer & + ~(THREAD_SIZE - 1)); +} + +#define __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1)) + + sizeof(struct thread_info)); +} + +#endif + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_SINGLESTEP 3 /* restore singlestep on return to user + mode */ +#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ +#define TIF_SECCOMP 5 /* secure computing */ +#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ +#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ +#define TIF_POLLING_NRFLAG 8 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ +#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ +#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint instrumentation */ + + +#define _TIF_SYSCALL_TRACE (1< +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Wait for the next interrupt and enable local interrupts + */ +static inline void arch_idle(void) +{ + int tmp; + + /* + * Quickly jump straight into the interrupt entry point without actually + * triggering an interrupt. When TXSTATI gets read the processor will + * block until an interrupt is triggered. + */ + asm volatile (/* Switch into ISTAT mode */ + "RTH\n\t" + /* Enable local interrupts */ + "MOV TXMASKI, %1\n\t" + /* + * We can't directly "SWAP PC, PCX", so we swap via a + * temporary. 
Essentially we do: + * PCX_new = 1f (the place to continue execution) + * PC = PCX_old + */ + "ADD %0, CPC0, #(1f-.)\n\t" + "SWAP PCX, %0\n\t" + "MOV PC, %0\n" + /* Continue execution here with interrupts enabled */ + "1:" + : "=a" (tmp) + : "r" (get_trigger_mask())); +} + +void cpu_idle(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); + + while (1) { + tick_nohz_idle_enter(); + rcu_idle_enter(); + + while (!need_resched()) { + /* + * We need to disable interrupts here to ensure we don't + * miss a wakeup call. + */ + local_irq_disable(); + if (!need_resched()) { +#ifdef CONFIG_HOTPLUG_CPU + if (cpu_is_offline(smp_processor_id())) + cpu_die(); +#endif + arch_idle(); + } else { + local_irq_enable(); + } + } + + rcu_idle_exit(); + tick_nohz_idle_exit(); + schedule_preempt_disabled(); + } +} + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*soc_restart)(char *cmd); +void (*soc_halt)(void); + +void machine_restart(char *cmd) +{ + if (soc_restart) + soc_restart(cmd); + hard_processor_halt(HALT_OK); +} + +void machine_halt(void) +{ + if (soc_halt) + soc_halt(); + smp_send_stop(); + hard_processor_halt(HALT_OK); +} + +void machine_power_off(void) +{ + if (pm_power_off) + pm_power_off(); + smp_send_stop(); + hard_processor_halt(HALT_OK); +} + +#define FLAG_Z 0x8 +#define FLAG_N 0x4 +#define FLAG_O 0x2 +#define FLAG_C 0x1 + +void show_regs(struct pt_regs *regs) +{ + int i; + const char *AX0_names[] = {"A0StP", "A0FrP"}; + const char *AX1_names[] = {"A1GbP", "A1LbP"}; + + const char *DX0_names[] = { + "D0Re0", + "D0Ar6", + "D0Ar4", + "D0Ar2", + "D0FrT", + "D0.5 ", + "D0.6 ", + "D0.7 " + }; + + const char *DX1_names[] = { + "D1Re0", + "D1Ar5", + "D1Ar3", + "D1Ar1", + "D1RtP", + "D1.5 ", + "D1.6 ", + "D1.7 " + }; + + pr_info(" pt_regs @ %p\n", regs); + pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); + pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, + regs->ctx.Flags & FLAG_Z ? 'Z' : 'z', + regs->ctx.Flags & FLAG_N ? 'N' : 'n', + regs->ctx.Flags & FLAG_O ? 'O' : 'o', + regs->ctx.Flags & FLAG_C ? 
'C' : 'c'); + pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT); + pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC); + + /* AX regs */ + for (i = 0; i < 2; i++) { + pr_info(" %s = 0x%08x ", + AX0_names[i], + regs->ctx.AX[i].U0); + printk(" %s = 0x%08x\n", + AX1_names[i], + regs->ctx.AX[i].U1); + } + + if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) + pr_warn(" Extended state present - AX2.[01] will be WRONG\n"); + + /* Special place with AXx.2 */ + pr_info(" A0.2 = 0x%08x ", + regs->ctx.Ext.AX2.U0); + printk(" A1.2 = 0x%08x\n", + regs->ctx.Ext.AX2.U1); + + /* 'extended' AX regs (nominally, just AXx.3) */ + for (i = 0; i < (TBICTX_AX_REGS - 3); i++) { + pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0); + printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1); + } + + for (i = 0; i < 8; i++) { + pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0); + printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1); + } + + show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs); +} + +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *tsk) +{ + struct pt_regs *childregs = task_pt_regs(tsk); + void *kernel_context = ((void *) childregs + + sizeof(struct pt_regs)); + unsigned long global_base; + + BUG_ON(((unsigned long)childregs) & 0x7); + BUG_ON(((unsigned long)kernel_context) & 0x7); + + memset(&tsk->thread.kernel_context, 0, + sizeof(tsk->thread.kernel_context)); + + tsk->thread.kernel_context = __TBISwitchInit(kernel_context, + ret_from_fork, + 0, 0); + + if (unlikely(tsk->flags & PF_KTHREAD)) { + /* + * Make sure we don't leak any kernel data to child's regs + * if kernel thread becomes a userspace thread in the future + */ + memset(childregs, 0 , sizeof(struct pt_regs)); + + global_base = __core_reg_get(A1GbP); + childregs->ctx.AX[0].U1 = (unsigned long) global_base; + childregs->ctx.AX[0].U0 = (unsigned long) kernel_context; + /* Set D1Ar1=arg and D1RtP=usp (fn) */ + childregs->ctx.DX[4].U1 = usp; + childregs->ctx.DX[3].U1 = arg; + tsk->thread.int_depth = 2; + return 0; + } + /* + * Get a pointer to where the new child's register block should have + * been pushed. + * The Meta's stack grows upwards, and the context is the the first + * thing to be pushed by TBX (phew) + */ + *childregs = *current_pt_regs(); + /* Set the correct stack for the clone mode */ + if (usp) + childregs->ctx.AX[0].U0 = ALIGN(usp, 8); + tsk->thread.int_depth = 1; + + /* set return value for child process */ + childregs->ctx.DX[0].U0 = 0; + + /* The TLS pointer is passed as an argument to sys_clone. 
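+	 * It is the fifth syscall argument, so it is picked out of the
+	 * child's saved D1Ar5 (ctx.DX[1].U1 in the register layout shown
+	 * by show_regs() above).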
*/ + if (clone_flags & CLONE_SETTLS) + tsk->thread.tls_ptr = + (__force void __user *)childregs->ctx.DX[1].U1; + +#ifdef CONFIG_METAG_FPU + if (tsk->thread.fpu_context) { + struct meta_fpu_context *ctx; + + ctx = kmemdup(tsk->thread.fpu_context, + sizeof(struct meta_fpu_context), GFP_ATOMIC); + tsk->thread.fpu_context = ctx; + } +#endif + +#ifdef CONFIG_METAG_DSP + if (tsk->thread.dsp_context) { + struct meta_ext_context *ctx; + int i; + + ctx = kmemdup(tsk->thread.dsp_context, + sizeof(struct meta_ext_context), GFP_ATOMIC); + for (i = 0; i < 2; i++) + ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i], + GFP_ATOMIC); + tsk->thread.dsp_context = ctx; + } +#endif + + return 0; +} + +#ifdef CONFIG_METAG_FPU +static void alloc_fpu_context(struct thread_struct *thread) +{ + thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context), + GFP_ATOMIC); +} + +static void clear_fpu(struct thread_struct *thread) +{ + thread->user_flags &= ~TBICTX_FPAC_BIT; + kfree(thread->fpu_context); + thread->fpu_context = NULL; +} +#else +static void clear_fpu(struct thread_struct *thread) +{ +} +#endif + +#ifdef CONFIG_METAG_DSP +static void clear_dsp(struct thread_struct *thread) +{ + if (thread->dsp_context) { + kfree(thread->dsp_context->ram[0]); + kfree(thread->dsp_context->ram[1]); + + kfree(thread->dsp_context); + + thread->dsp_context = NULL; + } + + __core_reg_set(D0.8, 0); +} +#else +static void clear_dsp(struct thread_struct *thread) +{ +} +#endif + +struct task_struct *__sched __switch_to(struct task_struct *prev, + struct task_struct *next) +{ + TBIRES to, from; + + to.Switch.pCtx = next->thread.kernel_context; + to.Switch.pPara = prev; + +#ifdef CONFIG_METAG_FPU + if (prev->thread.user_flags & TBICTX_FPAC_BIT) { + struct pt_regs *regs = task_pt_regs(prev); + TBIRES state; + + state.Sig.SaveMask = prev->thread.user_flags; + state.Sig.pCtx = ®s->ctx; + + if (!prev->thread.fpu_context) + alloc_fpu_context(&prev->thread); + if (prev->thread.fpu_context) + __TBICtxFPUSave(state, prev->thread.fpu_context); + } + /* + * Force a restore of the FPU context next time this process is + * scheduled. + */ + if (prev->thread.fpu_context) + prev->thread.fpu_context->needs_restore = true; +#endif + + + from = __TBISwitch(to, &prev->thread.kernel_context); + + /* Restore TLS pointer for this process. */ + set_gateway_tls(current->thread.tls_ptr); + + return (struct task_struct *) from.Switch.pPara; +} + +void flush_thread(void) +{ + clear_fpu(¤t->thread); + clear_dsp(¤t->thread); +} + +/* + * Free current thread data structures etc. + */ +void exit_thread(void) +{ + clear_fpu(¤t->thread); + clear_dsp(¤t->thread); +} + +/* TODO: figure out how to unwind the kernel stack here to figure out + * where we went to sleep. 
*/ +unsigned long get_wchan(struct task_struct *p) +{ + return 0; +} + +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) +{ + /* Returning 0 indicates that the FPU state was not stored (as it was + * not in use) */ + return 0; +} + +#ifdef CONFIG_METAG_USER_TCM + +#define ELF_MIN_ALIGN PAGE_SIZE + +#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) +#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) +#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) + +#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) + +unsigned long __metag_elf_map(struct file *filep, unsigned long addr, + struct elf_phdr *eppnt, int prot, int type, + unsigned long total_size) +{ + unsigned long map_addr, size; + unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr); + unsigned long raw_size = eppnt->p_filesz + page_off; + unsigned long off = eppnt->p_offset - page_off; + unsigned int tcm_tag; + addr = ELF_PAGESTART(addr); + size = ELF_PAGEALIGN(raw_size); + + /* mmap() will return -EINVAL if given a zero size, but a + * segment with zero filesize is perfectly valid */ + if (!size) + return addr; + + tcm_tag = tcm_lookup_tag(addr); + + if (tcm_tag != TCM_INVALID_TAG) + type &= ~MAP_FIXED; + + /* + * total_size is the size of the ELF (interpreter) image. + * The _first_ mmap needs to know the full size, otherwise + * randomization might put this image into an overlapping + * position with the ELF binary image. (since size < total_size) + * So we first map the 'big' image - and unmap the remainder at + * the end. (which unmap is needed for ELF images with holes.) + */ + if (total_size) { + total_size = ELF_PAGEALIGN(total_size); + map_addr = vm_mmap(filep, addr, total_size, prot, type, off); + if (!BAD_ADDR(map_addr)) + vm_munmap(map_addr+size, total_size-size); + } else + map_addr = vm_mmap(filep, addr, size, prot, type, off); + + if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) { + struct tcm_allocation *tcm; + unsigned long tcm_addr; + + tcm = kmalloc(sizeof(*tcm), GFP_KERNEL); + if (!tcm) + return -ENOMEM; + + tcm_addr = tcm_alloc(tcm_tag, raw_size); + if (tcm_addr != addr) { + kfree(tcm); + return -ENOMEM; + } + + tcm->tag = tcm_tag; + tcm->addr = tcm_addr; + tcm->size = raw_size; + + list_add(&tcm->list, ¤t->mm->context.tcm); + + eppnt->p_vaddr = map_addr; + if (copy_from_user((void *) addr, (void __user *) map_addr, + raw_size)) + return -EFAULT; + } + + return map_addr; +} +#endif -- cgit v1.2.3 From 9b802d1f43978869fcd98e92b854fd8785cefee7 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:55:00 +0100 Subject: metag: Module support Signed-off-by: James Hogan --- arch/metag/include/asm/module.h | 37 ++++++ arch/metag/kernel/module.c | 284 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 321 insertions(+) create mode 100644 arch/metag/include/asm/module.h create mode 100644 arch/metag/kernel/module.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/module.h b/arch/metag/include/asm/module.h new file mode 100644 index 000000000000..e47e60941b2b --- /dev/null +++ b/arch/metag/include/asm/module.h @@ -0,0 +1,37 @@ +#ifndef _ASM_METAG_MODULE_H +#define _ASM_METAG_MODULE_H + +#include + +struct metag_plt_entry { + /* Indirect jump instruction sequence. */ + unsigned long tramp[2]; +}; + +struct mod_arch_specific { + /* Indices of PLT sections within module. 
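+	 * Each PLT slot holds a two-instruction trampoline of the form
+	 * MOVT D0Re0,#HI(addr); JUMP D0Re0,#LO(addr), built at relocation
+	 * time by do_plt_call() in module.c.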
*/ + unsigned int core_plt_section, init_plt_section; +}; + +#if defined CONFIG_METAG_META12 +#define MODULE_PROC_FAMILY "META 1.2 " +#elif defined CONFIG_METAG_META21 +#define MODULE_PROC_FAMILY "META 2.1 " +#else +#define MODULE_PROC_FAMILY "" +#endif + +#ifdef CONFIG_4KSTACKS +#define MODULE_STACKSIZE "4KSTACKS " +#else +#define MODULE_STACKSIZE "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE + +#ifdef MODULE +asm(".section .plt,\"ax\",@progbits; .balign 8; .previous"); +asm(".section .init.plt,\"ax\",@progbits; .balign 8; .previous"); +#endif + +#endif /* _ASM_METAG_MODULE_H */ diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c new file mode 100644 index 000000000000..986331cd0a52 --- /dev/null +++ b/arch/metag/kernel/module.c @@ -0,0 +1,284 @@ +/* Kernel module help for Meta. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. +*/ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Count how many different relocations (different symbol, different + addend) */ +static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) +{ + unsigned int i, r_info, r_addend, _count_relocs; + + _count_relocs = 0; + r_info = 0; + r_addend = 0; + for (i = 0; i < num; i++) + /* Only count relbranch relocs, others don't need stubs */ + if (ELF32_R_TYPE(rela[i].r_info) == R_METAG_RELBRANCH && + (r_info != ELF32_R_SYM(rela[i].r_info) || + r_addend != rela[i].r_addend)) { + _count_relocs++; + r_info = ELF32_R_SYM(rela[i].r_info); + r_addend = rela[i].r_addend; + } + + return _count_relocs; +} + +static int relacmp(const void *_x, const void *_y) +{ + const Elf32_Rela *x, *y; + + y = (Elf32_Rela *)_x; + x = (Elf32_Rela *)_y; + + /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to + * make the comparison cheaper/faster. It won't affect the sorting or + * the counting algorithms' performance + */ + if (x->r_info < y->r_info) + return -1; + else if (x->r_info > y->r_info) + return 1; + else if (x->r_addend < y->r_addend) + return -1; + else if (x->r_addend > y->r_addend) + return 1; + else + return 0; +} + +static void relaswap(void *_x, void *_y, int size) +{ + uint32_t *x, *y, tmp; + int i; + + y = (uint32_t *)_x; + x = (uint32_t *)_y; + + for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) { + tmp = x[i]; + x[i] = y[i]; + y[i] = tmp; + } +} + +/* Get the potential trampolines size required of the init and + non-init sections */ +static unsigned long get_plt_size(const Elf32_Ehdr *hdr, + const Elf32_Shdr *sechdrs, + const char *secstrings, + int is_init) +{ + unsigned long ret = 0; + unsigned i; + + /* Everything marked ALLOC (this includes the exported + symbols) */ + for (i = 1; i < hdr->e_shnum; i++) { + /* If it's called *.init*, and we're not init, we're + not interested */ + if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL) + != is_init) + continue; + + /* We don't want to look at debug sections. 
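+		 * Their relocations are never executed at run time, so
+		 * counting them would only inflate the PLT size computed
+		 * here.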
*/ + if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != NULL) + continue; + + if (sechdrs[i].sh_type == SHT_RELA) { + pr_debug("Found relocations in section %u\n", i); + pr_debug("Ptr: %p. Number: %u\n", + (void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size / sizeof(Elf32_Rela)); + + /* Sort the relocation information based on a symbol and + * addend key. This is a stable O(n*log n) complexity + * alogrithm but it will reduce the complexity of + * count_relocs() to linear complexity O(n) + */ + sort((void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size / sizeof(Elf32_Rela), + sizeof(Elf32_Rela), relacmp, relaswap); + + ret += count_relocs((void *)hdr + + sechdrs[i].sh_offset, + sechdrs[i].sh_size + / sizeof(Elf32_Rela)) + * sizeof(struct metag_plt_entry); + } + } + + return ret; +} + +int module_frob_arch_sections(Elf32_Ehdr *hdr, + Elf32_Shdr *sechdrs, + char *secstrings, + struct module *me) +{ + unsigned int i; + + /* Find .plt and .init.plt sections */ + for (i = 0; i < hdr->e_shnum; i++) { + if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0) + me->arch.init_plt_section = i; + else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0) + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { + pr_err("Module doesn't contain .plt or .init.plt sections.\n"); + return -ENOEXEC; + } + + /* Override their sizes */ + sechdrs[me->arch.core_plt_section].sh_size + = get_plt_size(hdr, sechdrs, secstrings, 0); + sechdrs[me->arch.core_plt_section].sh_type = SHT_NOBITS; + sechdrs[me->arch.init_plt_section].sh_size + = get_plt_size(hdr, sechdrs, secstrings, 1); + sechdrs[me->arch.init_plt_section].sh_type = SHT_NOBITS; + return 0; +} + +/* Set up a trampoline in the PLT to bounce us to the distant function */ +static uint32_t do_plt_call(void *location, Elf32_Addr val, + Elf32_Shdr *sechdrs, struct module *mod) +{ + struct metag_plt_entry *entry; + /* Instructions used to do the indirect jump. */ + uint32_t tramp[2]; + + /* We have to trash a register, so we assume that any control + transfer more than 21-bits away must be a function call + (so we can use a call-clobbered register). */ + + /* MOVT D0Re0,#HI(v) */ + tramp[0] = 0x02000005 | (((val & 0xffff0000) >> 16) << 3); + /* JUMP D0Re0,#LO(v) */ + tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); + + /* Init, or core PLT? */ + if (location >= mod->module_core + && location < mod->module_core + mod->core_size) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; + else + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + + /* Find this entry, or if that fails, the next avail. entry */ + while (entry->tramp[0]) + if (entry->tramp[0] == tramp[0] && entry->tramp[1] == tramp[1]) + return (uint32_t)entry; + else + entry++; + + entry->tramp[0] = tramp[0]; + entry->tramp[1] = tramp[1]; + + return (uint32_t)entry; +} + +int apply_relocate_add(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + Elf32_Addr relocation; + uint32_t *location; + int32_t value; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to. 
Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + relocation = sym->st_value + rel[i].r_addend; + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_METAG_NONE: + break; + case R_METAG_HIADDR16: + relocation >>= 16; + case R_METAG_LOADDR16: + *location = (*location & 0xfff80007) | + ((relocation & 0xffff) << 3); + break; + case R_METAG_ADDR32: + /* + * Packed data structures may cause a misaligned + * R_METAG_ADDR32 to be emitted. + */ + put_unaligned(relocation, location); + break; + case R_METAG_GETSETOFF: + *location += ((relocation & 0xfff) << 7); + break; + case R_METAG_RELBRANCH: + if (*location & (0x7ffff << 5)) { + pr_err("bad relbranch relocation\n"); + break; + } + + /* This jump is too big for the offset slot. Build + * a PLT to jump through to get to where we want to go. + * NB: 21bit check - not scaled to 19bit yet + */ + if (((int32_t)(relocation - + (uint32_t)location) > 0xfffff) || + ((int32_t)(relocation - + (uint32_t)location) < -0xfffff)) { + relocation = do_plt_call(location, relocation, + sechdrs, me); + } + + value = relocation - (uint32_t)location; + + /* branch instruction aligned */ + value /= 4; + + if ((value > 0x7ffff) || (value < -0x7ffff)) { + /* + * this should have been caught by the code + * above! + */ + pr_err("overflow of relbranch reloc\n"); + } + + *location = (*location & (~(0x7ffff << 5))) | + ((value & 0x7ffff) << 5); + break; + + default: + pr_err("module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} -- cgit v1.2.3 From 42682c6c42a5765b2c7cccfca170368fef6191ef Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 16:56:56 +0100 Subject: metag: SMP support Add SMP support for metag. This allows Linux to take control of multiple hardware threads on a single Meta core, treating them as separate Linux CPUs. Signed-off-by: James Hogan --- arch/metag/include/asm/cachepart.h | 42 +++ arch/metag/include/asm/core_reg.h | 35 +++ arch/metag/include/asm/smp.h | 29 ++ arch/metag/include/asm/topology.h | 53 ++++ arch/metag/kernel/cachepart.c | 124 ++++++++ arch/metag/kernel/core_reg.c | 117 ++++++++ arch/metag/kernel/head.S | 12 + arch/metag/kernel/smp.c | 575 +++++++++++++++++++++++++++++++++++++ arch/metag/kernel/topology.c | 77 +++++ 9 files changed, 1064 insertions(+) create mode 100644 arch/metag/include/asm/cachepart.h create mode 100644 arch/metag/include/asm/core_reg.h create mode 100644 arch/metag/include/asm/smp.h create mode 100644 arch/metag/include/asm/topology.h create mode 100644 arch/metag/kernel/cachepart.c create mode 100644 arch/metag/kernel/core_reg.c create mode 100644 arch/metag/kernel/smp.c create mode 100644 arch/metag/kernel/topology.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h new file mode 100644 index 000000000000..cf6b44e916b5 --- /dev/null +++ b/arch/metag/include/asm/cachepart.h @@ -0,0 +1,42 @@ +/* + * Meta cache partition manipulation. + * + * Copyright 2010 Imagination Technologies Ltd. + */ + +#ifndef _METAG_CACHEPART_H_ +#define _METAG_CACHEPART_H_ + +/** + * get_dcache_size() - Get size of data cache. + */ +unsigned int get_dcache_size(void); + +/** + * get_icache_size() - Get size of code cache. + */ +unsigned int get_icache_size(void); + +/** + * get_global_dcache_size() - Get the thread's global dcache. + * + * Returns the size of the current thread's global dcache partition. 
+ */ +unsigned int get_global_dcache_size(void); + +/** + * get_global_icache_size() - Get the thread's global icache. + * + * Returns the size of the current thread's global icache partition. + */ +unsigned int get_global_icache_size(void); + +/** + * check_for_dache_aliasing() - Ensure that the bootloader has configured the + * dache and icache properly to avoid aliasing + * @thread_id: Hardware thread ID + * + */ +void check_for_cache_aliasing(int thread_id); + +#endif diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h new file mode 100644 index 000000000000..bdbc3a51f31c --- /dev/null +++ b/arch/metag/include/asm/core_reg.h @@ -0,0 +1,35 @@ +#ifndef __ASM_METAG_CORE_REG_H_ +#define __ASM_METAG_CORE_REG_H_ + +#include + +extern void core_reg_write(int unit, int reg, int thread, unsigned int val); +extern unsigned int core_reg_read(int unit, int reg, int thread); + +/* + * These macros allow direct access from C to any register known to the + * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT. + */ + +#define __core_reg_get(reg) ({ \ + unsigned int __grvalue; \ + asm volatile("MOV %0," #reg \ + : "=r" (__grvalue)); \ + __grvalue; \ +}) + +#define __core_reg_set(reg, value) do { \ + unsigned int __srvalue = (value); \ + asm volatile("MOV " #reg ",%0" \ + : \ + : "r" (__srvalue)); \ +} while (0) + +#define __core_reg_swap(reg, value) do { \ + unsigned int __srvalue = (value); \ + asm volatile("SWAP " #reg ",%0" \ + : "+r" (__srvalue)); \ + (value) = __srvalue; \ +} while (0) + +#endif diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h new file mode 100644 index 000000000000..e0373f81a117 --- /dev/null +++ b/arch/metag/include/asm/smp.h @@ -0,0 +1,29 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +enum ipi_msg_type { + IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, + IPI_RESCHEDULE, +}; + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask + +asmlinkage void secondary_start_kernel(void); + +extern void secondary_startup(void); + +#ifdef CONFIG_HOTPLUG_CPU +extern void __cpu_die(unsigned int cpu); +extern int __cpu_disable(void); +extern void cpu_die(void); +#endif + +extern void smp_init_cpus(void); +#endif /* __ASM_SMP_H */ diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h new file mode 100644 index 000000000000..23f5118f58db --- /dev/null +++ b/arch/metag/include/asm/topology.h @@ -0,0 +1,53 @@ +#ifndef _ASM_METAG_TOPOLOGY_H +#define _ASM_METAG_TOPOLOGY_H + +#ifdef CONFIG_NUMA + +/* sched_domains SD_NODE_INIT for Meta machines */ +#define SD_NODE_INIT (struct sched_domain) { \ + .parent = NULL, \ + .child = NULL, \ + .groups = NULL, \ + .min_interval = 8, \ + .max_interval = 32, \ + .busy_factor = 32, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 2, \ + .busy_idx = 3, \ + .idle_idx = 2, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_FORK \ + | SD_BALANCE_EXEC \ + | SD_BALANCE_NEWIDLE \ + | SD_SERIALIZE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + +#define cpu_to_node(cpu) ((void)(cpu), 0) +#define parent_node(node) ((void)(node), 0) + +#define cpumask_of_node(node) ((void)node, cpu_online_mask) + +#define pcibus_to_node(bus) ((void)(bus), -1) +#define 
cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) + +#endif + +#define mc_capable() (1) + +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +extern cpumask_t cpu_core_map[NR_CPUS]; + +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) + +#include + +#endif /* _ASM_METAG_TOPOLOGY_H */ diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c new file mode 100644 index 000000000000..3a589dfb966b --- /dev/null +++ b/arch/metag/kernel/cachepart.c @@ -0,0 +1,124 @@ +/* + * Meta cache partition manipulation. + * + * Copyright 2010 Imagination Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n)) +#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n)) + +#define CACHE_ASSOCIATIVITY 4 /* 4 way set-assosiative */ +#define ICACHE 0 +#define DCACHE 1 + +/* The CORE_CONFIG2 register is not available on Meta 1 */ +#ifdef CONFIG_METAG_META21 +unsigned int get_dcache_size(void) +{ + unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); + return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) + >> METAC_CORECFG2_DCSZ_S); +} + +unsigned int get_icache_size(void) +{ + unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); + return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) + >> METAC_CORE_C2ICSZ_S); +} + +unsigned int get_global_dcache_size(void) +{ + unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id())); + unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; + return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; +} + +unsigned int get_global_icache_size(void) +{ + unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id())); + unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; + return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; +} + +static unsigned int get_thread_cache_size(unsigned int cache, int thread_id) +{ + unsigned int cache_size; + unsigned int t_cache_part; + unsigned int isEnabled; + unsigned int offset = 0; + isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 : + metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1); + if (!isEnabled) + return 0; +#if PAGE_OFFSET >= LINGLOBAL_BASE + /* Checking for global cache */ + cache_size = (cache == DCACHE ? get_global_dache_size() : + get_global_icache_size()); + offset = 8; +#else + cache_size = (cache == DCACHE ? get_dcache_size() : + get_icache_size()); +#endif + t_cache_part = (cache == DCACHE ? + (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF : + (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF); + switch (t_cache_part) { + case 0xF: + return cache_size; + case 0x7: + return cache_size / 2; + case 0x3: + return cache_size / 4; + case 0x1: + return cache_size / 8; + case 0: + return cache_size / 16; + } + return -1; +} + +void check_for_cache_aliasing(int thread_id) +{ + unsigned int thread_cache_size; + unsigned int cache_type; + for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) { + thread_cache_size = + get_thread_cache_size(cache_type, thread_id); + if (thread_cache_size < 0) + pr_emerg("Can't read %s cache size", \ + cache_type ? "DCACHE" : "ICACHE"); + else if (thread_cache_size == 0) + /* Cache is off. No need to check for aliasing */ + continue; + if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) { + pr_emerg("Cache aliasing detected in %s on Thread %d", + cache_type ? 
"DCACHE" : "ICACHE", thread_id); + pr_warn("Total %s size: %u bytes", + cache_type ? "DCACHE" : "ICACHE ", + cache_type ? get_dcache_size() + : get_icache_size()); + pr_warn("Thread %s size: %d bytes", + cache_type ? "CACHE" : "ICACHE", + thread_cache_size); + pr_warn("Page Size: %lu bytes", PAGE_SIZE); + } + } +} + +#else + +void check_for_cache_aliasing(int thread_id) +{ + return; +} + +#endif diff --git a/arch/metag/kernel/core_reg.c b/arch/metag/kernel/core_reg.c new file mode 100644 index 000000000000..671cce8c34f2 --- /dev/null +++ b/arch/metag/kernel/core_reg.c @@ -0,0 +1,117 @@ +/* + * Support for reading and writing Meta core internal registers. + * + * Copyright (C) 2011 Imagination Technologies Ltd. + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#define UNIT_BIT_MASK TXUXXRXRQ_UXX_BITS +#define REG_BIT_MASK TXUXXRXRQ_RX_BITS +#define THREAD_BIT_MASK TXUXXRXRQ_TX_BITS + +#define UNIT_SHIFTS TXUXXRXRQ_UXX_S +#define REG_SHIFTS TXUXXRXRQ_RX_S +#define THREAD_SHIFTS TXUXXRXRQ_TX_S + +#define UNIT_VAL(x) (((x) << UNIT_SHIFTS) & UNIT_BIT_MASK) +#define REG_VAL(x) (((x) << REG_SHIFTS) & REG_BIT_MASK) +#define THREAD_VAL(x) (((x) << THREAD_SHIFTS) & THREAD_BIT_MASK) + +/* + * core_reg_write() - modify the content of a register in a core unit. + * @unit: The unit to be modified. + * @reg: Register number within the unit. + * @thread: The thread we want to access. + * @val: The new value to write. + * + * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID, + * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM, + * TXPOLLI_REGNUM, etc). + */ +void core_reg_write(int unit, int reg, int thread, unsigned int val) +{ + unsigned long flags; + + /* TXUCT_ID has its own memory mapped registers */ + if (unit == TXUCT_ID) { + void __iomem *cu_reg = __CU_addr(thread, reg); + metag_out32(val, cu_reg); + return; + } + + __global_lock2(flags); + + /* wait for ready */ + while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) + udelay(10); + + /* set the value to write */ + metag_out32(val, TXUXXRXDT); + + /* set the register to write */ + val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread); + metag_out32(val, TXUXXRXRQ); + + /* wait for finish */ + while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) + udelay(10); + + __global_unlock2(flags); +} +EXPORT_SYMBOL(core_reg_write); + +/* + * core_reg_read() - read the content of a register in a core unit. + * @unit: The unit to be modified. + * @reg: Register number within the unit. + * @thread: The thread we want to access. + * + * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID, + * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM, + * TXPOLLI_REGNUM, etc). 
+ */ +unsigned int core_reg_read(int unit, int reg, int thread) +{ + unsigned long flags; + unsigned int val; + + /* TXUCT_ID has its own memory mapped registers */ + if (unit == TXUCT_ID) { + void __iomem *cu_reg = __CU_addr(thread, reg); + val = metag_in32(cu_reg); + return val; + } + + __global_lock2(flags); + + /* wait for ready */ + while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) + udelay(10); + + /* set the register to read */ + val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) | + TXUXXRXRQ_RDnWR_BIT); + metag_out32(val, TXUXXRXRQ); + + /* wait for finish */ + while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) + udelay(10); + + /* read the register value */ + val = metag_in32(TXUXXRXDT); + + __global_unlock2(flags); + + return val; +} +EXPORT_SYMBOL(core_reg_read); diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S index 8b1388663892..969dffabc03a 100644 --- a/arch/metag/kernel/head.S +++ b/arch/metag/kernel/head.S @@ -43,3 +43,15 @@ __start: __exit: XOR TXENABLE,D0Re0,D0Re0 .size __exit,.-__exit + +#ifdef CONFIG_SMP + .global _secondary_startup + .type _secondary_startup,function +_secondary_startup: + MOVT A0StP,#HI(_secondary_data_stack) + ADD A0StP,A0StP,#LO(_secondary_data_stack) + GETD A0StP,[A0StP] + ADD A0StP,A0StP,#THREAD_INFO_SIZE + B _secondary_start_kernel + .size _secondary_startup,.-_secondary_startup +#endif diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c new file mode 100644 index 000000000000..d1163127eb68 --- /dev/null +++ b/arch/metag/kernel/smp.c @@ -0,0 +1,575 @@ +/* + * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd. + * + * Copyright (C) 2002 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DECLARE_PER_CPU(PTBI, pTBI); + +void *secondary_data_stack; + +/* + * structures for inter-processor calls + * - A collection of single bit ipi messages. + */ +struct ipi_data { + spinlock_t lock; + unsigned long ipi_count; + unsigned long bits; +}; + +static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { + .lock = __SPIN_LOCK_UNLOCKED(ipi_data.lock), +}; + +static DEFINE_SPINLOCK(boot_lock); + +/* + * "thread" is assumed to be a valid Meta hardware thread ID. + */ +int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle) +{ + u32 val; + + /* + * set synchronisation state between this boot processor + * and the secondary one + */ + spin_lock(&boot_lock); + + core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup); + core_reg_write(TXUPC_ID, 1, thread, 0); + + /* + * Give the thread privilege (PSTAT) and clear potentially problematic + * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP). + */ + core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT); + + /* Clear the minim enable bit. */ + val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread); + core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80); + + /* + * set the ThreadEnable bit (0x1) in the TXENABLE register + * for the specified thread - off it goes! 
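+	 * The thread then begins execution at secondary_startup, whose
+	 * address was written into its PC via TXUPC above.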
+ */ + val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread); + core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1); + + /* + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ + spin_unlock(&boot_lock); + + return 0; +} + +int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) +{ + unsigned int thread = cpu_2_hwthread_id[cpu]; + int ret; + + load_pgd(swapper_pg_dir, thread); + + flush_tlb_all(); + + /* + * Tell the secondary CPU where to find its idle thread's stack. + */ + secondary_data_stack = task_stack_page(idle); + + wmb(); + + /* + * Now bring the CPU into our world. + */ + ret = boot_secondary(thread, idle); + if (ret == 0) { + unsigned long timeout; + + /* + * CPU was successfully started, wait for it + * to come online or time out. + */ + timeout = jiffies + HZ; + while (time_before(jiffies, timeout)) { + if (cpu_online(cpu)) + break; + + udelay(10); + barrier(); + } + + if (!cpu_online(cpu)) + ret = -EIO; + } + + secondary_data_stack = NULL; + + if (ret) { + pr_crit("CPU%u: processor failed to boot\n", cpu); + + /* + * FIXME: We need to clean up the new idle thread. --rmk + */ + } + + return ret; +} + +#ifdef CONFIG_HOTPLUG_CPU +static DECLARE_COMPLETION(cpu_killed); + +/* + * __cpu_disable runs on the processor to be shutdown. + */ +int __cpuexit __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + struct task_struct *p; + + /* + * Take this CPU offline. Once we clear this, we can't return, + * and we must not schedule until we're ready to give up the cpu. + */ + set_cpu_online(cpu, false); + + /* + * OK - migrate IRQs away from this CPU + */ + migrate_irqs(); + + /* + * Flush user cache and TLB mappings, and then remove this CPU + * from the vm mask set of all processes. + */ + flush_cache_all(); + local_flush_tlb_all(); + + read_lock(&tasklist_lock); + for_each_process(p) { + if (p->mm) + cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); + } + read_unlock(&tasklist_lock); + + return 0; +} + +/* + * called on the thread which is asking for a CPU to be shutdown - + * waits until shutdown has completed, or it is timed out. + */ +void __cpuexit __cpu_die(unsigned int cpu) +{ + if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1))) + pr_err("CPU%u: unable to kill\n", cpu); +} + +/* + * Called from the idle thread for the CPU which has been shutdown. + * + * Note that we do not return from this function. If this cpu is + * brought online again it will need to run secondary_startup(). + */ +void __cpuexit cpu_die(void) +{ + local_irq_disable(); + idle_task_exit(); + + complete(&cpu_killed); + + asm ("XOR TXENABLE, D0Re0,D0Re0\n"); +} +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * Called by both boot and secondaries to move global data into + * per-processor storage. + */ +void __cpuinit smp_store_cpu_info(unsigned int cpuid) +{ + struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); + + cpu_info->loops_per_jiffy = loops_per_jiffy; +} + +/* + * This is the secondary CPU boot entry. We're using this CPUs + * idle thread stack and the global page tables. + */ +asmlinkage void secondary_start_kernel(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + /* + * All kernel threads share the same mm context; grab a + * reference and switch to it. 
+ */ + atomic_inc(&mm->mm_users); + atomic_inc(&mm->mm_count); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); + + /* + * TODO: Some day it might be useful for each Linux CPU to + * have its own TBI structure. That would allow each Linux CPU + * to run different interrupt handlers for the same IRQ + * number. + * + * For now, simply copying the pointer to the boot CPU's TBI + * structure is sufficient because we always want to run the + * same interrupt handler whatever CPU takes the interrupt. + */ + per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); + + if (!per_cpu(pTBI, cpu)) + panic("No TBI found!"); + + per_cpu_trap_init(cpu); + + preempt_disable(); + + setup_txprivext(); + + /* + * Enable local interrupts. + */ + tbi_startup_interrupt(TBID_SIGNUM_TRT); + notify_cpu_starting(cpu); + local_irq_enable(); + + pr_info("CPU%u (thread %u): Booted secondary processor\n", + cpu, cpu_2_hwthread_id[cpu]); + + calibrate_delay(); + smp_store_cpu_info(cpu); + + /* + * OK, now it's safe to let the boot CPU continue + */ + set_cpu_online(cpu, true); + + /* + * Check for cache aliasing. + * Preemption is disabled + */ + check_for_cache_aliasing(cpu); + + /* + * OK, it's off to the idle thread for us + */ + cpu_idle(); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + int cpu; + unsigned long bogosum = 0; + + for_each_online_cpu(cpu) + bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; + + pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n", + num_online_cpus(), + bogosum / (500000/HZ), + (bogosum / (5000/HZ)) % 100); +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned int cpu = smp_processor_id(); + + init_new_context(current, &init_mm); + current_thread_info()->cpu = cpu; + + smp_store_cpu_info(cpu); + init_cpu_present(cpu_possible_mask); +} + +void __init smp_prepare_boot_cpu(void) +{ + unsigned int cpu = smp_processor_id(); + + per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); + + if (!per_cpu(pTBI, cpu)) + panic("No TBI found!"); +} + +static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg); + +static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) +{ + unsigned long flags; + unsigned int cpu; + cpumask_t map; + + cpumask_clear(&map); + local_irq_save(flags); + + for_each_cpu(cpu, mask) { + struct ipi_data *ipi = &per_cpu(ipi_data, cpu); + + spin_lock(&ipi->lock); + + /* + * KICK interrupts are queued in hardware so we'll get + * multiple interrupts if we call smp_cross_call() + * multiple times for one msg. The problem is that we + * only have one bit for each message - we can't queue + * them in software. + * + * The first time through ipi_handler() we'll clear + * the msg bit, having done all the work. But when we + * return we'll get _another_ interrupt (and another, + * and another until we've handled all the queued + * KICKs). Running ipi_handler() when there's no work + * to do is bad because that's how kick handler + * chaining detects who the KICK was intended for. + * See arch/metag/kernel/kick.c for more details. + * + * So only add 'cpu' to 'map' if we haven't already + * queued a KICK interrupt for 'msg'. + */ + if (!(ipi->bits & (1 << msg))) { + ipi->bits |= 1 << msg; + cpumask_set_cpu(cpu, &map); + } + + spin_unlock(&ipi->lock); + } + + /* + * Call the platform specific cross-CPU call function. 
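+	 * On Meta this is smp_cross_call(), which raises a KICK interrupt
+	 * on every hardware thread in 'map' (see kick_raise_softirq()
+	 * below).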
+ */ + smp_cross_call(map, msg); + + local_irq_restore(flags); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); +} + +void show_ipi_list(struct seq_file *p) +{ + unsigned int cpu; + + seq_puts(p, "IPI:"); + + for_each_present_cpu(cpu) + seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); + + seq_putc(p, '\n'); +} + +static DEFINE_SPINLOCK(stop_lock); + +/* + * Main handler for inter-processor interrupts + * + * For Meta, the ipimask now only identifies a single + * category of IPI (Bit 1 IPIs have been replaced by a + * different mechanism): + * + * Bit 0 - Inter-processor function call + */ +static int do_IPI(struct pt_regs *regs) +{ + unsigned int cpu = smp_processor_id(); + struct ipi_data *ipi = &per_cpu(ipi_data, cpu); + struct pt_regs *old_regs = set_irq_regs(regs); + unsigned long msgs, nextmsg; + int handled = 0; + + ipi->ipi_count++; + + spin_lock(&ipi->lock); + msgs = ipi->bits; + nextmsg = msgs & -msgs; + ipi->bits &= ~nextmsg; + spin_unlock(&ipi->lock); + + if (nextmsg) { + handled = 1; + + nextmsg = ffz(~nextmsg); + switch (nextmsg) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + + default: + pr_crit("CPU%u: Unknown IPI message 0x%lx\n", + cpu, nextmsg); + break; + } + } + + set_irq_regs(old_regs); + + return handled; +} + +void smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +static void stop_this_cpu(void *data) +{ + unsigned int cpu = smp_processor_id(); + + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { + spin_lock(&stop_lock); + pr_crit("CPU%u: stopping\n", cpu); + dump_stack(); + spin_unlock(&stop_lock); + } + + set_cpu_online(cpu, false); + + local_irq_disable(); + + hard_processor_halt(HALT_OK); +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, NULL, 0); +} + +/* + * not supported here + */ +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +/* + * We use KICKs for inter-processor interrupts. + * + * For every CPU in "callmap" the IPI data must already have been + * stored in that CPU's "ipi_data" member prior to calling this + * function. + */ +static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) +{ + int cpu; + + for_each_cpu(cpu, &callmap) { + unsigned int thread; + + thread = cpu_2_hwthread_id[cpu]; + + BUG_ON(thread == BAD_HWTHREAD_ID); + + metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE)); + } +} + +static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI, int *handled) +{ + *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); + + return State; +} + +static struct kick_irq_handler ipi_irq = { + .func = ipi_handler, +}; + +static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg) +{ + kick_raise_softirq(callmap, 1); +} + +static inline unsigned int get_core_count(void) +{ + int i; + unsigned int ret = 0; + + for (i = 0; i < CONFIG_NR_CPUS; i++) { + if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i)) + ret++; + } + + return ret; +} + +/* + * Initialise the CPU possible map early - this describes the CPUs + * which may be present or become present in the system. 
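+ * On Meta the count comes from get_core_count() above, which probes
+ * TXENABLE on each hardware thread of the core.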
+ */ +void __init smp_init_cpus(void) +{ + unsigned int i, ncores = get_core_count(); + + /* If no hwthread_map early param was set use default mapping */ + for (i = 0; i < NR_CPUS; i++) + if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) { + cpu_2_hwthread_id[i] = i; + hwthread_id_2_cpu[i] = i; + } + + for (i = 0; i < ncores; i++) + set_cpu_possible(i, true); + + kick_register_func(&ipi_irq); +} diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c new file mode 100644 index 000000000000..bec3dec4922e --- /dev/null +++ b/arch/metag/kernel/topology.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2007 Paul Mundt + * Copyright (C) 2010 Imagination Technolohies Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); + +cpumask_t cpu_core_map[NR_CPUS]; + +static cpumask_t cpu_coregroup_map(unsigned int cpu) +{ + return *cpu_possible_mask; +} + +const struct cpumask *cpu_coregroup_mask(unsigned int cpu) +{ + return &cpu_core_map[cpu]; +} + +int arch_update_cpu_topology(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + cpu_core_map[cpu] = cpu_coregroup_map(cpu); + + return 0; +} + +static int __init topology_init(void) +{ + int i, ret; + +#ifdef CONFIG_NEED_MULTIPLE_NODES + for_each_online_node(i) + register_one_node(i); +#endif + + for_each_present_cpu(i) { + struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i); +#ifdef CONFIG_HOTPLUG_CPU + cpuinfo->cpu.hotpluggable = 1; +#endif + ret = register_cpu(&cpuinfo->cpu, i); + if (unlikely(ret)) + pr_warn("%s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + +#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP) + /* + * In the UP case, make sure the CPU association is still + * registered under each node. Without this, sysfs fails + * to make the connection between nodes other than node0 + * and cpu0. + */ + for_each_online_node(i) + if (i != numa_node_id()) + register_cpu_under_node(raw_smp_processor_id(), i); +#endif + + return 0; +} +subsys_initcall(topology_init); -- cgit v1.2.3 From f507758ccbed5c354cc1ce3b8f53ea072d7bc222 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 16:27:03 +0100 Subject: metag: DMA Add DMA mapping code. 
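The Meta implementation is non-coherent: dma_alloc_coherent() remaps the
allocated pages into an uncached "consistent" region of the kernel address
space (CONSISTENT_START to CONSISTENT_END), and the streaming mappings
perform explicit cache maintenance through dma_sync_for_device() and
dma_sync_for_cpu().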
Signed-off-by: James Hogan --- arch/metag/include/asm/dma-mapping.h | 183 +++++++++++++ arch/metag/kernel/dma.c | 507 +++++++++++++++++++++++++++++++++++ 2 files changed, 690 insertions(+) create mode 100644 arch/metag/include/asm/dma-mapping.h create mode 100644 arch/metag/kernel/dma.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h new file mode 100644 index 000000000000..b5f80a62fe8b --- /dev/null +++ b/arch/metag/include/asm/dma-mapping.h @@ -0,0 +1,183 @@ +#ifndef _ASM_METAG_DMA_MAPPING_H +#define _ASM_METAG_DMA_MAPPING_H + +#include + +#include +#include +#include +#include + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +void dma_sync_for_device(void *vaddr, size_t size, int dma_direction); +void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction); + +int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + WARN_ON(size == 0); + dma_sync_for_device(ptr, size, direction); + return virt_to_phys(ptr); +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, + enum dma_data_direction direction) +{ + struct scatterlist *sg; + int i; + + BUG_ON(!valid_dma_direction(direction)); + WARN_ON(nents == 0 || sglist[0].length == 0); + + for_each_sg(sglist, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg->dma_address = sg_phys(sg); + dma_sync_for_device(sg_virt(sg), sg->length, direction); + } + + return nents; +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + dma_sync_for_device((void *)(page_to_phys(page) + offset), size, + direction); + return page_to_phys(page) + offset; +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); +} + + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries, + enum dma_data_direction direction) +{ + struct scatterlist *sg; + int i; + + BUG_ON(!valid_dma_direction(direction)); + WARN_ON(nhwentries == 0 || sglist[0].length == 0); + + for_each_sg(sglist, sg, nhwentries, i) { + BUG_ON(!sg_page(sg)); + + sg->dma_address = sg_phys(sg); + dma_sync_for_cpu(sg_virt(sg), sg->length, direction); + } +} + +static inline void +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + dma_sync_for_cpu(phys_to_virt(dma_handle), 
size, direction); +} + +static inline void +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction) +{ + dma_sync_for_device(phys_to_virt(dma_handle), size, direction); +} + +static inline void +dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size, + direction); +} + +static inline void +dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + dma_sync_for_device(phys_to_virt(dma_handle)+offset, size, + direction); +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + int i; + for (i = 0; i < nelems; i++, sg++) + dma_sync_for_cpu(sg_virt(sg), sg->length, direction); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + int i; + for (i = 0; i < nelems; i++, sg++) + dma_sync_for_device(sg_virt(sg), sg->length, direction); +} + +static inline int +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +#define dma_supported(dev, mask) (1) + +static inline int +dma_set_mask(struct device *dev, u64 mask) +{ + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} + +/* + * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to + * do any flushing here. + */ +static inline void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ +} + +#endif diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c new file mode 100644 index 000000000000..8c00dedadc54 --- /dev/null +++ b/arch/metag/kernel/dma.c @@ -0,0 +1,507 @@ +/* + * Meta version derived from arch/powerpc/lib/dma-noncoherent.c + * Copyright (C) 2008 Imagination Technologies Ltd. + * + * PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * + * Copyright (C) 2000 Russell King + * + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. The function return + * is the virtual address and 'dma_handle' is the physical address. + * Mostly stolen from the ARM port, with some changes for PowerPC. + * -- Dan + * + * Reorganized to get rid of the arch-specific consistent_* functions + * and provide non-coherent implementations for the DMA API. -Matt + * + * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() + * implementation. This is pulled straight from ARM and barely + * modified. -Matt + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_START) \ + >> PAGE_SHIFT) + +static u64 get_coherent_dma_mask(struct device *dev) +{ + u64 mask = ~0ULL; + + if (dev) { + mask = dev->coherent_dma_mask; + + /* + * Sanity check the DMA mask - it must be non-zero, and + * must be able to be satisfied by a DMA allocation. 
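+	 * A zero mask makes every allocation fail, while a limited mask both + * caps the maximum allocation size and steers the allocation into the + * GFP_DMA zone in dma_alloc_coherent() below.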
+ */ + if (mask == 0) { + dev_warn(dev, "coherent DMA mask is unset\n"); + return 0; + } + } + + return mask; +} +/* + * This is the page table (2MB) covering uncached, DMA consistent allocations + */ +static pte_t *consistent_pte; +static DEFINE_SPINLOCK(consistent_lock); + +/* + * VM region handling support. + * + * This should become something generic, handling VM region allocations for + * vmalloc and similar (ioremap, module space, etc). + * + * I envisage vmalloc()'s supporting vm_struct becoming: + * + * struct vm_struct { + * struct metag_vm_region region; + * unsigned long flags; + * struct page **pages; + * unsigned int nr_pages; + * unsigned long phys_addr; + * }; + * + * get_vm_area() would then call metag_vm_region_alloc with an appropriate + * struct metag_vm_region head (eg): + * + * struct metag_vm_region vmalloc_head = { + * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), + * .vm_start = VMALLOC_START, + * .vm_end = VMALLOC_END, + * }; + * + * However, vmalloc_head.vm_start is variable (typically, it is dependent on + * the amount of RAM found at boot time.) I would imagine that get_vm_area() + * would have to initialise this each time prior to calling + * metag_vm_region_alloc(). + */ +struct metag_vm_region { + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; + struct page *vm_pages; + int vm_active; +}; + +static struct metag_vm_region consistent_head = { + .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), + .vm_start = CONSISTENT_START, + .vm_end = CONSISTENT_END, +}; + +static struct metag_vm_region *metag_vm_region_alloc(struct metag_vm_region + *head, size_t size, + gfp_t gfp) +{ + unsigned long addr = head->vm_start, end = head->vm_end - size; + unsigned long flags; + struct metag_vm_region *c, *new; + + new = kmalloc(sizeof(struct metag_vm_region), gfp); + if (!new) + goto out; + + spin_lock_irqsave(&consistent_lock, flags); + + list_for_each_entry(c, &head->vm_list, vm_list) { + if ((addr + size) < addr) + goto nospc; + if ((addr + size) <= c->vm_start) + goto found; + addr = c->vm_end; + if (addr > end) + goto nospc; + } + +found: + /* + * Insert this entry _before_ the one we found. + */ + list_add_tail(&new->vm_list, &c->vm_list); + new->vm_start = addr; + new->vm_end = addr + size; + new->vm_active = 1; + + spin_unlock_irqrestore(&consistent_lock, flags); + return new; + +nospc: + spin_unlock_irqrestore(&consistent_lock, flags); + kfree(new); +out: + return NULL; +} + +static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region + *head, unsigned long addr) +{ + struct metag_vm_region *c; + + list_for_each_entry(c, &head->vm_list, vm_list) { + if (c->vm_active && c->vm_start == addr) + goto out; + } + c = NULL; +out: + return c; +} + +/* + * Allocate DMA-coherent memory space and return both the kernel remapped + * virtual and bus address for that space. 
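+ * + * Pages come from the page allocator, are zeroed and flushed from the + * data cache, and are then mapped write-combined into the 2MB consistent + * region, so neither the core nor the device sees stale cache lines.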
+ */ +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp) +{ + struct page *page; + struct metag_vm_region *c; + unsigned long order; + u64 mask = get_coherent_dma_mask(dev); + u64 limit; + + if (!consistent_pte) { + pr_err("%s: not initialised\n", __func__); + dump_stack(); + return NULL; + } + + if (!mask) + goto no_page; + size = PAGE_ALIGN(size); + limit = (mask + 1) & ~mask; + if ((limit && size >= limit) + || size >= (CONSISTENT_END - CONSISTENT_START)) { + pr_warn("coherent allocation too big (requested %#x mask %#Lx)\n", + size, mask); + return NULL; + } + + order = get_order(size); + + if (mask != 0xffffffff) + gfp |= GFP_DMA; + + page = alloc_pages(gfp, order); + if (!page) + goto no_page; + + /* + * Invalidate any data that might be lurking in the + * kernel direct-mapped region for device DMA. + */ + { + void *kaddr = page_address(page); + memset(kaddr, 0, size); + flush_dcache_region(kaddr, size); + } + + /* + * Allocate a virtual address in the consistent mapping region. + */ + c = metag_vm_region_alloc(&consistent_head, size, + gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); + if (c) { + unsigned long vaddr = c->vm_start; + pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr); + struct page *end = page + (1 << order); + + c->vm_pages = page; + split_page(page, order); + + /* + * Set the "dma handle" + */ + *handle = page_to_bus(page); + + do { + BUG_ON(!pte_none(*pte)); + + SetPageReserved(page); + set_pte_at(&init_mm, vaddr, + pte, mk_pte(page, + pgprot_writecombine + (PAGE_KERNEL))); + page++; + pte++; + vaddr += PAGE_SIZE; + } while (size -= PAGE_SIZE); + + /* + * Free the otherwise unused pages. + */ + while (page < end) { + __free_page(page); + page++; + } + + return (void *)c->vm_start; + } + + if (page) + __free_pages(page, order); +no_page: + return NULL; +} +EXPORT_SYMBOL(dma_alloc_coherent); + +/* + * free a page as defined by the above mapping. 
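+ * + * The PTEs covering the buffer are cleared, the TLB entries for the + * consistent region are invalidated, and the underlying pages are + * returned to the page allocator.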
+ */ +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + struct metag_vm_region *c; + unsigned long flags, addr; + pte_t *ptep; + + size = PAGE_ALIGN(size); + + spin_lock_irqsave(&consistent_lock, flags); + + c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr); + if (!c) + goto no_area; + + c->vm_active = 0; + if ((c->vm_end - c->vm_start) != size) { + pr_err("%s: freeing wrong coherent size (%ld != %d)\n", + __func__, c->vm_end - c->vm_start, size); + dump_stack(); + size = c->vm_end - c->vm_start; + } + + ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + addr = c->vm_start; + do { + pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); + unsigned long pfn; + + ptep++; + addr += PAGE_SIZE; + + if (!pte_none(pte) && pte_present(pte)) { + pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + ClearPageReserved(page); + + __free_page(page); + continue; + } + } + + pr_crit("%s: bad page in kernel page table\n", + __func__); + } while (size -= PAGE_SIZE); + + flush_tlb_kernel_range(c->vm_start, c->vm_end); + + list_del(&c->vm_list); + + spin_unlock_irqrestore(&consistent_lock, flags); + + kfree(c); + return; + +no_area: + spin_unlock_irqrestore(&consistent_lock, flags); + pr_err("%s: trying to free invalid coherent area: %p\n", + __func__, vaddr); + dump_stack(); +} +EXPORT_SYMBOL(dma_free_coherent); + + +static int dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int ret = -ENXIO; + + unsigned long flags, user_size, kern_size; + struct metag_vm_region *c; + + user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + spin_lock_irqsave(&consistent_lock, flags); + c = metag_vm_region_find(&consistent_head, (unsigned long)cpu_addr); + spin_unlock_irqrestore(&consistent_lock, flags); + + if (c) { + unsigned long off = vma->vm_pgoff; + + kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT; + + if (off < kern_size && + user_size <= (kern_size - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + page_to_pfn(c->vm_pages) + off, + user_size << PAGE_SHIFT, + vma->vm_page_prot); + } + } + + + return ret; +} + +int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return dma_mmap(dev, vma, cpu_addr, dma_addr, size); +} +EXPORT_SYMBOL(dma_mmap_coherent); + +int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + return dma_mmap(dev, vma, cpu_addr, dma_addr, size); +} +EXPORT_SYMBOL(dma_mmap_writecombine); + + + + +/* + * Initialise the consistent memory allocation. 
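+ * + * A single kernel page table is allocated to cover the 2MB consistent + * region and mirrored into the hardware page directory, so later calls + * to dma_alloc_coherent() only have to fill in individual PTEs.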
+ */ +static int __init dma_alloc_init(void) +{ + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte; + int ret = 0; + + do { + int offset = pgd_index(CONSISTENT_START); + pgd = pgd_offset(&init_mm, CONSISTENT_START); + pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); + pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); + if (!pmd) { + pr_err("%s: no pmd tables\n", __func__); + ret = -ENOMEM; + break; + } + WARN_ON(!pmd_none(*pmd)); + + pte = pte_alloc_kernel(pmd, CONSISTENT_START); + if (!pte) { + pr_err("%s: no pte tables\n", __func__); + ret = -ENOMEM; + break; + } + + pgd_k = ((pgd_t *) mmu_get_base()) + offset; + pud_k = pud_offset(pgd_k, CONSISTENT_START); + pmd_k = pmd_offset(pud_k, CONSISTENT_START); + set_pmd(pmd_k, *pmd); + + consistent_pte = pte; + } while (0); + + return ret; +} +early_initcall(dma_alloc_init); + +/* + * make an area consistent to devices. + */ +void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) +{ + /* + * Ensure any writes get through the write combiner. This is necessary + * even with DMA_FROM_DEVICE, or the write may dirty the cache after + * we've invalidated it and get written back during the DMA. + */ + + barrier(); + + switch (dma_direction) { + case DMA_BIDIRECTIONAL: + /* + * Writeback to ensure the device can see our latest changes and + * so that we have no dirty lines, and invalidate the cache + * lines too in preparation for receiving the buffer back + * (dma_sync_for_cpu) later. + */ + flush_dcache_region(vaddr, size); + break; + case DMA_TO_DEVICE: + /* + * Writeback to ensure the device can see our latest changes. + * There's no need to invalidate as the device shouldn't write + * to the buffer. + */ + writeback_dcache_region(vaddr, size); + break; + case DMA_FROM_DEVICE: + /* + * Invalidate to ensure we have no dirty lines that could get + * written back during the DMA. It's also safe to flush + * (writeback) here if necessary. + */ + invalidate_dcache_region(vaddr, size); + break; + case DMA_NONE: + BUG(); + } + + wmb(); +} +EXPORT_SYMBOL(dma_sync_for_device); + +/* + * make an area consistent to the core. + */ +void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) +{ + /* + * Hardware L2 cache prefetch doesn't occur across 4K physical + * boundaries, however according to Documentation/DMA-API-HOWTO.txt + * kmalloc'd memory is DMA'able, so accesses in nearby memory could + * trigger a cache fill in the DMA buffer. + * + * This should never cause dirty lines, so a flush or invalidate should + * be safe to allow us to see data from the device. + */ + if (_meta_l2c_pf_is_enabled()) { + switch (dma_direction) { + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + invalidate_dcache_region(vaddr, size); + break; + case DMA_TO_DEVICE: + /* The device shouldn't have written to the buffer */ + break; + case DMA_NONE: + BUG(); + } + } + + rmb(); +} +EXPORT_SYMBOL(dma_sync_for_cpu); -- cgit v1.2.3 From e8de3486a4b06389d4f996eb2dda678a39f20115 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 17:01:38 +0100 Subject: metag: Stack unwinding Add stack unwinding support for metag. 
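When CONFIG_FRAME_POINTER is enabled the unwinder follows the frame pointer chain, otherwise it falls back to scanning the stack for kernel text addresses. Callers seed a struct stackframe and hand it to walk_stackframe() together with a callback that is invoked once per frame. A minimal sketch of such a caller (the print_entry() and dump_current_stack() helpers here are illustrative, not part of this patch):

static int print_entry(struct stackframe *frame, void *data)
{
	/* A non-zero return value stops the walk early. */
	pr_info(" [<%08lx>] %pS\n", frame->pc, (void *)frame->pc);
	return 0;
}

static void dump_current_stack(void)
{
	/* A0StP is the Meta stack pointer register. */
	register unsigned long current_sp asm ("A0StP");
	struct stackframe frame;

	frame.fp = (unsigned long)__builtin_frame_address(0);
	frame.sp = current_sp;
	frame.lr = (unsigned long)__builtin_return_address(0);
	frame.pc = (unsigned long)dump_current_stack;
	walk_stackframe(&frame, print_entry, NULL);
}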
Signed-off-by: James Hogan --- arch/metag/include/asm/stacktrace.h | 20 ++++ arch/metag/kernel/stacktrace.c | 187 ++++++++++++++++++++++++++++++++++++ 2 files changed, 207 insertions(+) create mode 100644 arch/metag/include/asm/stacktrace.h create mode 100644 arch/metag/kernel/stacktrace.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/stacktrace.h b/arch/metag/include/asm/stacktrace.h new file mode 100644 index 000000000000..2830a0fe7ac9 --- /dev/null +++ b/arch/metag/include/asm/stacktrace.h @@ -0,0 +1,20 @@ +#ifndef __ASM_STACKTRACE_H +#define __ASM_STACKTRACE_H + +struct stackframe { + unsigned long fp; + unsigned long sp; + unsigned long lr; + unsigned long pc; +}; + +struct metag_frame { + unsigned long fp; + unsigned long lr; +}; + +extern int unwind_frame(struct stackframe *frame); +extern void walk_stackframe(struct stackframe *frame, + int (*fn)(struct stackframe *, void *), void *data); + +#endif /* __ASM_STACKTRACE_H */ diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c new file mode 100644 index 000000000000..5510361d5bea --- /dev/null +++ b/arch/metag/kernel/stacktrace.c @@ -0,0 +1,187 @@ +#include +#include +#include + +#include + +#if defined(CONFIG_FRAME_POINTER) + +#ifdef CONFIG_KALLSYMS +#include +#include + +static unsigned long tbi_boing_addr; +static unsigned long tbi_boing_size; + +static void tbi_boing_init(void) +{ + /* We need to know where TBIBoingVec is and its size */ + unsigned long size; + unsigned long offset; + char modname[MODULE_NAME_LEN]; + char name[KSYM_NAME_LEN]; + tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec"); + if (!tbi_boing_addr) + tbi_boing_addr = 1; + else if (!lookup_symbol_attrs(tbi_boing_addr, &size, + &offset, modname, name)) + tbi_boing_size = size; +} +#endif + +#define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) + +/* + * Unwind the current stack frame and store the new register values in the + * structure passed as argument. Unwinding is equivalent to a function return, + * hence the new PC value rather than the LR should be used for the backtrace. + */ +int notrace unwind_frame(struct stackframe *frame) +{ + struct metag_frame *fp = (struct metag_frame *)frame->fp; + unsigned long lr; + unsigned long fpnew; + + if (frame->fp & 0x7) + return -EINVAL; + + fpnew = fp->fp; + lr = fp->lr - 4; + +#ifdef CONFIG_KALLSYMS + /* If we've reached TBIBoingVec then we're at an interrupt + * entry point or a syscall entry point. The frame pointer + * points to a pt_regs which can be used to continue tracing on + * the other side of the boing. 
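+ * The saved frame pointer and return address are then recovered from + * the pt_regs so the trace continues in the interrupted context.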
+ */ + if (!tbi_boing_addr) + tbi_boing_init(); + if (tbi_boing_size && lr >= tbi_boing_addr && + lr < tbi_boing_addr + tbi_boing_size) { + struct pt_regs *regs = (struct pt_regs *)fpnew; + if (user_mode(regs)) + return -EINVAL; + fpnew = regs->ctx.AX[1].U0; + lr = regs->ctx.DX[4].U1; + } +#endif + + /* stack grows up, so frame pointers must decrease */ + if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) + + sizeof(struct thread_info)) || fpnew >= (unsigned long)fp) + return -EINVAL; + + /* restore the registers from the stack frame */ + frame->fp = fpnew; + frame->pc = lr; + + return 0; +} +#else +int notrace unwind_frame(struct stackframe *frame) +{ + struct metag_frame *sp = (struct metag_frame *)frame->sp; + + if (frame->sp & 0x7) + return -EINVAL; + + while (!kstack_end(sp)) { + unsigned long addr = sp->lr - 4; + sp--; + + if (__kernel_text_address(addr)) { + frame->sp = (unsigned long)sp; + frame->pc = addr; + return 0; + } + } + return -EINVAL; +} +#endif + +void notrace walk_stackframe(struct stackframe *frame, + int (*fn)(struct stackframe *, void *), void *data) +{ + while (1) { + int ret; + + if (fn(frame, data)) + break; + ret = unwind_frame(frame); + if (ret < 0) + break; + } +} +EXPORT_SYMBOL(walk_stackframe); + +#ifdef CONFIG_STACKTRACE +struct stack_trace_data { + struct stack_trace *trace; + unsigned int no_sched_functions; + unsigned int skip; +}; + +static int save_trace(struct stackframe *frame, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + unsigned long addr = frame->pc; + + if (data->no_sched_functions && in_sched_functions(addr)) + return 0; + if (data->skip) { + data->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = addr; + + return trace->nr_entries >= trace->max_entries; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + struct stack_trace_data data; + struct stackframe frame; + + data.trace = trace; + data.skip = trace->skip; + + if (tsk != current) { +#ifdef CONFIG_SMP + /* + * What guarantees do we have here that 'tsk' is not + * running on another CPU? For now, ignore it as we + * can't guarantee we won't explode. + */ + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; + return; +#else + data.no_sched_functions = 1; + frame.fp = thread_saved_fp(tsk); + frame.sp = thread_saved_sp(tsk); + frame.lr = 0; /* recovered from the stack */ + frame.pc = thread_saved_pc(tsk); +#endif + } else { + register unsigned long current_sp asm ("A0StP"); + + data.no_sched_functions = 0; + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_sp; + frame.lr = (unsigned long)__builtin_return_address(0); + frame.pc = (unsigned long)save_stack_trace_tsk; + } + + walk_stackframe(&frame, save_trace, &data); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif -- cgit v1.2.3 From 5633004cc2498ff50a5b88d415d3746ff0c301f2 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 9 Oct 2012 10:54:32 +0100 Subject: metag: Build infrastructure Add metag build infrastructure. 
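With these files in place the kernel configures and builds like any other architecture, for example (the cross-compiler prefix here is only an illustration and depends on the installed toolchain):

  make ARCH=metag CROSS_COMPILE=metag-linux- meta2_defconfig
  make ARCH=metag CROSS_COMPILE=metag-linux- vmlinux.bin
  make ARCH=metag CROSS_COMPILE=metag-linux- uImage.gz

vmlinux.bin is the default target; the uImage.* targets wrap it for U-Boot using the compression named by the suffix.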
Signed-off-by: James Hogan --- arch/metag/Kconfig | 277 +++++++++++++++++++++++++++++++++ arch/metag/Kconfig.debug | 40 +++++ arch/metag/Kconfig.soc | 55 +++++++ arch/metag/Makefile | 87 +++++++++++ arch/metag/boot/Makefile | 68 ++++++++ arch/metag/configs/meta1_defconfig | 38 +++++ arch/metag/configs/meta2_defconfig | 39 +++++ arch/metag/configs/meta2_smp_defconfig | 40 +++++ arch/metag/include/asm/Kbuild | 55 +++++++ arch/metag/include/uapi/asm/Kbuild | 13 ++ arch/metag/kernel/.gitignore | 1 + arch/metag/kernel/Makefile | 36 +++++ arch/metag/kernel/asm-offsets.c | 14 ++ arch/metag/kernel/metag_ksyms.c | 75 +++++++++ arch/metag/kernel/vmlinux.lds.S | 71 +++++++++ arch/metag/lib/Makefile | 22 +++ arch/metag/mm/Kconfig | 153 ++++++++++++++++++ arch/metag/mm/Makefile | 19 +++ arch/metag/tbx/Makefile | 21 +++ 19 files changed, 1124 insertions(+) create mode 100644 arch/metag/Kconfig create mode 100644 arch/metag/Kconfig.debug create mode 100644 arch/metag/Kconfig.soc create mode 100644 arch/metag/Makefile create mode 100644 arch/metag/boot/Makefile create mode 100644 arch/metag/configs/meta1_defconfig create mode 100644 arch/metag/configs/meta2_defconfig create mode 100644 arch/metag/configs/meta2_smp_defconfig create mode 100644 arch/metag/include/asm/Kbuild create mode 100644 arch/metag/include/uapi/asm/Kbuild create mode 100644 arch/metag/kernel/.gitignore create mode 100644 arch/metag/kernel/Makefile create mode 100644 arch/metag/kernel/asm-offsets.c create mode 100644 arch/metag/kernel/metag_ksyms.c create mode 100644 arch/metag/kernel/vmlinux.lds.S create mode 100644 arch/metag/lib/Makefile create mode 100644 arch/metag/mm/Kconfig create mode 100644 arch/metag/mm/Makefile create mode 100644 arch/metag/tbx/Makefile (limited to 'arch/metag/kernel') diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig new file mode 100644 index 000000000000..f786e6e09700 --- /dev/null +++ b/arch/metag/Kconfig @@ -0,0 +1,277 @@ +config SYMBOL_PREFIX + string + default "_" + +config METAG + def_bool y + select EMBEDDED + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_SHOW + select GENERIC_SIGALTSTACK + select GENERIC_SMP_IDLE_THREAD + select HAVE_64BIT_ALIGNED_ACCESS + select HAVE_ARCH_TRACEHOOK + select HAVE_DEBUG_KMEMLEAK + select HAVE_GENERIC_HARDIRQS + select HAVE_IRQ_WORK + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_DOMAIN + select MODULES_USE_ELF_RELA + select OF + select OF_EARLY_FLATTREE + select SPARSE_IRQ + +config ARCH_NO_VIRT_TO_BUS + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +config HAVE_LATENCYTOP_SUPPORT + def_bool y + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config RWSEM_XCHGADD_ALGORITHM + bool + +config GENERIC_HWEIGHT + def_bool y + +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_GPIO + def_bool n + +config NO_IOPORT + def_bool y + +source "init/Kconfig" + +source "kernel/Kconfig.freezer" + +menu "Processor type and features" + +config MMU + def_bool y + +config STACK_GROWSUP + def_bool y + +config HOTPLUG_CPU + bool "Enable CPU hotplug support" + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + + Say N if you want to disable CPU hotplug. 
+ +config HIGHMEM + bool "High Memory Support" + help + The address space of Meta processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address + space as well as some memory mapped IO. That means that, if you + have a large amount of physical memory and/or IO, not all of the + memory can be "permanently mapped" by the kernel. The physical + memory that is not permanently mapped is called "high memory". + + Depending on the selected kernel/user memory split, minimum + vmalloc space and actual amount of RAM, you may not need this + option which should result in a slightly faster kernel. + + If unsure, say n. + +source "arch/metag/mm/Kconfig" + +source "arch/metag/Kconfig.soc" + +config METAG_META12 + bool + help + Select this from the SoC config symbol to indicate that it contains a + Meta 1.2 core. + +config METAG_META21 + bool + help + Select this from the SoC config symbol to indicate that it contains a + Meta 2.1 core. + +config SMP + bool "Symmetric multi-processing support" + depends on METAG_META21 && METAG_META21_MMU + select USE_GENERIC_SMP_HELPERS + help + This enables support for systems with more than one thread running + Linux. If you have a system with only one thread running Linux, + say N. Otherwise, say Y. + +config NR_CPUS + int "Maximum number of CPUs (2-4)" if SMP + range 2 4 if SMP + default "1" if !SMP + default "4" if SMP + +config METAG_SMP_WRITE_REORDERING + bool + help + This attempts to prevent cache-memory incoherence due to external + reordering of writes from different hardware threads when SMP is + enabled. It adds fences (system event 0) to smp_mb and smp_rmb in an + attempt to catch some of the cases, and also before writes to shared + memory in LOCK1 protected atomics and spinlocks. + This will not completely prevent cache incoherency on affected cores. + +config METAG_LNKGET_AROUND_CACHE + bool + depends on METAG_META21 + help + This indicates that the LNKGET/LNKSET instructions go around the + cache, which requires some extra cache flushes when the memory needs + to be accessed by normal GET/SET instructions too. + +choice + prompt "Atomicity primitive" + default METAG_ATOMICITY_LNKGET + help + This option selects the mechanism for performing atomic operations. + +config METAG_ATOMICITY_IRQSOFF + depends on !SMP + bool "irqsoff" + help + This option disables interrupts to achieve atomicity. This mechanism + is not SMP-safe. + +config METAG_ATOMICITY_LNKGET + depends on METAG_META21 + bool "lnkget/lnkset" + help + This option uses the LNKGET and LNKSET instructions to achieve + atomicity. LNKGET/LNKSET are load-link/store-conditional instructions. + Choose this option if your system requires low latency. + +config METAG_ATOMICITY_LOCK1 + depends on SMP + bool "lock1" + help + This option uses the LOCK1 instruction for atomicity. This is mainly + provided as a debugging aid if the lnkget/lnkset atomicity primitive + isn't working properly. + +endchoice + +config METAG_FPU + bool "FPU Support" + depends on METAG_META21 + default y + help + This option allows processes to use FPU hardware available with this + CPU. If this option is not enabled FPU registers will not be saved + and restored on context-switch. + + If you plan on running programs which are compiled to use hard floats + say Y here. + +config METAG_DSP + bool "DSP Support" + help + This option allows processes to use DSP hardware available + with this CPU. If this option is not enabled DSP registers + will not be saved and restored on context-switch. 
+ + If you plan on running DSP programs say Y here. + +config METAG_PERFCOUNTER_IRQS + bool "PerfCounters interrupt support" + depends on METAG_META21 + help + This option enables using interrupts to collect information from + Performance Counters. This option is supported in new META21 + (starting from HTP265). + + When disabled, Performance Counters information will be collected + based on Timer Interrupt. + +menu "Boot options" + +config METAG_BUILTIN_DTB + bool "Embed DTB in kernel image" + default y + help + Embeds a device tree binary in the kernel image. + +config METAG_BUILTIN_DTB_NAME + string "Built in DTB" + depends on METAG_BUILTIN_DTB + help + Set the name of the DTB to embed (leave blank to pick one + automatically based on kernel configuration). + +config CMDLINE_BOOL + bool "Default bootloader kernel arguments" + +config CMDLINE + string "Kernel command line" + depends on CMDLINE_BOOL + help + On some architectures there is currently no way for the boot loader + to pass arguments to the kernel. For these architectures, you should + supply some command-line options at build time by entering them + here. + +config CMDLINE_FORCE + bool "Force default kernel command string" + depends on CMDLINE_BOOL + help + Set this to have arguments from the default kernel command string + override those passed by the boot loader. + +endmenu + +source "kernel/Kconfig.preempt" + +source kernel/Kconfig.hz + +endmenu + +menu "Power management options" + +source kernel/power/Kconfig + +endmenu + +menu "Executable file formats" + +source "fs/Kconfig.binfmt" + +endmenu + +source "net/Kconfig" + +source "drivers/Kconfig" + +source "fs/Kconfig" + +source "arch/metag/Kconfig.debug" + +source "security/Kconfig" + +source "crypto/Kconfig" + +source "lib/Kconfig" diff --git a/arch/metag/Kconfig.debug b/arch/metag/Kconfig.debug new file mode 100644 index 000000000000..e45bbf6a7a5d --- /dev/null +++ b/arch/metag/Kconfig.debug @@ -0,0 +1,40 @@ +menu "Kernel hacking" + +config TRACE_IRQFLAGS_SUPPORT + bool + default y + +source "lib/Kconfig.debug" + +config DEBUG_STACKOVERFLOW + bool "Check for stack overflows" + depends on DEBUG_KERNEL + help + This option will cause messages to be printed if free stack space + drops below a certain limit. + +config 4KSTACKS + bool "Use 4Kb for kernel stacks instead of 8Kb" + depends on DEBUG_KERNEL + help + If you say Y here the kernel will use a 4Kb stacksize for the + kernel stack attached to each process/thread. This facilitates + running more threads on a system and also reduces the pressure + on the VM subsystem for higher order allocations. This option + will also use IRQ stacks to compensate for the reduced stackspace. + +config METAG_FUNCTION_TRACE + bool "Output Meta real-time trace data for function entry/exit" + help + If you say Y here the kernel will use the Meta hardware trace + unit to output information about function entry and exit that + can be used by a debugger for profiling and call-graphs. + +config METAG_POISON_CATCH_BUFFERS + bool "Poison catch buffer contents on kernel entry" + help + If you say Y here the kernel will write poison data to the + catch buffer registers on kernel entry. This will make any + problem with catch buffer handling much more apparent. 
+ +endmenu diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc new file mode 100644 index 000000000000..ec079cfb7c6a --- /dev/null +++ b/arch/metag/Kconfig.soc @@ -0,0 +1,55 @@ +choice + prompt "SoC Type" + default META21_FPGA + +config META12_FPGA + bool "Meta 1.2 FPGA" + select METAG_META12 + help + This is a Meta 1.2 FPGA bitstream, just a bare CPU. + +config META21_FPGA + bool "Meta 2.1 FPGA" + select METAG_META21 + help + This is a Meta 2.1 FPGA bitstream, just a bare CPU. + +endchoice + +menu "SoC configuration" + +if METAG_META21 + +# Meta 2.x specific options + +config METAG_META21_MMU + bool "Meta 2.x MMU mode" + default y + help + Use the Meta 2.x MMU in extended mode. + +config METAG_UNALIGNED + bool "Meta 2.x unaligned access checking" + default y + help + All memory accesses will be checked for alignment and an exception + raised on unaligned accesses. This feature costs performance, but + without it there will be no notification of this type of error. + +config METAG_USER_TCM + bool "Meta on-chip memory support for userland" + select GENERIC_ALLOCATOR + default y + help + Allow the on-chip memories of Meta SoCs to be used by user + applications. + +endif + +config METAG_HALT_ON_PANIC + bool "Halt the core on panic" + help + Halt the core when a panic occurs. This is useful when running + pre-production silicon or in an FPGA environment. + +endmenu diff --git a/arch/metag/Makefile b/arch/metag/Makefile new file mode 100644 index 000000000000..81bd6a1c7483 --- /dev/null +++ b/arch/metag/Makefile @@ -0,0 +1,87 @@ +# +# metag/Makefile +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. Remember to have actions +# for "archclean", cleaning up for this architecture. +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1994 by Linus Torvalds +# 2007,2008,2012 by Imagination Technologies Ltd. 
+# + +LDFLAGS := +OBJCOPYFLAGS := -O binary -R .note -R .comment -S + +checkflags-$(CONFIG_METAG_META12) += -DMETAC_1_2 +checkflags-$(CONFIG_METAG_META21) += -DMETAC_2_1 +CHECKFLAGS += -D__metag__ $(checkflags-y) + +KBUILD_DEFCONFIG := meta2_defconfig + +sflags-$(CONFIG_METAG_META12) += -mmetac=1.2 +ifeq ($(CONFIG_METAG_META12),y) +# Only use TBI API 1.4 if DSP is enabled for META12 cores +sflags-$(CONFIG_METAG_DSP) += -DTBI_1_4 +endif +sflags-$(CONFIG_METAG_META21) += -mmetac=2.1 -DTBI_1_4 + +cflags-$(CONFIG_METAG_FUNCTION_TRACE) += -mhwtrace-leaf -mhwtrace-retpc +cflags-$(CONFIG_METAG_META21) += -mextensions=bex + +KBUILD_CFLAGS += -pipe +KBUILD_CFLAGS += -ffunction-sections + +KBUILD_CFLAGS += $(sflags-y) $(cflags-y) +KBUILD_AFLAGS += $(sflags-y) + +LDFLAGS_vmlinux := $(ldflags-y) + +head-y := arch/metag/kernel/head.o + +core-y += arch/metag/boot/dts/ +core-y += arch/metag/kernel/ +core-y += arch/metag/mm/ + +libs-y += arch/metag/lib/ +libs-y += arch/metag/tbx/ + +boot := arch/metag/boot + +boot_targets += uImage +boot_targets += uImage.gz +boot_targets += uImage.bz2 +boot_targets += uImage.xz +boot_targets += uImage.lzo +boot_targets += uImage.bin +boot_targets += vmlinux.bin + +PHONY += $(boot_targets) + +all: vmlinux.bin + +$(boot_targets): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +%.dtb %.dtb.S %.dtb.o: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ + +dtbs: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts dtbs + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + +define archhelp + echo '* vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)' + @echo ' uImage - Alias to bootable U-Boot image' + @echo ' uImage.bin - Kernel-only image for U-Boot (bin)' + @echo ' uImage.gz - Kernel-only image for U-Boot (gzip)' + @echo ' uImage.bz2 - Kernel-only image for U-Boot (bzip2)' + @echo ' uImage.xz - Kernel-only image for U-Boot (xz)' + @echo ' uImage.lzo - Kernel-only image for U-Boot (lzo)' + @echo ' dtbs - Build device tree blobs for enabled boards' +endef diff --git a/arch/metag/boot/Makefile b/arch/metag/boot/Makefile new file mode 100644 index 000000000000..5a1f88cf91e3 --- /dev/null +++ b/arch/metag/boot/Makefile @@ -0,0 +1,68 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2007,2012 Imagination Technologies Ltd. 
+# + +suffix-y := bin +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_XZ) := xz +suffix-$(CONFIG_KERNEL_LZO) := lzo + +targets += vmlinux.bin +targets += uImage +targets += uImage.gz +targets += uImage.bz2 +targets += uImage.xz +targets += uImage.lzo +targets += uImage.bin + +extra-y += vmlinux.bin +extra-y += vmlinux.bin.gz +extra-y += vmlinux.bin.bz2 +extra-y += vmlinux.bin.xz +extra-y += vmlinux.bin.lzo + +UIMAGE_LOADADDR = $(CONFIG_PAGE_OFFSET) + +ifeq ($(CONFIG_FUNCTION_TRACER),y) +orig_cflags := $(KBUILD_CFLAGS) +KBUILD_CFLAGS = $(subst -pg, , $(orig_cflags)) +endif + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + +$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE + $(call if_changed,xzkern) + +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo) + +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,uimage,gzip) + +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE + $(call if_changed,uimage,bzip2) + +$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE + $(call if_changed,uimage,xz) + +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE + $(call if_changed,uimage,lzo) + +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE + $(call if_changed,uimage,none) + +$(obj)/uImage: $(obj)/uImage.$(suffix-y) + @ln -sf $(notdir $<) $@ + @echo ' Image $@ is ready' diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig new file mode 100644 index 000000000000..837c235ab67a --- /dev/null +++ b/arch/metag/configs/meta1_defconfig @@ -0,0 +1,38 @@ +CONFIG_EXPERIMENTAL=y +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_LOG_BUF_SHIFT=13 +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_ELF_CORE is not set +CONFIG_SLAB=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_FLATMEM_MANUAL=y +CONFIG_META12_FPGA=y +CONFIG_HZ_100=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_DEBUG_INFO=y diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig new file mode 100644 index 000000000000..e5bada83dd76 --- /dev/null +++ b/arch/metag/configs/meta2_defconfig @@ -0,0 +1,39 @@ +CONFIG_EXPERIMENTAL=y +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_LOG_BUF_SHIFT=13 +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_ELF_CORE is not set +CONFIG_SLAB=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_METAG_L2C=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_METAG_HALT_ON_PANIC=y +CONFIG_HZ_100=y +CONFIG_DEVTMPFS=y +# 
CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_DEBUG_INFO=y diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig new file mode 100644 index 000000000000..41983a20db72 --- /dev/null +++ b/arch/metag/configs/meta2_smp_defconfig @@ -0,0 +1,40 @@ +CONFIG_EXPERIMENTAL=y +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_LOG_BUF_SHIFT=13 +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_ELF_CORE is not set +CONFIG_SLAB=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_METAG_L2C=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_METAG_HALT_ON_PANIC=y +CONFIG_SMP=y +CONFIG_HZ_100=y +CONFIG_DEVTMPFS=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_DEBUG_INFO=y diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild new file mode 100644 index 000000000000..3a139810559e --- /dev/null +++ b/arch/metag/include/asm/Kbuild @@ -0,0 +1,55 @@ +generic-y += auxvec.h +generic-y += bitsperlong.h +generic-y += bugs.h +generic-y += clkdev.h +generic-y += cputime.h +generic-y += current.h +generic-y += device.h +generic-y += dma.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += exec.h +generic-y += fb.h +generic-y += fcntl.h +generic-y += ftrace.h +generic-y += futex.h +generic-y += hardirq.h +generic-y += hw_irq.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local.h +generic-y += local64.h +generic-y += msgbuf.h +generic-y += mutex.h +generic-y += param.h +generic-y += pci.h +generic-y += percpu.h +generic-y += poll.h +generic-y += posix_types.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += shmbuf.h +generic-y += shmparam.h +generic-y += signal.h +generic-y += socket.h +generic-y += sockios.h +generic-y += stat.h +generic-y += statfs.h +generic-y += switch_to.h +generic-y += termbits.h +generic-y += termios.h +generic-y += timex.h +generic-y += trace_clock.h +generic-y += types.h +generic-y += ucontext.h +generic-y += unaligned.h +generic-y += user.h +generic-y += vga.h +generic-y += xor.h diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..876c71f866de --- /dev/null +++ 
b/arch/metag/include/uapi/asm/Kbuild @@ -0,0 +1,13 @@ +# UAPI Header export list +include include/uapi/asm-generic/Kbuild.asm + +header-y += byteorder.h +header-y += ptrace.h +header-y += resource.h +header-y += sigcontext.h +header-y += siginfo.h +header-y += swab.h +header-y += unistd.h + +generic-y += mman.h +generic-y += setup.h diff --git a/arch/metag/kernel/.gitignore b/arch/metag/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/metag/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile new file mode 100644 index 000000000000..e985d0ca618c --- /dev/null +++ b/arch/metag/kernel/Makefile @@ -0,0 +1,36 @@ +# +# Makefile for the Linux/Meta kernel. +# + +extra-y += head.o +extra-y += vmlinux.lds + +obj-y += cachepart.o +obj-y += clock.o +obj-y += core_reg.o +obj-y += devtree.o +obj-y += dma.o +obj-y += irq.o +obj-y += kick.o +obj-y += machines.o +obj-y += process.o +obj-y += ptrace.o +obj-y += setup.o +obj-y += signal.o +obj-y += stacktrace.o +obj-y += sys_metag.o +obj-y += tbiunexp.o +obj-y += time.o +obj-y += topology.o +obj-y += traps.o +obj-y += user_gateway.o + +obj-$(CONFIG_METAG_COREMEM) += coremem.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace_stub.o +obj-$(CONFIG_MODULES) += metag_ksyms.o +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_METAG_SUSPEND_MEM) += suspend.o +obj-$(CONFIG_METAG_USER_TCM) += tcm.o diff --git a/arch/metag/kernel/asm-offsets.c b/arch/metag/kernel/asm-offsets.c new file mode 100644 index 000000000000..bfc9205f9647 --- /dev/null +++ b/arch/metag/kernel/asm-offsets.c @@ -0,0 +1,14 @@ +/* + * This program is used to generate definitions needed by + * assembly language modules. 
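+ * Each DEFINE() is turned into an assembler-visible constant by the + * kbuild asm-offsets machinery, so assembly sources can use structure + * sizes and offsets without hard-coding them.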
+ * + */ + +#include +#include + +int main(void) +{ + DEFINE(THREAD_INFO_SIZE, sizeof(struct thread_info)); + return 0; +} diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c new file mode 100644 index 000000000000..c73ebd3db417 --- /dev/null +++ b/arch/metag/kernel/metag_ksyms.c @@ -0,0 +1,75 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* uaccess symbols */ +EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__get_user_asm_b); +EXPORT_SYMBOL(__get_user_asm_w); +EXPORT_SYMBOL(__get_user_asm_d); +EXPORT_SYMBOL(__put_user_asm_b); +EXPORT_SYMBOL(__put_user_asm_w); +EXPORT_SYMBOL(__put_user_asm_d); +EXPORT_SYMBOL(__put_user_asm_l); +EXPORT_SYMBOL(__strncpy_from_user); +EXPORT_SYMBOL(strnlen_user); +EXPORT_SYMBOL(__do_clear_user); + +EXPORT_SYMBOL(pTBI_get); +EXPORT_SYMBOL(meta_memoffset); +EXPORT_SYMBOL(kick_register_func); +EXPORT_SYMBOL(kick_unregister_func); +#ifdef CONFIG_SMP +EXPORT_SYMBOL(get_trigger_mask); +#else +EXPORT_SYMBOL(global_trigger_mask); +#endif + +EXPORT_SYMBOL(empty_zero_page); + +EXPORT_SYMBOL(pfn_base); +#ifdef CONFIG_FLATMEM +/* needed for the pfn_valid macro */ +EXPORT_SYMBOL(max_pfn); +EXPORT_SYMBOL(min_low_pfn); +#endif + +/* TBI symbols */ +EXPORT_SYMBOL(__TBI); +EXPORT_SYMBOL(__TBIFindSeg); +EXPORT_SYMBOL(__TBIPoll); +EXPORT_SYMBOL(__TBITimeStamp); + +#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) + +/* libgcc functions */ +DECLARE_EXPORT(__ashldi3); +DECLARE_EXPORT(__ashrdi3); +DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__udivsi3); +DECLARE_EXPORT(__divsi3); +DECLARE_EXPORT(__umodsi3); +DECLARE_EXPORT(__modsi3); +DECLARE_EXPORT(__muldi3); +DECLARE_EXPORT(__cmpdi2); +DECLARE_EXPORT(__ucmpdi2); + +/* Maths functions */ +EXPORT_SYMBOL(div_u64); +EXPORT_SYMBOL(div_s64); + +/* String functions */ +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memmove); diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..e12055e88bfe --- /dev/null +++ b/arch/metag/kernel/vmlinux.lds.S @@ -0,0 +1,71 @@ +/* ld script to make Meta Linux kernel */ + +#include +#include +#include + +#include + +OUTPUT_FORMAT("elf32-metag", "elf32-metag", "elf32-metag") +OUTPUT_ARCH(metag) +ENTRY(__start) + +_jiffies = _jiffies_64; +SECTIONS +{ + . = CONFIG_PAGE_OFFSET; + _text = .; + __text = .; + __stext = .; + HEAD_TEXT_SECTION + .text : { + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + *(.text.*) + *(.gnu.warning) + } + + __etext = .; /* End of text section */ + + __sdata = .; + RO_DATA_SECTION(PAGE_SIZE) + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + __edata = .; /* End of data section */ + + EXCEPTION_TABLE(16) + NOTES + + . = ALIGN(PAGE_SIZE); /* Init code and data */ + ___init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + + .init.arch.info : { + ___arch_info_begin = .; + *(.arch.info.init) + ___arch_info_end = .; + } + + PERCPU_SECTION(L1_CACHE_BYTES) + + ___init_end = .; + + BSS_SECTION(0, PAGE_SIZE, 0) + + __end = .; + + . = ALIGN(PAGE_SIZE); + __heap_start = .; + + DWARF_DEBUG + + /* When something in the kernel is NOT compiled as a module, the + * module cleanup code and data are put into these segments. Both + * can then be thrown away, as cleanup code is never called unless + * it's a module. 
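+ * The generic DISCARDS macro below drops those exit sections, along + * with other unwanted sections, from the final image.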
+ */ + DISCARDS +} diff --git a/arch/metag/lib/Makefile b/arch/metag/lib/Makefile new file mode 100644 index 000000000000..a41d24e270e6 --- /dev/null +++ b/arch/metag/lib/Makefile @@ -0,0 +1,22 @@ +# +# Makefile for Meta-specific library files. +# + +lib-y += usercopy.o +lib-y += copy_page.o +lib-y += clear_page.o +lib-y += memcpy.o +lib-y += memmove.o +lib-y += memset.o +lib-y += delay.o +lib-y += div64.o +lib-y += muldi3.o +lib-y += ashrdi3.o +lib-y += ashldi3.o +lib-y += lshrdi3.o +lib-y += divsi3.o +lib-y += modsi3.o +lib-y += cmpdi2.o +lib-y += ucmpdi2.o +lib-y += ip_fast_csum.o +lib-y += checksum.o diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig new file mode 100644 index 000000000000..cd7f2f2ad416 --- /dev/null +++ b/arch/metag/mm/Kconfig @@ -0,0 +1,153 @@ +menu "Memory management options" + +config PAGE_OFFSET + hex "Kernel page offset address" + default "0x40000000" + help + This option allows you to set the virtual address at which the + kernel will be mapped. +endmenu + +config KERNEL_4M_PAGES + bool "Map kernel with 4MB pages" + depends on METAG_META21_MMU + default y + help + Map the kernel with large pages to reduce TLB pressure. + +choice + prompt "User page size" + default PAGE_SIZE_4K + +config PAGE_SIZE_4K + bool "4kB" + help + This is the default page size used by all Meta cores. + +config PAGE_SIZE_8K + bool "8kB" + depends on METAG_META21_MMU + help + This enables 8kB pages as supported by Meta 2.x and later MMUs. + +config PAGE_SIZE_16K + bool "16kB" + depends on METAG_META21_MMU + help + This enables 16kB pages as supported by Meta 2.x and later MMUs. + +endchoice + +config NUMA + bool "Non Uniform Memory Access (NUMA) Support" + help + Some Meta systems have MMU-mappable on-chip memories with + lower latencies than main memory. This enables support for + these blocks by binding them to nodes and allowing + memory policies to be used for prioritizing and controlling + allocation behaviour. + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + range 10 32 + default "10" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + + The page size is not necessarily 4KB. Keep this in mind + when choosing a value for this option. + +config METAG_L2C + bool "Level 2 Cache Support" + depends on METAG_META21 + help + Say Y here to enable support for the Meta Level 2 (L2) cache. This + will enable the cache at start up if it hasn't already been enabled + by the bootloader. + + If the bootloader enables the L2 you must say Y here to ensure the + kernel takes the appropriate actions to keep the cache coherent. 
+ +config NODES_SHIFT + int + default "1" + depends on NEED_MULTIPLE_NODES + +config ARCH_FLATMEM_ENABLE + def_bool y + depends on !NUMA + +config ARCH_SPARSEMEM_ENABLE + def_bool y + select SPARSEMEM_STATIC + +config ARCH_SPARSEMEM_DEFAULT + def_bool y + +config MAX_ACTIVE_REGIONS + int + default "2" if SPARSEMEM + default "1" + +config ARCH_POPULATES_NODE_MAP + def_bool y + +config ARCH_SELECT_MEMORY_MODEL + def_bool y + +config SYS_SUPPORTS_HUGETLBFS + def_bool y + depends on METAG_META21_MMU + +choice + prompt "HugeTLB page size" + depends on METAG_META21_MMU && HUGETLB_PAGE + default HUGETLB_PAGE_SIZE_1M + +config HUGETLB_PAGE_SIZE_8K + bool "8kB" + depends on PAGE_SIZE_4K + +config HUGETLB_PAGE_SIZE_16K + bool "16kB" + depends on PAGE_SIZE_4K || PAGE_SIZE_8K + +config HUGETLB_PAGE_SIZE_32K + bool "32kB" + +config HUGETLB_PAGE_SIZE_64K + bool "64kB" + +config HUGETLB_PAGE_SIZE_128K + bool "128kB" + +config HUGETLB_PAGE_SIZE_256K + bool "256kB" + +config HUGETLB_PAGE_SIZE_512K + bool "512kB" + +config HUGETLB_PAGE_SIZE_1M + bool "1MB" + +config HUGETLB_PAGE_SIZE_2M + bool "2MB" + +config HUGETLB_PAGE_SIZE_4M + bool "4MB" + +endchoice + +config METAG_COREMEM + bool + default y if SUSPEND + +source "mm/Kconfig" diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile new file mode 100644 index 000000000000..994331164125 --- /dev/null +++ b/arch/metag/mm/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Linux Meta-specific parts of the memory manager. +# + +obj-y += cache.o +obj-y += extable.o +obj-y += fault.o +obj-y += init.o +obj-y += ioremap.o +obj-y += maccess.o + +mmu-y := mmu-meta1.o +mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o +obj-y += $(mmu-y) + +obj-$(CONFIG_HIGHMEM) += highmem.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_METAG_L2C) += l2cache.o +obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/metag/tbx/Makefile b/arch/metag/tbx/Makefile new file mode 100644 index 000000000000..e994239e518c --- /dev/null +++ b/arch/metag/tbx/Makefile @@ -0,0 +1,21 @@ +# +# Makefile for TBX library files. +# + +asflags-y += -mmetac=2.1 -Wa,-mfpu=metac21 -mdsp +asflags-$(CONFIG_SMP) += -DTBX_PERCPU_SP_SAVE + +ccflags-y += -mmetac=2.1 + +lib-y += tbicore.o +lib-y += tbictx.o +lib-y += tbidefr.o +lib-y += tbilogf.o +lib-y += tbipcx.o +lib-y += tbiroot.o +lib-y += tbisoft.o +lib-y += tbistring.o +lib-y += tbitimer.o + +lib-$(CONFIG_METAG_DSP) += tbidspram.o +lib-$(CONFIG_METAG_FPU) += tbictxfpu.o -- cgit v1.2.3 From 903b20ad6810e05bc5f7cc038257e80463e71001 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 16:54:55 +0100 Subject: metag: Perf Add Perf support for metag. 
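The PMU is exposed through the standard perf interface, so the usual tooling and the perf_event_open() syscall work unchanged. For example, counting core cycles (hardware event 0x03 on Meta) from userspace might look like the following sketch (error handling omitted):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* maps to event 0x03 */
	attr.disabled = 1;

	/* Count cycles of the calling thread on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("cycles: %lld\n", count);
	return 0;
}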
Signed-off-by: James Hogan Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- arch/metag/Kconfig | 1 + arch/metag/include/asm/perf_event.h | 4 + arch/metag/kernel/Makefile | 2 + arch/metag/kernel/perf/Makefile | 3 + arch/metag/kernel/perf/perf_event.c | 861 ++++++++++++++++++++++++++++++++++++ arch/metag/kernel/perf/perf_event.h | 106 +++++ arch/metag/kernel/perf_callchain.c | 96 ++++ 7 files changed, 1073 insertions(+) create mode 100644 arch/metag/include/asm/perf_event.h create mode 100644 arch/metag/kernel/perf/Makefile create mode 100644 arch/metag/kernel/perf/perf_event.c create mode 100644 arch/metag/kernel/perf/perf_event.h create mode 100644 arch/metag/kernel/perf_callchain.c (limited to 'arch/metag/kernel') diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index f786e6e09700..47972025818f 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -22,6 +22,7 @@ config METAG select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PERF_EVENTS select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA diff --git a/arch/metag/include/asm/perf_event.h b/arch/metag/include/asm/perf_event.h new file mode 100644 index 000000000000..105bbff0149f --- /dev/null +++ b/arch/metag/include/asm/perf_event.h @@ -0,0 +1,4 @@ +#ifndef __ASM_METAG_PERF_EVENT_H +#define __ASM_METAG_PERF_EVENT_H + +#endif /* __ASM_METAG_PERF_EVENT_H */ diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile index e985d0ca618c..a5e4ba6fd20a 100644 --- a/arch/metag/kernel/Makefile +++ b/arch/metag/kernel/Makefile @@ -25,6 +25,8 @@ obj-y += topology.o obj-y += traps.o obj-y += user_gateway.o +obj-$(CONFIG_PERF_EVENTS) += perf/ + obj-$(CONFIG_METAG_COREMEM) += coremem.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace_stub.o diff --git a/arch/metag/kernel/perf/Makefile b/arch/metag/kernel/perf/Makefile new file mode 100644 index 000000000000..b158cb27208d --- /dev/null +++ b/arch/metag/kernel/perf/Makefile @@ -0,0 +1,3 @@ +# Makefile for performance event core + +obj-y += perf_event.o diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c new file mode 100644 index 000000000000..a876d5ff3897 --- /dev/null +++ b/arch/metag/kernel/perf/perf_event.c @@ -0,0 +1,861 @@ +/* + * Meta performance counter support. + * Copyright (C) 2012 Imagination Technologies Ltd + * + * This code is based on the sh pmu code: + * Copyright (C) 2009 Paul Mundt + * + * and on the arm pmu code: + * Copyright (C) 2009 picoChip Designs, Ltd., James Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "perf_event.h" + +static int _hw_perf_event_init(struct perf_event *); +static void _hw_perf_event_destroy(struct perf_event *); + +/* Determines which core type we are */ +static struct metag_pmu *metag_pmu __read_mostly; + +/* Processor specific data */ +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +/* PMU admin */ +const char *perf_pmu_name(void) +{ + if (metag_pmu) + return metag_pmu->pmu.name; + + return NULL; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ + if (metag_pmu) + return metag_pmu->max_events; + + return 0; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +static inline int metag_pmu_initialised(void) +{ + return !!metag_pmu; +} + +static void release_pmu_hardware(void) +{ + int irq; + unsigned int version = (metag_pmu->version & + (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> + METAC_ID_REV_S; + + /* Early cores don't have overflow interrupts */ + if (version < 0x0104) + return; + + irq = internal_irq_map(17); + if (irq >= 0) + free_irq(irq, (void *)1); + + irq = internal_irq_map(16); + if (irq >= 0) + free_irq(irq, (void *)0); +} + +static int reserve_pmu_hardware(void) +{ + int err = 0, irq[2]; + unsigned int version = (metag_pmu->version & + (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> + METAC_ID_REV_S; + + /* Early cores don't have overflow interrupts */ + if (version < 0x0104) + goto out; + + /* + * Bit 16 on HWSTATMETA is the interrupt for performance counter 0; + * similarly, 17 is the interrupt for performance counter 1. + * We can't (yet) interrupt on the cycle counter, because it's a + * register, however it holds a 32-bit value as opposed to 24-bit. + */ + irq[0] = internal_irq_map(16); + if (irq[0] < 0) { + pr_err("unable to map internal IRQ %d\n", 16); + goto out; + } + err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING, + "metagpmu0", (void *)0); + if (err) { + pr_err("unable to request IRQ%d for metag PMU counters\n", + irq[0]); + goto out; + } + + irq[1] = internal_irq_map(17); + if (irq[1] < 0) { + pr_err("unable to map internal IRQ %d\n", 17); + goto out_irq1; + } + err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING, + "metagpmu1", (void *)1); + if (err) { + pr_err("unable to request IRQ%d for metag PMU counters\n", + irq[1]); + goto out_irq1; + } + + return 0; + +out_irq1: + free_irq(irq[0], (void *)0); +out: + return err; +} + +/* PMU operations */ +static void metag_pmu_enable(struct pmu *pmu) +{ +} + +static void metag_pmu_disable(struct pmu *pmu) +{ +} + +static int metag_pmu_event_init(struct perf_event *event) +{ + int err = 0; + atomic_t *active_events = &metag_pmu->active_events; + + if (!metag_pmu_initialised()) { + err = -ENODEV; + goto out; + } + + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + event->destroy = _hw_perf_event_destroy; + + if (!atomic_inc_not_zero(active_events)) { + mutex_lock(&metag_pmu->reserve_mutex); + if (atomic_read(active_events) == 0) + err = reserve_pmu_hardware(); + + if (!err) + atomic_inc(active_events); + + mutex_unlock(&metag_pmu->reserve_mutex); + } + + /* Hardware and caches counters */ + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + err = _hw_perf_event_init(event); + break; + + default: + return -ENOENT; + } + + if (err) + event->destroy(event); + +out: + return err; +} + +void metag_pmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + u64 
prev_raw_count, new_raw_count;
+ s64 delta;
+
+ /*
+ * If this counter is chained, it may be that the previous counter
+ * value has been changed beneath us.
+ *
+ * To get around this, we read and exchange the new raw count, then
+ * add the delta (new - prev) to the generic counter atomically.
+ *
+ * Without interrupts, this is the simplest approach.
+ */
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = metag_pmu->read(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Calculate the delta and add it to the counter.
+ */
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+}
+
+int metag_pmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)metag_pmu->max_period)
+ left = metag_pmu->max_period;
+
+ if (metag_pmu->write)
+ metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static void metag_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ /*
+ * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
+ */
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /*
+ * Reset the period.
+ * Some counters can't be stopped (i.e. are core global), so when the
+ * counter was 'stopped' we merely disabled the IRQ. If we don't reset
+ * the period, then we'll either: a) get an overflow too soon;
+ * or b) too late if the overflow happened since disabling.
+ * Obviously, this has little bearing on cores without the overflow
+ * interrupt, as the performance counter resets to zero on write
+ * anyway.
+ */
+ if (metag_pmu->max_period)
+ metag_pmu_event_set_period(event, hwc, hwc->idx);
+ cpuc->events[idx] = event;
+ metag_pmu->enable(hwc, idx);
+}
+
+static void metag_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * We should always update the counter on stop; see the comment above
+ * for why.
+ */ + if (!(hwc->state & PERF_HES_STOPPED)) { + metag_pmu_event_update(event, hwc, hwc->idx); + metag_pmu->disable(hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int metag_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = 0, ret = 0; + + perf_pmu_disable(event->pmu); + + /* check whether we're counting instructions */ + if (hwc->config == 0x100) { + if (__test_and_set_bit(METAG_INST_COUNTER, + cpuc->used_mask)) { + ret = -EAGAIN; + goto out; + } + idx = METAG_INST_COUNTER; + } else { + /* Check whether we have a spare counter */ + idx = find_first_zero_bit(cpuc->used_mask, + atomic_read(&metag_pmu->active_events)); + if (idx >= METAG_INST_COUNTER) { + ret = -EAGAIN; + goto out; + } + + __set_bit(idx, cpuc->used_mask); + } + hwc->idx = idx; + + /* Make sure the counter is disabled */ + metag_pmu->disable(hwc, idx); + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + metag_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); +out: + perf_pmu_enable(event->pmu); + return ret; +} + +static void metag_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + metag_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + __clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void metag_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + metag_pmu_event_update(event, hwc, hwc->idx); +} + +static struct pmu pmu = { + .pmu_enable = metag_pmu_enable, + .pmu_disable = metag_pmu_disable, + + .event_init = metag_pmu_event_init, + + .add = metag_pmu_add, + .del = metag_pmu_del, + .start = metag_pmu_start, + .stop = metag_pmu_stop, + .read = metag_pmu_read, +}; + +/* Core counter specific functions */ +static const int metag_general_events[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x03, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x100, + [PERF_COUNT_HW_CACHE_REFERENCES] = -1, + [PERF_COUNT_HW_CACHE_MISSES] = -1, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1, + [PERF_COUNT_HW_REF_CPU_CYCLES] = -1, +}; + +static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x08, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x09, + [C(RESULT_MISS)] = 0x0a, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + 
[C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0xd0, + [C(RESULT_MISS)] = 0xd2, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0xd4, + [C(RESULT_MISS)] = 0xd5, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0xd1, + [C(RESULT_MISS)] = 0xd3, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + + +static void _hw_perf_event_destroy(struct perf_event *event) +{ + atomic_t *active_events = &metag_pmu->active_events; + struct mutex *pmu_mutex = &metag_pmu->reserve_mutex; + + if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) { + release_pmu_hardware(); + mutex_unlock(pmu_mutex); + } +} + +static int _hw_perf_cache_event(int config, int *evp) +{ + unsigned long type, op, result; + int ev; + + if (!metag_pmu->cache_events) + return -EINVAL; + + /* Unpack config */ + type = config & 0xff; + op = (config >> 8) & 0xff; + result = (config >> 16) & 0xff; + + if (type >= PERF_COUNT_HW_CACHE_MAX || + op >= PERF_COUNT_HW_CACHE_OP_MAX || + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ev = (*metag_pmu->cache_events)[type][op][result]; + if (ev == 0) + return -EOPNOTSUPP; + if (ev == -1) + return -EINVAL; + *evp = ev; + return 0; +} + +static int _hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + int mapping = 0, err; + + switch (attr->type) { + case PERF_TYPE_HARDWARE: + if (attr->config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = metag_pmu->event_map(attr->config); + break; + + case PERF_TYPE_HW_CACHE: + err = _hw_perf_cache_event(attr->config, &mapping); + if (err) + return err; + break; + } + + /* Return early if the event is unsupported */ + if (mapping == -1) + return -EINVAL; + + /* + * Early cores have "limited" counters - they have no overflow + * interrupts - and so are unable to do sampling without extra work + * and timer assistance. + */ + if (metag_pmu->max_period == 0) { + if (hwc->sample_period) + return -EINVAL; + } + + /* + * Don't assign an index until the event is placed into the hardware. + * -1 signifies that we're still deciding where to put it. On SMP + * systems each core has its own set of counters, so we can't do any + * constraint checking yet. 
+ */
+ hwc->idx = -1;
+
+ /* Store the event encoding */
+ hwc->config |= (unsigned long)mapping;
+
+ /*
+ * For non-sampling runs, limit the sample_period to half of the
+ * counter width. This way, the new counter value should be less
+ * likely to overtake the previous one (unless there are IRQ latency
+ * issues...)
+ */
+ if (metag_pmu->max_period) {
+ if (!hwc->sample_period) {
+ hwc->sample_period = metag_pmu->max_period >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+ }
+
+ return 0;
+}
+
+static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
+{
+ struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ unsigned int config = event->config;
+ unsigned int tmp = config & 0xf0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /*
+ * Check if we're enabling the instruction counter (index of
+ * MAX_HWEVENTS - 1)
+ */
+ if (METAG_INST_COUNTER == idx) {
+ WARN_ONCE((config != 0x100),
+ "invalid configuration (%d) for counter (%d)\n",
+ config, idx);
+
+ /* Reset the cycle count */
+ __core_reg_set(TXTACTCYC, 0);
+ goto unlock;
+ }
+
+ /* Check for a core internal or performance channel event. */
+ if (tmp) {
+ void *perf_addr = (void *)PERF_COUNT(idx);
+
+ /*
+ * Anything other than a cycle count will write the low-
+ * nibble to the correct counter register.
+ */
+ switch (tmp) {
+ case 0xd0:
+ perf_addr = (void *)PERF_ICORE(idx);
+ break;
+
+ case 0xf0:
+ perf_addr = (void *)PERF_CHAN(idx);
+ break;
+ }
+
+ metag_out32((tmp & 0x0f), perf_addr);
+
+ /*
+ * Now we use the high nibble as the performance event to
+ * count.
+ */
+ config = tmp >> 4;
+ }
+
+ /*
+ * Enabled counters start from 0. Early cores clear the count on
+ * write but newer cores don't, so we make sure that the count is
+ * set to 0.
+ */
+ tmp = ((config & 0xf) << 28) |
+ ((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+ metag_out32(tmp, PERF_COUNT(idx));
+unlock:
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
+{
+ struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ unsigned int tmp = 0;
+ unsigned long flags;
+
+ /*
+ * The cycle counter can't be disabled per se, as it's a hardware
+ * thread register which is always counting. We merely return if this
+ * is the counter we're attempting to disable.
+ */
+ if (METAG_INST_COUNTER == idx)
+ return;
+
+ /*
+ * The counter value _should_ have been read prior to disabling,
+ * as if we're running on an early core then the value gets reset to
+ * 0, and any read after that would be useless. On the newer cores,
+ * however, it's better to read-modify-update this for purposes of
+ * the overflow interrupt.
+ * Here we remove the thread id AND the event nibble (there are at
+ * least two events that count events that are core global and ignore
+ * the thread id mask). This only works because we don't mix thread
+ * performance counts, and event 0x00 requires a thread id mask!
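+ *
+ * (Register layout, as set up in the enable path above: bits 31-28
+ * hold the event nibble, bits 27-24 the per-thread enable mask, and
+ * bits 23-0 the count itself - hence the 0x00ffffff masking below.)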
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ tmp = metag_in32(PERF_COUNT(idx));
+ tmp &= 0x00ffffff;
+ metag_out32(tmp, PERF_COUNT(idx));
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u64 metag_pmu_read_counter(int idx)
+{
+ u32 tmp = 0;
+
+ /* The act of reading the cycle counter also clears it */
+ if (METAG_INST_COUNTER == idx) {
+ __core_reg_swap(TXTACTCYC, tmp);
+ goto out;
+ }
+
+ tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+out:
+ return tmp;
+}
+
+static void metag_pmu_write_counter(int idx, u32 val)
+{
+ struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ u32 tmp = 0;
+ unsigned long flags;
+
+ /*
+ * This _shouldn't_ happen, but if it does, then we can just
+ * ignore the write, as the register is read-only and clear-on-write.
+ */
+ if (METAG_INST_COUNTER == idx)
+ return;
+
+ /*
+ * We'll keep the thread mask and event id, and just update the
+ * counter itself. Also, we should bound the value to 24 bits.
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ val &= 0x00ffffff;
+ tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
+ val |= tmp;
+ metag_out32(val, PERF_COUNT(idx));
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int metag_pmu_event_map(int idx)
+{
+ return metag_general_events[idx];
+}
+
+static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
+{
+ int idx = (int)dev;
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *event = cpuhw->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+ struct pt_regs *regs = get_irq_regs();
+ struct perf_sample_data sampledata;
+ unsigned long flags;
+ u32 counter = 0;
+
+ /*
+ * We need to stop the core temporarily from generating another
+ * interrupt while we disable this counter. However, we don't want
+ * to flag the counter as free.
+ */
+ __global_lock2(flags);
+ counter = metag_in32(PERF_COUNT(idx));
+ metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
+ __global_unlock2(flags);
+
+ /* Update the counts and reset the sample period */
+ metag_pmu_event_update(event, hwc, idx);
+ perf_sample_data_init(&sampledata, 0, hwc->last_period);
+ metag_pmu_event_set_period(event, hwc, idx);
+
+ /*
+ * Enable the counter again once core overflow processing has
+ * completed.
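+ * (Writing back the saved value restores the top byte - the event
+ * nibble and thread enable bits cleared above - which is what
+ * re-enables the counter.)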
+ */ + if (!perf_event_overflow(event, &sampledata, regs)) + metag_out32(counter, PERF_COUNT(idx)); + + return IRQ_HANDLED; +} + +static struct metag_pmu _metag_pmu = { + .handle_irq = metag_pmu_counter_overflow, + .enable = metag_pmu_enable_counter, + .disable = metag_pmu_disable_counter, + .read = metag_pmu_read_counter, + .write = metag_pmu_write_counter, + .event_map = metag_pmu_event_map, + .cache_events = &metag_pmu_cache_events, + .max_period = MAX_PERIOD, + .max_events = MAX_HWEVENTS, +}; + +/* PMU CPU hotplug notifier */ +static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)hcpu; + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + + if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) + return NOTIFY_DONE; + + memset(cpuc, 0, sizeof(struct cpu_hw_events)); + raw_spin_lock_init(&cpuc->pmu_lock); + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata metag_pmu_notifier = { + .notifier_call = metag_pmu_cpu_notify, +}; + +/* PMU Initialisation */ +static int __init init_hw_perf_events(void) +{ + int ret = 0, cpu; + u32 version = *(u32 *)METAC_ID; + int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S; + int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) + >> METAC_ID_REV_S; + + /* Not a Meta 2 core, then not supported */ + if (0x02 > major) { + pr_info("no hardware counter support available\n"); + goto out; + } else if (0x02 == major) { + metag_pmu = &_metag_pmu; + + if (min_rev < 0x0104) { + /* + * A core without overflow interrupts, and clear-on- + * write counters. + */ + metag_pmu->handle_irq = NULL; + metag_pmu->write = NULL; + metag_pmu->max_period = 0; + } + + metag_pmu->name = "Meta 2"; + metag_pmu->version = version; + metag_pmu->pmu = pmu; + } + + pr_info("enabled with %s PMU driver, %d counters available\n", + metag_pmu->name, metag_pmu->max_events); + + /* Initialise the active events and reservation mutex */ + atomic_set(&metag_pmu->active_events, 0); + mutex_init(&metag_pmu->reserve_mutex); + + /* Clear the counters */ + metag_out32(0, PERF_COUNT(0)); + metag_out32(0, PERF_COUNT(1)); + + for_each_possible_cpu(cpu) { + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + + memset(cpuc, 0, sizeof(struct cpu_hw_events)); + raw_spin_lock_init(&cpuc->pmu_lock); + } + + register_cpu_notifier(&metag_pmu_notifier); + ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW); +out: + return ret; +} +early_initcall(init_hw_perf_events); diff --git a/arch/metag/kernel/perf/perf_event.h b/arch/metag/kernel/perf/perf_event.h new file mode 100644 index 000000000000..fd10a1345b67 --- /dev/null +++ b/arch/metag/kernel/perf/perf_event.h @@ -0,0 +1,106 @@ +/* + * Meta performance counter support. + * Copyright (C) 2012 Imagination Technologies Ltd + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef METAG_PERF_EVENT_H_ +#define METAG_PERF_EVENT_H_ + +#include +#include +#include + +/* For performance counter definitions */ +#include + +/* + * The Meta core has two performance counters, with 24-bit resolution. Newer + * cores generate an overflow interrupt on transition from 0xffffff to 0. 
+ *
+ * Each counter consists of the counter id, hardware thread id, and the count
+ * itself; each counter can be assigned to multiple hardware threads at any
+ * one time, with the returned count being an aggregate of events. A small
+ * number of events are thread global, i.e. they count the aggregate of all
+ * threads' events, regardless of the thread selected.
+ *
+ * Newer cores can store an arbitrary 24-bit number in the counter, whereas
+ * older cores will clear the counter bits on write.
+ *
+ * We also have a pseudo-counter in the form of the thread active cycles
+ * counter (which, incidentally, is also bound to
+ */
+
+#define MAX_HWEVENTS 3
+#define MAX_PERIOD ((1UL << 24) - 1)
+#define METAG_INST_COUNTER (MAX_HWEVENTS - 1)
+
+/**
+ * struct cpu_hw_events - a processor core's performance events
+ * @events: an array of perf_events active for a given index.
+ * @used_mask: a bitmap of in-use counters.
+ * @pmu_lock: a perf counter lock
+ *
+ * This is a per-cpu/core structure that maintains a record of its
+ * performance counters' state.
+ */
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ raw_spinlock_t pmu_lock;
+};
+
+/**
+ * struct metag_pmu - the Meta PMU structure
+ * @pmu: core pmu structure
+ * @name: pmu name
+ * @version: core version
+ * @handle_irq: overflow interrupt handler
+ * @enable: enable a counter
+ * @disable: disable a counter
+ * @read: read the value of a counter
+ * @write: write a value to a counter
+ * @event_map: kernel event to counter event id map
+ * @cache_events: kernel cache counter to core cache counter map
+ * @max_period: maximum value of the counter before overflow
+ * @max_events: maximum number of counters available at any one time
+ * @active_events: number of active counters
+ * @reserve_mutex: counter reservation mutex
+ *
+ * This describes the main functionality and data used by the performance
+ * event core.
+ */
+struct metag_pmu {
+ struct pmu pmu;
+ const char *name;
+ u32 version;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct hw_perf_event *evt, int idx);
+ void (*disable)(struct hw_perf_event *evt, int idx);
+ u64 (*read)(int idx);
+ void (*write)(int idx, u32 val);
+ int (*event_map)(int idx);
+ const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ u32 max_period;
+ int max_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+};
+
+/* Convenience macros for accessing the perf counters */
+#define PERF_COUNT(x) (PERF_COUNT0 + (sizeof(u64) * (x)))
+#define PERF_ICORE(x) (PERF_ICORE0 + (sizeof(u64) * (x)))
+#define PERF_CHAN(x) (PERF_CHAN0 + (sizeof(u64) * (x)))
+
+/* Cache index macros */
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_OP_UNSUPPORTED 0xfffe
+#define CACHE_OP_NONSENSE 0xffff
+
+#endif diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c new file mode 100644 index 000000000000..315633461a94 --- /dev/null +++ b/arch/metag/kernel/perf_callchain.c @@ -0,0 +1,96 @@
+/*
+ * Perf callchain handling code.
+ *
+ * Based on the ARM perf implementation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+static bool is_valid_call(unsigned long calladdr)
+{
+ unsigned int callinsn;
+
+ /* Check the possible return address is aligned.
 */
+ if (!(calladdr & 0x3)) {
+ if (!get_user(callinsn, (unsigned int *)calladdr)) {
+ /* Check for CALLR or SWAP PC,D1RtP. */
+ if ((callinsn & 0xff000000) == 0xab000000 ||
+ callinsn == 0xa3200aa0)
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct metag_frame __user *
+user_backtrace(struct metag_frame __user *user_frame,
+ struct perf_callchain_entry *entry)
+{
+ struct metag_frame frame;
+ unsigned long calladdr;
+
+ /* We cannot rely on having frame pointers in user code. */
+ while (1) {
+ /* Also check accessibility of one struct frame beyond */
+ if (!access_ok(VERIFY_READ, user_frame, sizeof(frame)))
+ return 0;
+ if (__copy_from_user_inatomic(&frame, user_frame,
+ sizeof(frame)))
+ return 0;
+
+ --user_frame;
+
+ calladdr = frame.lr - 4;
+ if (is_valid_call(calladdr)) {
+ perf_callchain_store(entry, calladdr);
+ return user_frame;
+ }
+ }
+
+ return 0;
+}
+
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ unsigned long sp = regs->ctx.AX[0].U0;
+ struct metag_frame __user *frame;
+
+ frame = (struct metag_frame __user *)sp;
+
+ --frame;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame)
+ frame = user_backtrace(frame, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+ void *data)
+{
+ struct perf_callchain_entry *entry = data;
+ perf_callchain_store(entry, fr->pc);
+ return 0;
+}
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ struct stackframe fr;
+
+ fr.fp = regs->ctx.AX[1].U0;
+ fr.sp = regs->ctx.AX[0].U0;
+ fr.lr = regs->ctx.DX[4].U1;
+ fr.pc = regs->ctx.CurrPC;
+ walk_stackframe(&fr, callchain_trace, entry);
+}
-- cgit v1.2.3 From 00512bdd4573674d10af1c1d60328b4b0f9dcafd Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 5 Oct 2012 16:27:31 +0100 Subject: metag: ftrace support Add ftrace support for metag.
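
As a brief aside on the mechanism (an illustrative stand-alone sketch, not part of the patch: the target address in main() is invented, though the instruction templates and shift amounts match those in arch/metag/kernel/ftrace.c below), each patched call site becomes a MOVT/CALL pair whose immediate fields carry the high and low halves of the target address:

#include <stdint.h>
#include <stdio.h>

/* Templates as defined in arch/metag/kernel/ftrace.c */
#define D04_MOVT_TEMPLATE 0x02200005
#define D04_CALL_TEMPLATE 0xAC200005

/* Split a 32-bit target address across the MOVT/CALL immediate fields. */
static void encode_call(uint32_t addr, uint32_t insn[2])
{
	uint32_t hi16 = (addr & 0xffff0000) >> 13;
	uint32_t low16 = (addr & 0x0000ffff) << 3;

	insn[0] = D04_MOVT_TEMPLATE | hi16;
	insn[1] = D04_CALL_TEMPLATE | low16;
}

int main(void)
{
	uint32_t insn[2];

	/* Hypothetical target address, for illustration only. */
	encode_call(0x80123456, insn);
	printf("MOVT insn: 0x%08x\nCALL insn: 0x%08x\n", insn[0], insn[1]);
	return 0;
}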
Signed-off-by: James Hogan Cc: Frederic Weisbecker Cc: Ingo Molnar Reviewed-by: Steven Rostedt --- arch/metag/Kconfig | 5 ++ arch/metag/include/asm/Kbuild | 1 - arch/metag/include/asm/ftrace.h | 23 ++++++++ arch/metag/kernel/ftrace.c | 126 ++++++++++++++++++++++++++++++++++++++++ arch/metag/kernel/ftrace_stub.S | 76 ++++++++++++++++++++++++ arch/metag/kernel/metag_ksyms.c | 5 ++ scripts/recordmcount.c | 13 +++++ 7 files changed, 248 insertions(+), 1 deletion(-) create mode 100644 arch/metag/include/asm/ftrace.h create mode 100644 arch/metag/kernel/ftrace.c create mode 100644 arch/metag/kernel/ftrace_stub.S (limited to 'arch/metag/kernel') diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 47972025818f..f6846ad5d3a8 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -12,7 +12,12 @@ config METAG select GENERIC_SMP_IDLE_THREAD select HAVE_64BIT_ALIGNED_ACCESS select HAVE_ARCH_TRACEHOOK + select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_GENERIC_HARDIRQS select HAVE_IRQ_WORK select HAVE_KERNEL_BZIP2 diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 3a139810559e..6ae0ccb632cb 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -11,7 +11,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h generic-y += hw_irq.h diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h new file mode 100644 index 000000000000..2901f0f7d944 --- /dev/null +++ b/arch/metag/include/asm/ftrace.h @@ -0,0 +1,23 @@ +#ifndef _ASM_METAG_FTRACE +#define _ASM_METAG_FTRACE + +#ifdef CONFIG_FUNCTION_TRACER +#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ +extern void mcount_wrapper(void); +#define MCOUNT_ADDR ((long)(mcount_wrapper)) + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { + /* No extra data needed on metag */ +}; +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* _ASM_METAG_FTRACE */ diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c new file mode 100644 index 000000000000..a774f321643f --- /dev/null +++ b/arch/metag/kernel/ftrace.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2008 Imagination Technologies Ltd. + * Licensed under the GPL + * + * Dynamic ftrace support. + */ + +#include +#include +#include + +#include + +#define D04_MOVT_TEMPLATE 0x02200005 +#define D04_CALL_TEMPLATE 0xAC200005 +#define D1RTP_MOVT_TEMPLATE 0x03200005 +#define D1RTP_CALL_TEMPLATE 0xAC200006 + +static const unsigned long NOP[2] = {0xa0fffffe, 0xa0fffffe}; +static unsigned long movt_and_call_insn[2]; + +static unsigned char *ftrace_nop_replace(void) +{ + return (char *)&NOP[0]; +} + +static unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) +{ + unsigned long hi16, low16; + + hi16 = (addr & 0xffff0000) >> 13; + low16 = (addr & 0x0000ffff) << 3; + + /* + * The compiler makes the call to mcount_wrapper() + * (Meta's wrapper around mcount()) through the register + * D0.4. So whenever we're patching one of those compiler-generated + * calls we also need to go through D0.4. Otherwise use D1RtP. 
+ */ + if (pc == (unsigned long)&ftrace_call) { + writel(D1RTP_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]); + writel(D1RTP_CALL_TEMPLATE | low16, &movt_and_call_insn[1]); + } else { + writel(D04_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]); + writel(D04_CALL_TEMPLATE | low16, &movt_and_call_insn[1]); + } + + return (unsigned char *)&movt_and_call_insn[0]; +} + +static int ftrace_modify_code(unsigned long pc, unsigned char *old_code, + unsigned char *new_code) +{ + unsigned char replaced[MCOUNT_INSN_SIZE]; + + /* + * Note: Due to modules and __init, code can + * disappear and change, we need to protect against faulting + * as well as code changing. + * + * No real locking needed, this code is run through + * kstop_machine. + */ + + /* read the text we want to modify */ + if (probe_kernel_read(replaced, (void *)pc, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure it is what we expect it to be */ + if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) + return -EINVAL; + + /* replace the text with the new text */ + if (probe_kernel_write((void *)pc, new_code, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + + return 0; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + int ret; + unsigned long pc; + unsigned char old[MCOUNT_INSN_SIZE], *new; + + pc = (unsigned long)&ftrace_call; + memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); + new = ftrace_call_replace(pc, (unsigned long)func); + ret = ftrace_modify_code(pc, old, new); + + return ret; +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_call_replace(ip, addr); + new = ftrace_nop_replace(); + + return ftrace_modify_code(ip, old, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(); + new = ftrace_call_replace(ip, addr); + + return ftrace_modify_code(ip, old, new); +} + +/* run from kstop_machine */ +int __init ftrace_dyn_arch_init(void *data) +{ + /* The return code is returned via data */ + writel(0, data); + + return 0; +} diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S new file mode 100644 index 000000000000..e70bff745bdd --- /dev/null +++ b/arch/metag/kernel/ftrace_stub.S @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2008 Imagination Technologies Ltd. 
+ * Licensed under the GPL + * + */ + +#include + + .text +#ifdef CONFIG_DYNAMIC_FTRACE + .global _mcount_wrapper + .type _mcount_wrapper,function +_mcount_wrapper: + MOV PC,D0.4 + + .global _ftrace_caller + .type _ftrace_caller,function +_ftrace_caller: + MOVT D0Re0,#HI(_function_trace_stop) + ADD D0Re0,D0Re0,#LO(_function_trace_stop) + GETD D0Re0,[D0Re0] + CMP D0Re0,#0 + BEQ $Lcall_stub + MOV PC,D0.4 +$Lcall_stub: + MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 + MOV D1Ar1, D0.4 + MOV D0Ar2, D1RtP + SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE + + .global _ftrace_call +_ftrace_call: + MOVT D1RtP,#HI(_ftrace_stub) + CALL D1RtP,#LO(_ftrace_stub) + GETL D0.4, D1RtP, [A0StP++#(-8)] + GETL D0Ar2, D1Ar1, [A0StP++#(-8)] + GETL D0Ar4, D1Ar3, [A0StP++#(-8)] + GETL D0Ar6, D1Ar5, [A0StP++#(-8)] + MOV PC, D0.4 +#else + + .global _mcount_wrapper + .type _mcount_wrapper,function +_mcount_wrapper: + MOVT D0Re0,#HI(_function_trace_stop) + ADD D0Re0,D0Re0,#LO(_function_trace_stop) + GETD D0Re0,[D0Re0] + CMP D0Re0,#0 + BEQ $Lcall_mcount + MOV PC,D0.4 +$Lcall_mcount: + MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 + MOV D1Ar1, D0.4 + MOV D0Ar2, D1RtP + MOVT D0Re0,#HI(_ftrace_trace_function) + ADD D0Re0,D0Re0,#LO(_ftrace_trace_function) + GET D1Ar3,[D0Re0] + MOVT D1Re0,#HI(_ftrace_stub) + ADD D1Re0,D1Re0,#LO(_ftrace_stub) + CMP D1Ar3,D1Re0 + BEQ $Ltrace_exit + MOV D1RtP,D1Ar3 + SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE + SWAP PC,D1RtP +$Ltrace_exit: + GETL D0.4, D1RtP, [A0StP++#(-8)] + GETL D0Ar2, D1Ar1, [A0StP++#(-8)] + GETL D0Ar4, D1Ar3, [A0StP++#(-8)] + GETL D0Ar6, D1Ar5, [A0StP++#(-8)] + MOV PC, D0.4 + +#endif /* CONFIG_DYNAMIC_FTRACE */ + + .global _ftrace_stub +_ftrace_stub: + MOV PC,D1RtP diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index c73ebd3db417..3cc4d1d4b322 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -10,6 +10,7 @@ #include #include #include +#include #include /* uaccess symbols */ @@ -73,3 +74,7 @@ EXPORT_SYMBOL(div_s64); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(mcount_wrapper); +#endif diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index ee52cb8e17ad..9c22317778eb 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -33,6 +33,13 @@ #include #include +#ifndef EM_METAG +/* Remove this when these make it to the standard system elf.h. */ +#define EM_METAG 174 +#define R_METAG_ADDR32 2 +#define R_METAG_NONE 3 +#endif + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. 
*/ static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ @@ -341,6 +348,12 @@ do_file(char const *const fname) altmcount = "__gnu_mcount_nc"; break; case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; + case EM_METAG: reltype = R_METAG_ADDR32; + altmcount = "_mcount_wrapper"; + rel_type_nop = R_METAG_NONE; + /* We happen to have the same requirement as MIPS */ + is_fake_mcount32 = MIPS32_is_fake_mcount; + break; case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break; case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break; case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break; -- cgit v1.2.3 From ae85ac71b7433fa974759109c4380c620258f07f Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 21 Sep 2012 17:38:15 +0100 Subject: metag: Add JTAG Debug Adapter (DA) support Add basic JTAG Debug Adapter (DA) support so that drivers which communicate with the DA can detect whether one is actually present (otherwise the target will halt indefinitely). This allows the metag_da TTY driver and imgdafs filesystem driver to be built, updates defconfigs, and sets up the metag_da console early if it's configured in. Signed-off-by: James Hogan --- MAINTAINERS | 2 ++ arch/metag/Kconfig | 12 ++++++++++ arch/metag/configs/meta1_defconfig | 3 +++ arch/metag/configs/meta2_defconfig | 3 +++ arch/metag/configs/meta2_smp_defconfig | 3 +++ arch/metag/include/asm/da.h | 43 ++++++++++++++++++++++++++++++++++ arch/metag/kernel/Makefile | 1 + arch/metag/kernel/da.c | 23 ++++++++++++++++++ arch/metag/kernel/setup.c | 15 ++++++++++++ 9 files changed, 105 insertions(+) create mode 100644 arch/metag/include/asm/da.h create mode 100644 arch/metag/kernel/da.c (limited to 'arch/metag/kernel') diff --git a/MAINTAINERS b/MAINTAINERS index 9b2b7699da4d..a6a0c352e559 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5042,6 +5042,8 @@ F: Documentation/devicetree/bindings/metag/ F: drivers/clocksource/metag_generic.c F: drivers/irqchip/irq-metag.c F: drivers/irqchip/irq-metag-ext.c +F: drivers/tty/metag_da.c +F: fs/imgdafs/ MICROBLAZE ARCHITECTURE M: Michal Simek diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index f6846ad5d3a8..30adc7875daa 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -214,6 +214,18 @@ config METAG_PERFCOUNTER_IRQS When disabled, Performance Counters information will be collected based on Timer Interrupt. +config METAG_DA + bool "DA support" + help + Say Y if you plan to use a DA debug adapter with Linux. The presence + of the DA will be detected automatically at boot, so it is safe to say + Y to this option even when booting without a DA. + + This enables support for services provided by DA JTAG debug adapters, + such as: + - communication over DA channels (such as the console driver). + - use of the DA filesystem. 
+ menu "Boot options" config METAG_BUILTIN_DTB diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig index 837c235ab67a..ad663ca53208 100644 --- a/arch/metag/configs/meta1_defconfig +++ b/arch/metag/configs/meta1_defconfig @@ -14,6 +14,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set CONFIG_FLATMEM_MANUAL=y CONFIG_META12_FPGA=y +CONFIG_METAG_DA=y CONFIG_HZ_100=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -27,6 +28,8 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +CONFIG_DA_TTY=y +CONFIG_DA_CONSOLE=y # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig index e5bada83dd76..47922e9306af 100644 --- a/arch/metag/configs/meta2_defconfig +++ b/arch/metag/configs/meta2_defconfig @@ -16,6 +16,7 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_METAG_L2C=y CONFIG_FLATMEM_MANUAL=y CONFIG_METAG_HALT_ON_PANIC=y +CONFIG_METAG_DA=y CONFIG_HZ_100=y CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set @@ -28,6 +29,8 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +CONFIG_DA_TTY=y +CONFIG_DA_CONSOLE=y # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig index 41983a20db72..f5082505872d 100644 --- a/arch/metag/configs/meta2_smp_defconfig +++ b/arch/metag/configs/meta2_smp_defconfig @@ -17,6 +17,7 @@ CONFIG_METAG_L2C=y CONFIG_FLATMEM_MANUAL=y CONFIG_METAG_HALT_ON_PANIC=y CONFIG_SMP=y +CONFIG_METAG_DA=y CONFIG_HZ_100=y CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set @@ -29,6 +30,8 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +CONFIG_DA_TTY=y +CONFIG_DA_CONSOLE=y # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set diff --git a/arch/metag/include/asm/da.h b/arch/metag/include/asm/da.h new file mode 100644 index 000000000000..81bd5212fb03 --- /dev/null +++ b/arch/metag/include/asm/da.h @@ -0,0 +1,43 @@ +/* + * Meta DA JTAG debugger control. + * + * Copyright 2012 Imagination Technologies Ltd. + */ + +#ifndef _METAG_DA_H_ +#define _METAG_DA_H_ + +#ifdef CONFIG_METAG_DA + +#include +#include + +extern bool _metag_da_present; + +/** + * metag_da_enabled() - Find whether a DA is currently enabled. + * + * Returns: true if a DA was detected, false if not. + */ +static inline bool metag_da_enabled(void) +{ + return _metag_da_present; +} + +/** + * metag_da_probe() - Try and detect a connected DA. + * + * This is used at start up to detect whether a DA is active. + * + * Returns: 0 on detection, -err otherwise. 
+ */ +int __init metag_da_probe(void); + +#else /* !CONFIG_METAG_DA */ + +#define metag_da_enabled() false +#define metag_da_probe() do {} while (0) + +#endif + +#endif /* _METAG_DA_H_ */ diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile index a5e4ba6fd20a..d7675f4a5df8 100644 --- a/arch/metag/kernel/Makefile +++ b/arch/metag/kernel/Makefile @@ -28,6 +28,7 @@ obj-y += user_gateway.o obj-$(CONFIG_PERF_EVENTS) += perf/ obj-$(CONFIG_METAG_COREMEM) += coremem.o +obj-$(CONFIG_METAG_DA) += da.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace_stub.o obj-$(CONFIG_MODULES) += metag_ksyms.o diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c new file mode 100644 index 000000000000..52aabb658fde --- /dev/null +++ b/arch/metag/kernel/da.c @@ -0,0 +1,23 @@ +/* + * Meta DA JTAG debugger control. + * + * Copyright 2012 Imagination Technologies Ltd. + */ + + +#include +#include +#include +#include + +bool _metag_da_present; + +int __init metag_da_probe(void) +{ + _metag_da_present = (metag_in32(T0VECINT_BHALT) == 1); + if (_metag_da_present) + pr_info("DA present\n"); + else + pr_info("DA not present\n"); + return 0; +} diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 74e2c1f812a5..89f9cdc389e8 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,11 @@ extern char _heap_start[]; extern u32 __dtb_start[]; #endif +#ifdef CONFIG_DA_CONSOLE +/* Our early channel based console driver */ +extern struct console dash_console; +#endif + struct machine_desc *machine_desc __initdata; /* @@ -180,6 +186,15 @@ void __init setup_arch(char **cmdline_p) metag_cache_probe(); + metag_da_probe(); +#ifdef CONFIG_DA_CONSOLE + if (metag_da_enabled()) { + /* An early channel based console driver */ + register_console(&dash_console); + add_preferred_console("ttyDA", 1, NULL); + } +#endif + /* try interpreting the argument as a device tree */ machine_desc = setup_machine_fdt(original_cmd_line); /* if it doesn't look like a device tree it must be a command line */ -- cgit v1.2.3 From 82f0167aa4c4bbc06b5a2e1e83d252792f7c5754 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 31 Jan 2013 13:38:48 +0000 Subject: metag: kernel/setup.c: sort includes Sort includes in kernel/setup.c. Signed-off-by: James Hogan --- arch/metag/kernel/setup.c | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 89f9cdc389e8..aaebc56270ec 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -5,43 +5,43 @@ * */ -#include -#include -#include +#include +#include +#include #include -#include +#include #include -#include #include -#include -#include #include -#include -#include #include -#include -#include -#include -#include +#include +#include #include +#include #include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include +#include #include +#include +#include #include -#include #include -#include -#include #include -#include -#include +#include +#include +#include +#include +#include +#include +#include /* PRIV protect as many registers as possible. 
*/ #define DEFAULT_PRIV 0xff0f7f00 -- cgit v1.2.3 From c787c2d62fe0c482f5fb3e5b869cd262fe69b244 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 31 Jan 2013 13:27:35 +0000 Subject: metag: make TXPRIVEXT bits explicit Define PRIV_BITS using explicit constants from rather than with a hard coded value. This also adds a couple of missing definitions for the TXPRIVEXT priv bits for protecting writes to TXTIMER and the trace registers. Signed-off-by: James Hogan --- arch/metag/include/asm/metag_regs.h | 4 ++++ arch/metag/kernel/setup.c | 34 +++++++++++++++++++++++++++------- 2 files changed, 31 insertions(+), 7 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h index 022fcad768f7..acf4b8e6e9d1 100644 --- a/arch/metag/include/asm/metag_regs.h +++ b/arch/metag/include/asm/metag_regs.h @@ -414,6 +414,10 @@ #define TXPRIVEXT_REGNUM 29 #define TXPRIVEXT_COPRO_BITS 0xFF000000 /* Co-processor 0-7 */ #define TXPRIVEXT_COPRO_S 24 +#ifndef METAC_1_2 +#define TXPRIVEXT_TXTIMER_BIT 0x00080000 /* TXTIMER priv */ +#define TXPRIVEXT_TRACE_BIT 0x00040000 /* TTEXEC|TTCTRL|GTEXEC */ +#endif #define TXPRIVEXT_TXTRIGGER_BIT 0x00020000 /* TXSTAT|TXMASK|TXPOLL */ #define TXPRIVEXT_TXGBLCREG_BIT 0x00010000 /* Global common regs */ #define TXPRIVEXT_CBPRIV_BIT 0x00008000 /* Mem i/f dump priv */ diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index aaebc56270ec..dcb1d6d51ce4 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -43,18 +44,37 @@ #include #include -/* PRIV protect as many registers as possible. */ -#define DEFAULT_PRIV 0xff0f7f00 - -/* Enable unaligned access checking. */ -#define UNALIGNED_PRIV 0x00000010 +/* Priv protect as many registers as possible. */ +#define DEFAULT_PRIV (TXPRIVEXT_COPRO_BITS | \ + TXPRIVEXT_TXTRIGGER_BIT | \ + TXPRIVEXT_TXGBLCREG_BIT | \ + TXPRIVEXT_ILOCK_BIT | \ + TXPRIVEXT_TXITACCYC_BIT | \ + TXPRIVEXT_TXDIVTIME_BIT | \ + TXPRIVEXT_TXAMAREGX_BIT | \ + TXPRIVEXT_TXTIMERI_BIT | \ + TXPRIVEXT_TXSTATUS_BIT | \ + TXPRIVEXT_TXDISABLE_BIT) + +/* Meta2 specific bits. */ +#ifdef CONFIG_METAG_META12 +#define META2_PRIV 0 +#else +#define META2_PRIV (TXPRIVEXT_TXTIMER_BIT | \ + TXPRIVEXT_TRACE_BIT) +#endif +/* Unaligned access checking bits. 
*/ #ifdef CONFIG_METAG_UNALIGNED -#define PRIV_BITS (DEFAULT_PRIV | UNALIGNED_PRIV) +#define UNALIGNED_PRIV TXPRIVEXT_ALIGNREW_BIT #else -#define PRIV_BITS DEFAULT_PRIV +#define UNALIGNED_PRIV 0 #endif +#define PRIV_BITS (DEFAULT_PRIV | \ + META2_PRIV | \ + UNALIGNED_PRIV) + extern char _heap_start[]; #ifdef CONFIG_METAG_BUILTIN_DTB -- cgit v1.2.3 From 3d6b7bb0a2c518d24bc3036f9bbb6f3fb35462c3 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 31 Jan 2013 13:42:03 +0000 Subject: metag: protect more non-MMU memory regions Rename setup_txprivext() to setup_priv() and add initialisation of some more per-thread privilege protection registers: - TxPRIVSYSR: 0x04400000-0x047fffff 0x05000000-0x07ffffff 0x84000000-0x87ffffff - TxPIOREG: 0x02000000-0x02ffffff 0x04800000-0x048fffff - TxSYREG: 0x04000000-0x04000fff (except write fetch system event) Signed-off-by: James Hogan --- arch/metag/include/asm/processor.h | 2 +- arch/metag/kernel/setup.c | 45 +++++++++++++++++++++++++++++++++----- arch/metag/kernel/smp.c | 2 +- 3 files changed, 42 insertions(+), 7 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index b7e25289f3a3..9b029a7911c3 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -154,7 +154,7 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() -extern void setup_txprivext(void); +extern void setup_priv(void); static inline unsigned int hard_processor_id(void) { diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index dcb1d6d51ce4..9803ca4f6510 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -75,6 +76,32 @@ META2_PRIV | \ UNALIGNED_PRIV) +/* + * Protect access to: + * 0x06000000-0x07ffffff Direct mapped region + * 0x05000000-0x05ffffff MMU table region (Meta1) + * 0x04400000-0x047fffff Cache flush region + * 0x84000000-0x87ffffff Core cache memory region (Meta2) + * + * Allow access to: + * 0x80000000-0x81ffffff Core code memory region (Meta2) + */ +#ifdef CONFIG_METAG_META12 +#define PRIVSYSR_BITS TXPRIVSYSR_ALL_BITS +#else +#define PRIVSYSR_BITS (TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT) +#endif + +/* Protect all 0x02xxxxxx and 0x048xxxxx. */ +#define PIOREG_BITS 0xffffffff + +/* + * Protect all 0x04000xx0 (system events) + * except write combiner flush and write fence (system events 4 and 5). + */ +#define PSYREG_BITS 0xfffffffb + + extern char _heap_start[]; #ifdef CONFIG_METAG_BUILTIN_DTB @@ -371,7 +398,7 @@ void __init setup_arch(char **cmdline_p) paging_init(heap_end); - setup_txprivext(); + setup_priv(); /* Setup the boot cpu's mapping. The rest will be setup below. */ cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id(); @@ -531,13 +558,21 @@ void __init metag_start_kernel(char *args) start_kernel(); } -/* - * Setup TXPRIVEXT register to be prevent userland from touching our - * precious registers. +/** + * setup_priv() - Set up privilege protection registers. + * + * Set up privilege protection registers such as TXPRIVEXT to prevent userland + * from touching our precious registers and sensitive memory areas. 
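+ *
+ * (As well as TXPRIVEXT this now programs the per-thread TxPRIVSYSR,
+ * TxPIOREG and TxSYREG protection registers described in the commit
+ * message above.)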
*/ -void setup_txprivext(void) +void setup_priv(void) { + unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S; + __core_reg_set(TXPRIVEXT, PRIV_BITS); + + metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset); + metag_out32(PIOREG_BITS, T0PIOREG + offset); + metag_out32(PSYREG_BITS, T0PSYREG + offset); } PTBI pTBI_get(unsigned int cpu) diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index d1163127eb68..4b6d1f14df32 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -268,7 +268,7 @@ asmlinkage void secondary_start_kernel(void) preempt_disable(); - setup_txprivext(); + setup_priv(); /* * Enable local interrupts. -- cgit v1.2.3 From c838e72a35e49ea51c39c2c634ece028fa49c565 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 11 Feb 2013 11:47:02 +0000 Subject: metag: export clear_page and copy_page Various file systems use clear_page() and copy_page(), so when they're built as modules we get build errors like the following: ERROR: "clear_page" [fs/ntfs/ntfs.ko] undefined! ERROR: "copy_page" [fs/nilfs2/nilfs2.ko] undefined! Therefore export these functions to modules from metag_ksyms.c to fix the errors. This was hit by a randconfig build. Signed-off-by: James Hogan --- arch/metag/kernel/metag_ksyms.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index 3cc4d1d4b322..63606f8aa86a 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -37,6 +37,8 @@ EXPORT_SYMBOL(get_trigger_mask); EXPORT_SYMBOL(global_trigger_mask); #endif +EXPORT_SYMBOL(clear_page); +EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(pfn_base); -- cgit v1.2.3 From fa771d029af8f8a23089c97b6ab6a5745e98ad7e Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 12 Feb 2013 16:04:53 +0000 Subject: metag: move irq enable out of irqflags.h on SMP The SMP version of arch_local_irq_enable() uses preempt_disable(), but doesn't include causing the following errors on SMP when pstore/ftrace is enabled (caught by buildbot smp allyesconfig): In file included from include/linux/irqflags.h:15, from fs/pstore/ftrace.c:16: arch/metag/include/asm/irqflags.h: In function 'arch_local_irq_enable': arch/metag/include/asm/irqflags.h:84: error: implicit declaration of function 'preempt_disable' arch/metag/include/asm/irqflags.h:86: error: implicit declaration of function 'preempt_enable_no_resched' However cannot be easily included from as it can cause circular include dependencies in the !SMP case, and potentially in the SMP case in the future. Therefore move the SMP implementation of arch_local_irq_enable() into traps.c and use an inline version of get_trigger_mask() which is also defined in traps.c for SMP. This adds an extra layer of function call / stack push when preempt_disable needs to call other functions, however in the non-preemptive SMP case it should be about as fast, as it was already calling the get_trigger_mask() function which is now used inline. 
Signed-off-by: James Hogan --- arch/metag/include/asm/irqflags.h | 11 +++++------ arch/metag/kernel/traps.c | 16 +++++++++++++++- 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h index cba5e135bc9a..339b16f062eb 100644 --- a/arch/metag/include/asm/irqflags.h +++ b/arch/metag/include/asm/irqflags.h @@ -78,16 +78,15 @@ static inline void arch_local_irq_disable(void)
 asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
 }
 
-static inline void arch_local_irq_enable(void)
-{
 #ifdef CONFIG_SMP
- preempt_disable();
- arch_local_irq_restore(get_trigger_mask());
- preempt_enable_no_resched();
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
 #else
+static inline void arch_local_irq_enable(void)
+{
 arch_local_irq_restore(get_trigger_mask());
-#endif
 }
+#endif
 
 #endif /* (__ASSEMBLY__) */
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 1ad363ce1ee9..202be405771e 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -776,17 +777,30 @@ int traps_restore_context(void)
 #endif
 
 #ifdef CONFIG_SMP
-unsigned int get_trigger_mask(void)
+static inline unsigned int _get_trigger_mask(void)
 {
 unsigned long cpu = smp_processor_id();
 return per_cpu(trigger_mask, cpu);
 }
 
+unsigned int get_trigger_mask(void)
+{
+ return _get_trigger_mask();
+}
+
 static void set_trigger_mask(unsigned int mask)
 {
 unsigned long cpu = smp_processor_id();
 per_cpu(trigger_mask, cpu) = mask;
 }
+
+void arch_local_irq_enable(void)
+{
+ preempt_disable();
+ arch_local_irq_restore(_get_trigger_mask());
+ preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(arch_local_irq_enable);
 #else
 static void set_trigger_mask(unsigned int mask)
 {
-- cgit v1.2.3 From 9fb4aa8723f6b332ee3089dc59c9ff7dcf2c0d47 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 13 Feb 2013 12:19:03 +0000 Subject: metag: move traps.c exports out of metag_ksyms.c It's less error prone to have function symbols exported immediately after the function rather than in metag_ksyms.c. Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in traps.c into traps.c Signed-off-by: James Hogan --- arch/metag/kernel/metag_ksyms.c | 5 ----- arch/metag/kernel/traps.c | 3 +++ 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index 63606f8aa86a..138317cfb518 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -31,11 +31,6 @@ EXPORT_SYMBOL(pTBI_get);
 EXPORT_SYMBOL(meta_memoffset);
 EXPORT_SYMBOL(kick_register_func);
 EXPORT_SYMBOL(kick_unregister_func);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(get_trigger_mask);
-#else
-EXPORT_SYMBOL(global_trigger_mask);
-#endif
 
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 202be405771e..5cad15601cff 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -8,6 +8,7 @@
 * for more details.
*/ +#include #include #include #include @@ -59,6 +60,7 @@ DECLARE_PER_CPU(PTBI, pTBI); static DEFINE_PER_CPU(unsigned int, trigger_mask); #else unsigned int global_trigger_mask; +EXPORT_SYMBOL(global_trigger_mask); #endif unsigned long per_cpu__stack_save[NR_CPUS]; @@ -787,6 +789,7 @@ unsigned int get_trigger_mask(void) { return _get_trigger_mask(); } +EXPORT_SYMBOL(get_trigger_mask); static void set_trigger_mask(unsigned int mask) { -- cgit v1.2.3 From aa29ec5f79f61ad880771d53d37a341cc7f5dfa6 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 13 Feb 2013 13:01:55 +0000 Subject: metag: move kick.c exports out of metag_ksyms.c It's less error prone to have function symbols exported immediately after the function rather than in metag_ksyms.c. Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in kick.c into kick.c Signed-off-by: James Hogan --- arch/metag/kernel/kick.c | 3 +++ arch/metag/kernel/metag_ksyms.c | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c index c3090962c45b..50fcbec98cd2 100644 --- a/arch/metag/kernel/kick.c +++ b/arch/metag/kernel/kick.c @@ -25,6 +25,7 @@ * the KICK handlers require access to a CPU's pTBI structure. So we * pass it as an argument. */ +#include #include #include #include @@ -48,6 +49,7 @@ void kick_register_func(struct kick_irq_handler *kh) spin_unlock_irqrestore(&kick_handlers_lock, flags); } +EXPORT_SYMBOL(kick_register_func); void kick_unregister_func(struct kick_irq_handler *kh) { @@ -59,6 +61,7 @@ void kick_unregister_func(struct kick_irq_handler *kh) spin_unlock_irqrestore(&kick_handlers_lock, flags); } +EXPORT_SYMBOL(kick_unregister_func); TBIRES kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index 138317cfb518..eef6576ab4e2 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__do_clear_user); EXPORT_SYMBOL(pTBI_get); EXPORT_SYMBOL(meta_memoffset); -EXPORT_SYMBOL(kick_register_func); -EXPORT_SYMBOL(kick_unregister_func); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); -- cgit v1.2.3 From 7293dbed9d8be1916034dbfcf2f203e96bd8fae1 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 13 Feb 2013 12:55:51 +0000 Subject: metag: move setup.c exports out of metag_ksyms.c It's less error prone to have function symbols exported immediately after the function rather than in metag_ksyms.c. 
Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in setup.c into setup.c Signed-off-by: James Hogan --- arch/metag/kernel/metag_ksyms.c | 5 ----- arch/metag/kernel/setup.c | 3 +++ 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index eef6576ab4e2..8ce3c3df2028 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -6,10 +6,8 @@ #include #include -#include #include #include -#include #include #include @@ -27,9 +25,6 @@ EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(strnlen_user); EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(pTBI_get); -EXPORT_SYMBOL(meta_memoffset); - EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(empty_zero_page); diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 9803ca4f6510..dd6c5ad73917 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -5,6 +5,7 @@ * */ +#include #include #include #include @@ -141,6 +142,7 @@ u8 hwthread_id_2_cpu[4] __read_mostly = { * probably only be used via them. */ unsigned int meta_memoffset; +EXPORT_SYMBOL(meta_memoffset); static char __initdata *original_cmd_line; @@ -579,6 +581,7 @@ PTBI pTBI_get(unsigned int cpu) { return per_cpu(pTBI, cpu); } +EXPORT_SYMBOL(pTBI_get); #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU) char capabilites[] = "dsp fpu"; -- cgit v1.2.3 From 9da3ee9aa8a9c9fcf94188d4c0ae280afbeb63c1 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 13 Feb 2013 12:57:10 +0000 Subject: metag: move usercopy.c exports out of metag_ksyms.c It's less error prone to have function symbols exported immediately after the function rather than in metag_ksyms.c. Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in usercopy.c into usercopy.c Signed-off-by: James Hogan --- arch/metag/kernel/metag_ksyms.c | 16 ---------------- arch/metag/lib/usercopy.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 16 deletions(-) (limited to 'arch/metag/kernel') diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c index 8ce3c3df2028..3d60cf31e264 100644 --- a/arch/metag/kernel/metag_ksyms.c +++ b/arch/metag/kernel/metag_ksyms.c @@ -2,29 +2,13 @@ #include #include #include -#include #include #include #include -#include #include #include -/* uaccess symbols */ -EXPORT_SYMBOL(__copy_user_zeroing); -EXPORT_SYMBOL(__copy_user); -EXPORT_SYMBOL(__get_user_asm_b); -EXPORT_SYMBOL(__get_user_asm_w); -EXPORT_SYMBOL(__get_user_asm_d); -EXPORT_SYMBOL(__put_user_asm_b); -EXPORT_SYMBOL(__put_user_asm_w); -EXPORT_SYMBOL(__put_user_asm_d); -EXPORT_SYMBOL(__put_user_asm_l); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(strnlen_user); -EXPORT_SYMBOL(__do_clear_user); - EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(empty_zero_page); diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index 92f6dbb34e83..b3ebfe9c8e88 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -10,6 +10,7 @@ * Modified for Meta by Will Newton. 

From 9da3ee9aa8a9c9fcf94188d4c0ae280afbeb63c1 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 13 Feb 2013 12:57:10 +0000
Subject: metag: move usercopy.c exports out of metag_ksyms.c

It's less error prone to have function symbols exported immediately
after the function rather than in metag_ksyms.c.

Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in
usercopy.c into usercopy.c.

Signed-off-by: James Hogan
---
 arch/metag/kernel/metag_ksyms.c | 16 ----------------
 arch/metag/lib/usercopy.c       | 13 +++++++++++++
 2 files changed, 13 insertions(+), 16 deletions(-)

(limited to 'arch/metag/kernel')

diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index 8ce3c3df2028..3d60cf31e264 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -2,29 +2,13 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
 #include
 #include
 
-/* uaccess symbols */
-EXPORT_SYMBOL(__copy_user_zeroing);
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__get_user_asm_b);
-EXPORT_SYMBOL(__get_user_asm_w);
-EXPORT_SYMBOL(__get_user_asm_d);
-EXPORT_SYMBOL(__put_user_asm_b);
-EXPORT_SYMBOL(__put_user_asm_w);
-EXPORT_SYMBOL(__put_user_asm_d);
-EXPORT_SYMBOL(__put_user_asm_l);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(strnlen_user);
-EXPORT_SYMBOL(__do_clear_user);
-
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index 92f6dbb34e83..b3ebfe9c8e88 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -10,6 +10,7 @@
  * Modified for Meta by Will Newton.
  */
 
+#include <linux/export.h>
 #include
 #include /* def of L1_CACHE_BYTES */
@@ -610,6 +611,7 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 
 	return retn;
 }
+EXPORT_SYMBOL(__copy_user);
 
 #define __asm_copy_from_user_1(to, from, ret) \
 	__asm_copy_user_cont(to, from, ret,	\
@@ -936,6 +938,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 
 	return retn + n;
 }
+EXPORT_SYMBOL(__copy_user_zeroing);
 
 #define __asm_clear_8x64(to, ret) \
 	asm volatile (			\
@@ -1089,6 +1092,7 @@ unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 
 	return retn;
 }
+EXPORT_SYMBOL(__do_clear_user);
 
 unsigned char __get_user_asm_b(const void __user *addr, long *err)
 {
@@ -1112,6 +1116,7 @@ unsigned char __get_user_asm_b(const void __user *addr, long *err)
 		: "D0FrT");
 	return x;
 }
+EXPORT_SYMBOL(__get_user_asm_b);
 
 unsigned short __get_user_asm_w(const void __user *addr, long *err)
 {
@@ -1135,6 +1140,7 @@ unsigned short __get_user_asm_w(const void __user *addr, long *err)
 		: "D0FrT");
 	return x;
 }
+EXPORT_SYMBOL(__get_user_asm_w);
 
 unsigned int __get_user_asm_d(const void __user *addr, long *err)
 {
@@ -1158,6 +1164,7 @@ unsigned int __get_user_asm_d(const void __user *addr, long *err)
 		: "D0FrT");
 	return x;
 }
+EXPORT_SYMBOL(__get_user_asm_d);
 
 long __put_user_asm_b(unsigned int x, void __user *addr)
 {
@@ -1181,6 +1188,7 @@ long __put_user_asm_b(unsigned int x, void __user *addr)
 		: "D0FrT");
 	return err;
 }
+EXPORT_SYMBOL(__put_user_asm_b);
 
 long __put_user_asm_w(unsigned int x, void __user *addr)
 {
@@ -1204,6 +1212,7 @@ long __put_user_asm_w(unsigned int x, void __user *addr)
 		: "D0FrT");
 	return err;
 }
+EXPORT_SYMBOL(__put_user_asm_w);
 
 long __put_user_asm_d(unsigned int x, void __user *addr)
 {
@@ -1227,6 +1236,7 @@ long __put_user_asm_d(unsigned int x, void __user *addr)
 		: "D0FrT");
 	return err;
 }
+EXPORT_SYMBOL(__put_user_asm_d);
 
 long __put_user_asm_l(unsigned long long x, void __user *addr)
 {
@@ -1250,6 +1260,7 @@ long __put_user_asm_l(unsigned long long x, void __user *addr)
 		: "D0FrT");
 	return err;
 }
+EXPORT_SYMBOL(__put_user_asm_l);
 
 long strnlen_user(const char __user *src, long count)
 {
@@ -1286,6 +1297,7 @@ long strnlen_user(const char __user *src, long count)
 
 	return res;
 }
+EXPORT_SYMBOL(strnlen_user);
 
 long __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -1339,3 +1351,4 @@ long __strncpy_from_user(char *dst, const char __user *src, long count)
 
 	return res;
 }
+EXPORT_SYMBOL(__strncpy_from_user);
--
cgit v1.2.3
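
The usercopy exports matter because modules rarely call these helpers by
name: copy_from_user(), get_user() and put_user() expand to them behind
the scenes, so a missing export only surfaces as a link failure when a
module first touches user memory. A hypothetical consumer (invented
names; exactly which helper each call expands to is
architecture-specific):

    #include <linux/uaccess.h>

    /* Hypothetical module code: on metag, copy_from_user() ends up in
     * exported helpers such as __copy_user_zeroing(). */
    static int example_read_user(char *kbuf, size_t kbuf_len,
                                 const char __user *ubuf, size_t len)
    {
            if (len >= kbuf_len)
                    return -EINVAL;
            if (copy_from_user(kbuf, ubuf, len))
                    return -EFAULT;
            kbuf[len] = '\0';
            return 0;
    }
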

From 44c2451080ab3896688c852253814f5eabb2ecce Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 13 Feb 2013 13:19:15 +0000
Subject: metag: move mm/init.c exports out of metag_ksyms.c

It's less error prone to have function symbols exported immediately
after the function rather than in metag_ksyms.c.

Move each EXPORT_SYMBOL in metag_ksyms.c for symbols defined in
mm/init.c into mm/init.c.

Signed-off-by: James Hogan
---
 arch/metag/kernel/metag_ksyms.c | 2 --
 arch/metag/mm/init.c            | 3 +++
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/metag/kernel')

diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index 3d60cf31e264..87e47769bfe6 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -11,9 +11,7 @@
 
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(pfn_base);
 
 #ifdef CONFIG_FLATMEM
 /* needed for the pfn_valid macro */
 EXPORT_SYMBOL(max_pfn);
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 514376d90db4..504a398d5f8b 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -3,6 +3,7 @@
  *
  */
 
+#include <linux/export.h>
 #include
 #include
 #include
@@ -25,10 +26,12 @@
 #include
 
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
 
 unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 extern char __user_gateway_start;
 extern char __user_gateway_end;
--
cgit v1.2.3


From d7900504480ea9d49a6b5f8811f2678ad08d3255 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 13 Feb 2013 13:30:15 +0000
Subject: metag: cleanup metag_ksyms.c includes

Minimise metag_ksyms.c includes to directly include the files that
declare a particular symbol, and not include any unnecessary ones.

Signed-off-by: James Hogan
---
 arch/metag/kernel/metag_ksyms.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'arch/metag/kernel')

diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index 87e47769bfe6..ec872ef14eb1 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -1,12 +1,9 @@
 #include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
 #include
+#include
+#include
 #include
 
 EXPORT_SYMBOL(clear_page);
--
cgit v1.2.3
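
After this series, metag_ksyms.c is left holding only symbols that have
no C function body to attach an export to (such as routines implemented
in assembly). A sketch of that end state (the include names are
assumptions; the real list depends on which header declares each
symbol):

    /* Hypothetical minimal ksyms file: each include is kept solely
     * because it declares a symbol exported below. */
    #include <linux/export.h>
    #include <asm/page.h>           /* clear_page, copy_page */

    EXPORT_SYMBOL(clear_page);
    EXPORT_SYMBOL(copy_page);
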

From 2270e6d30bb6601ffd42e9d972442df0499ad547 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 20 Feb 2013 13:59:13 +0000
Subject: metag: copy devicetree to non-init memory

Make a copy of the device tree blob in non-init memory. When using
built-in device tree files, the platform code is required to copy the
blob to non-init memory prior to calling unflatten_device_tree(),
otherwise the strings that the device tree refers to will get poisoned
and potentially reused, breaking later reading of the device tree
post-init (such as compatible matching in modules, debugfs, and the
procfs interface).

Signed-off-by: James Hogan
Reviewed-by: Vineet Gupta
---
 arch/metag/include/asm/prom.h |  1 +
 arch/metag/kernel/devtree.c   | 17 +++++++++++++++++
 arch/metag/kernel/setup.c     |  2 ++
 3 files changed, 20 insertions(+)

(limited to 'arch/metag/kernel')

diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h
index d881396c392c..ccac97e865d0 100644
--- a/arch/metag/include/asm/prom.h
+++ b/arch/metag/include/asm/prom.h
@@ -18,6 +18,7 @@
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
 extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void copy_fdt(void);
 extern void metag_dt_memblock_reserve(void);
 
 #endif /* __ASM_METAG_PROM_H */
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c
index 5b6b1d855492..7cd02529636e 100644
--- a/arch/metag/kernel/devtree.c
+++ b/arch/metag/kernel/devtree.c
@@ -95,3 +95,20 @@ struct machine_desc * __init setup_machine_fdt(void *dt)
 
 	return mdesc_best;
 }
+
+/**
+ * copy_fdt - Copy device tree into non-init memory.
+ *
+ * We must copy the flattened device tree blob into non-init memory because the
+ * unflattened device tree will reference the strings in it directly.
+ */
+void __init copy_fdt(void)
+{
+	void *alloc = early_init_dt_alloc_memory_arch(
+			be32_to_cpu(initial_boot_params->totalsize), 0x40);
+	if (alloc) {
+		memcpy(alloc, initial_boot_params,
+		       be32_to_cpu(initial_boot_params->totalsize));
+		initial_boot_params = alloc;
+	}
+}
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index dd6c5ad73917..879246170aec 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -406,6 +406,8 @@ void __init setup_arch(char **cmdline_p)
 	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
 	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
 
+	/* Copy device tree blob into non-init memory before unflattening */
+	copy_fdt();
 	unflatten_device_tree();
 
 #ifdef CONFIG_SMP
--
cgit v1.2.3
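
To see why copy_fdt() is needed, consider a post-init consumer. Without
the copy, the property and compatible strings that the unflattened tree
points into would sit in freed (and poisoned) init memory by the time a
module runs something like this hypothetical check (the compatible
string is invented):

    #include <linux/of.h>

    /* Hypothetical module code: compatible matching walks strings that
     * still live in the FDT blob, so the blob must outlive init. */
    static int example_check_board(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "vendor,example-board");
            if (!np)
                    return -ENODEV;
            of_node_put(np);
            return 0;
    }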