author    Ley Foon Tan <lftan@altera.com>  2014-11-06 15:19:41 +0800
committer Ley Foon Tan <lftan@altera.com>  2014-12-08 12:55:51 +0800
commit    5ccc6af5e88efdd0c7facba64078d8d61de9012b (patch)
tree      88f9375353cfd8ae22f9e844ef01a5c7fd4dd4f8 /arch/nios2/mm
parent    771a0163c0bda4a379b79bd573693aa88c0d47b7 (diff)
nios2: Memory management
This patch contains the initialisation of the memory blocks, MMU
attributes and the memory map.
Signed-off-by: Ley Foon Tan <lftan@altera.com>
Diffstat (limited to 'arch/nios2/mm')
-rw-r--r--  arch/nios2/mm/init.c    | 142
-rw-r--r--  arch/nios2/mm/uaccess.c | 163
2 files changed, 305 insertions, 0 deletions
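
A note on the calling convention before the diff: the __copy_from_user/__copy_to_user routines added in uaccess.c below follow the usual kernel contract of returning the number of bytes left uncopied, so zero means complete success. A minimal sketch of a caller, assuming a hypothetical example_read() helper that is not part of this patch:

    /* Hedged sketch: copy_from_user() returns the number of bytes that
     * could NOT be copied; any non-zero result means a fault occurred
     * partway through.  example_read() is hypothetical, for illustration. */
    #include <linux/uaccess.h>
    #include <linux/errno.h>

    static long example_read(void *dst, const void __user *src,
                             unsigned long len)
    {
            if (copy_from_user(dst, src, len))
                    return -EFAULT;         /* some bytes faulted */
            return 0;                       /* all len bytes copied */
    }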
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
new file mode 100644
index 000000000000..e75c75d249d6
--- /dev/null
+++ b/arch/nios2/mm/init.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * based on arch/m68k/mm/init.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/cpuinfo.h>
+#include <asm/processor.h>
+
+pgd_t *pgd_current;
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+
+	memset(zones_size, 0, sizeof(zones_size));
+
+	pagetable_init();
+	pgd_current = swapper_pg_dir;
+
+	zones_size[ZONE_NORMAL] = max_mapnr;
+
+	/* pass the memory from the bootmem allocator to the main allocator */
+	free_area_init(zones_size);
+
+	flush_dcache_range((unsigned long)empty_zero_page,
+			(unsigned long)empty_zero_page + PAGE_SIZE);
+}
+
+void __init mem_init(void)
+{
+	unsigned long end_mem = memory_end; /* this must not include
+						kernel stack at top */
+
+	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
+
+	end_mem &= PAGE_MASK;
+	high_memory = __va(end_mem);
+
+	/* this will put all memory onto the freelists */
+	free_all_bootmem();
+	mem_init_print_info(NULL);
+}
+
+void __init mmu_init(void)
+{
+	flush_tlb_all();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void __init_refok free_initmem(void)
+{
+	free_initmem_default(-1);
+}
+
+#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+static struct page *kuser_page[1];
+
+static int alloc_kuser_page(void)
+{
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	unsigned long vpage;
+
+	vpage = get_zeroed_page(GFP_ATOMIC);
+	if (!vpage)
+		return -ENOMEM;
+
+	/* Copy kuser helpers */
+	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
+
+	flush_icache_range(vpage, vpage + KUSER_SIZE);
+	kuser_page[0] = virt_to_page(vpage);
+
+	return 0;
+}
+arch_initcall(alloc_kuser_page);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+
+	/* Map kuser helpers to user space address */
+	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, kuser_page);
+
+	up_write(&mm->mmap_sem);
+
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
+}
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
new file mode 100644
index 000000000000..7663e156ff4f
--- /dev/null
+++ b/arch/nios2/mm/uaccess.c
@@ -0,0 +1,163 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+asm(".global	__copy_from_user\n"
+    "   .type __copy_from_user, @function\n"
+    "__copy_from_user:\n"
+    "   movi  r2,7\n"
+    "   mov   r3,r4\n"
+    "   bge   r2,r6,1f\n"
+    "   xor   r2,r4,r5\n"
+    "   andi  r2,r2,3\n"
+    "   movi  r7,3\n"
+    "   beq   r2,zero,4f\n"
+    "1: addi  r6,r6,-1\n"
+    "   movi  r2,-1\n"
+    "   beq   r6,r2,3f\n"
+    "   mov   r7,r2\n"
+    "2: ldbu  r2,0(r5)\n"
+    "   addi  r6,r6,-1\n"
+    "   addi  r5,r5,1\n"
+    "   stb   r2,0(r3)\n"
+    "   addi  r3,r3,1\n"
+    "   bne   r6,r7,2b\n"
+    "3:\n"
+    "   addi  r2,r6,1\n"
+    "   ret\n"
+    "13:mov   r2,r6\n"
+    "   ret\n"
+    "4: andi  r2,r4,1\n"
+    "   cmpeq r2,r2,zero\n"
+    "   beq   r2,zero,7f\n"
+    "5: andi  r2,r3,2\n"
+    "   beq   r2,zero,6f\n"
+    "9: ldhu  r2,0(r5)\n"
+    "   addi  r6,r6,-2\n"
+    "   addi  r5,r5,2\n"
+    "   sth   r2,0(r3)\n"
+    "   addi  r3,r3,2\n"
+    "6: bge   r7,r6,1b\n"
+    "10:ldw   r2,0(r5)\n"
+    "   addi  r6,r6,-4\n"
+    "   addi  r5,r5,4\n"
+    "   stw   r2,0(r3)\n"
+    "   addi  r3,r3,4\n"
+    "   br    6b\n"
+    "7: ldbu  r2,0(r5)\n"
+    "   addi  r6,r6,-1\n"
+    "   addi  r5,r5,1\n"
+    "   addi  r3,r4,1\n"
+    "   stb   r2,0(r4)\n"
+    "   br    5b\n"
+    ".section __ex_table,\"a\"\n"
+    ".word 2b,3b\n"
+    ".word 9b,13b\n"
+    ".word 10b,13b\n"
+    ".word 7b,13b\n"
+    ".previous\n"
+    );
+EXPORT_SYMBOL(__copy_from_user);
+
+asm(
+    "   .global __copy_to_user\n"
+    "   .type __copy_to_user, @function\n"
+    "__copy_to_user:\n"
+    "   movi  r2,7\n"
+    "   mov   r3,r4\n"
+    "   bge   r2,r6,1f\n"
+    "   xor   r2,r4,r5\n"
+    "   andi  r2,r2,3\n"
+    "   movi  r7,3\n"
+    "   beq   r2,zero,4f\n"
+    /* Bail if we try to copy zero bytes */
+    "1: addi  r6,r6,-1\n"
+    "   movi  r2,-1\n"
+    "   beq   r6,r2,3f\n"
+    /* Copy byte by byte for small copies and if src^dst != 0 */
+    "   mov   r7,r2\n"
+    "2: ldbu  r2,0(r5)\n"
+    "   addi  r5,r5,1\n"
+    "9: stb   r2,0(r3)\n"
+    "   addi  r6,r6,-1\n"
+    "   addi  r3,r3,1\n"
+    "   bne   r6,r7,2b\n"
+    "3: addi  r2,r6,1\n"
+    "   ret\n"
+    "13:mov   r2,r6\n"
+    "   ret\n"
+    /* If 'to' is an odd address byte copy */
+    "4: andi  r2,r4,1\n"
+    "   cmpeq r2,r2,zero\n"
+    "   beq   r2,zero,7f\n"
+    /* If 'to' is not divideable by four copy halfwords */
+    "5: andi  r2,r3,2\n"
+    "   beq   r2,zero,6f\n"
+    "   ldhu  r2,0(r5)\n"
+    "   addi  r5,r5,2\n"
+    "10:sth   r2,0(r3)\n"
+    "   addi  r6,r6,-2\n"
+    "   addi  r3,r3,2\n"
+    /* Copy words */
+    "6: bge   r7,r6,1b\n"
+    "   ldw   r2,0(r5)\n"
+    "   addi  r5,r5,4\n"
+    "11:stw   r2,0(r3)\n"
+    "   addi  r6,r6,-4\n"
+    "   addi  r3,r3,4\n"
+    "   br    6b\n"
+    /* Copy remaining bytes */
+    "7: ldbu  r2,0(r5)\n"
+    "   addi  r5,r5,1\n"
+    "   addi  r3,r4,1\n"
+    "12: stb  r2,0(r4)\n"
+    "   addi  r6,r6,-1\n"
+    "   br    5b\n"
+    ".section __ex_table,\"a\"\n"
+    ".word 9b,3b\n"
+    ".word 10b,13b\n"
+    ".word 11b,13b\n"
+    ".word 12b,13b\n"
+    ".previous\n");
+EXPORT_SYMBOL(__copy_to_user);
+
+long strncpy_from_user(char *__to, const char __user *__from, long __len)
+{
+	int l = strnlen_user(__from, __len);
+	int is_zt = 1;
+
+	if (l > __len) {
+		is_zt = 0;
+		l = __len;
+	}
+
+	if (l == 0 || copy_from_user(__to, __from, l))
+		return -EFAULT;
+
+	if (is_zt)
+		l--;
+	return l;
+}
+
+long strnlen_user(const char __user *s, long n)
+{
+	long i;
+
+	for (i = 0; i < n; i++) {
+		char c;
+
+		if (get_user(c, s + i) == -EFAULT)
+			return 0;
+		if (c == 0)
+			return i + 1;
+	}
+	return n + 1;
+}
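
The ".word insn,fixup" pairs emitted into the __ex_table section are what make the user accesses above recoverable: each entry pairs a load/store that may fault on a bad user pointer with an address to resume at, and the page-fault handler searches the table for the faulting PC before declaring an oops. A rough model of that lookup, with illustrative names (search_table() is not the kernel's actual search_extable() implementation, and nothing here is part of this patch):

    /* Rough model of the __ex_table fixup lookup; the struct layout mirrors
     * the two .word entries per pair above, but search_table() is
     * illustrative, not the kernel's actual implementation. */
    struct exception_table_entry {
            unsigned long insn;     /* address of the faulting load/store */
            unsigned long fixup;    /* address to resume execution at */
    };

    static unsigned long search_table(const struct exception_table_entry *first,
                                      const struct exception_table_entry *last,
                                      unsigned long faulting_pc)
    {
            const struct exception_table_entry *e;

            for (e = first; e <= last; e++)
                    if (e->insn == faulting_pc)
                            return e->fixup;    /* where to resume */
            return 0;   /* no fixup entry: a genuine kernel fault */
    }

Note also the strnlen_user() convention that strncpy_from_user() relies on: it returns 0 on a fault, the string length including the terminating NUL when one is found within n bytes, and n + 1 when no terminator is found, which is what the l > __len / is_zt logic keys off.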