author    Paul Mackerras <paulus@samba.org>  2008-06-09 12:19:41 +1000
committer Paul Mackerras <paulus@samba.org>  2008-06-09 12:19:41 +1000
commit    8a3e1c670e503ddd6f6c373b307f38b783ee3a50 (patch)
tree      03094e8425b750d2693a271ebc89b49312e5476a /mm
parent    e026892c85571e12f11abffde5a90bcc704d663e (diff)
parent    60d5019be8acef268f4676d229c490186d338fbc (diff)
Merge branch 'merge'

Conflicts:
	arch/powerpc/sysdev/fsl_soc.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     |  2
-rw-r--r--  mm/memory.c      |  2
-rw-r--r--  mm/mmap.c        | 12
-rw-r--r--  mm/nommu.c       | 34
-rw-r--r--  mm/page_alloc.c  |  8
-rw-r--r--  mm/slob.c        |  5
-rw-r--r--  mm/slub.c        |  5
-rw-r--r--  mm/swap.c        |  4
8 files changed, 52 insertions(+), 20 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbf953eeb58b..ab171274ef21 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -785,7 +785,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
continue;
spin_lock(&dst->page_table_lock);
- spin_lock(&src->page_table_lock);
+ spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
if (!huge_pte_none(huge_ptep_get(src_pte))) {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
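
Note on the hugetlb hunk above: both page_table_lock instances belong to the same lockdep class, so taking the source mm's lock while holding the destination's looks like a self-deadlock to lockdep. spin_lock_nested() with SINGLE_DEPTH_NESTING annotates the second acquisition of the same class as intentional. A minimal sketch of the pattern (dst/src stand in for any two mm_structs locked in a fixed order; the annotation only silences lockdep, it does not enforce ordering by itself):

	spin_lock(&dst->page_table_lock);
	spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
	/* ... copy page table entries from src to dst ... */
	spin_unlock(&src->page_table_lock);
	spin_unlock(&dst->page_table_lock);
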
diff --git a/mm/memory.c b/mm/memory.c
index fb5608a120ed..19e0ae9beecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.flags = flags;
vmf.page = NULL;
- BUG_ON(vma->vm_flags & VM_PFNMAP);
-
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
diff --git a/mm/mmap.c b/mm/mmap.c
index fac66337da2a..3354fdd83d4b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
/*
* Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* cast `allowed' as a signed long because vm_committed_space
* sometimes has a negative value
*/
- if (atomic_read(&vm_committed_space) < (long)allowed)
+ if (atomic_long_read(&vm_committed_space) < (long)allowed)
return 0;
error:
vm_unacct_memory(pages);
@@ -245,10 +245,16 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ unsigned long min_brk;
down_write(&mm->mmap_sem);
- if (brk < mm->start_brk)
+#ifdef CONFIG_COMPAT_BRK
+ min_brk = mm->end_code;
+#else
+ min_brk = mm->start_brk;
+#endif
+ if (brk < min_brk)
goto out;
/*
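
Two independent changes in the mmap.c hunks above. First, vm_committed_space becomes an atomic_long_t: atomic_t is only 32 bits even on 64-bit kernels, and with 4 KiB pages 2^31 pages is just 8 TiB of committed address space, which heavily overcommitted 64-bit workloads can exceed. Second, when CONFIG_COMPAT_BRK is set (for legacy binaries that expect a non-randomized heap), the lower bound accepted by brk(2) is relaxed from start_brk down to end_code, since such binaries may shrink the break below the randomized start_brk. A condensed sketch of the accounting check (enough_memory() is a hypothetical stand-in for the real __vm_enough_memory(), not its actual logic):

	atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);

	/* Hypothetical, simplified stand-in for __vm_enough_memory(). */
	static int enough_memory(long pages, unsigned long allowed)
	{
		vm_acct_memory(pages);	/* charge the pages first */
		/*
		 * Read back as a signed long: per-CPU batching (see the
		 * mm/swap.c hunks below) can drive the global counter
		 * transiently negative.
		 */
		if (atomic_long_read(&vm_committed_space) < (long)allowed)
			return 0;	/* within the commit limit */
		vm_unacct_memory(pages);
		return -ENOMEM;
	}
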
diff --git a/mm/nommu.c b/mm/nommu.c
index ef8c62cec697..3abd0845bda4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -104,21 +104,43 @@ EXPORT_SYMBOL(vmtruncate);
unsigned int kobjsize(const void *objp)
{
struct page *page;
+ int order = 0;
/*
* If the object we have should not have ksize performed on it,
* return size of 0
*/
- if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
+ if (!objp)
return 0;
+ if ((unsigned long)objp >= memory_end)
+ return 0;
+
+ page = virt_to_head_page(objp);
+ if (!page)
+ return 0;
+
+ /*
+ * If the allocator sets PageSlab, we know the pointer came from
+ * kmalloc().
+ */
if (PageSlab(page))
return ksize(objp);
- BUG_ON(page->index < 0);
- BUG_ON(page->index >= MAX_ORDER);
+ /*
+ * The ksize() function is only guaranteed to work for pointers
+ * returned by kmalloc(). So handle arbitrary pointers, that we expect
+ * always to be compound pages, here.
+ */
+ if (PageCompound(page))
+ order = compound_order(page);
- return (PAGE_SIZE << page->index);
+ /*
+ * Finally, handle arbitrary pointers that don't set PageSlab.
+ * Default to 0-order in the case when we're unable to ksize()
+ * the object.
+ */
+ return PAGE_SIZE << order;
}
/*
@@ -1410,7 +1432,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* cast `allowed' as a signed long because vm_committed_space
* sometimes has a negative value
*/
- if (atomic_read(&vm_committed_space) < (long)allowed)
+ if (atomic_long_read(&vm_committed_space) < (long)allowed)
return 0;
error:
vm_unacct_memory(pages);
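
The kobjsize() rewrite above replaces BUG_ON()s with a graceful fallback chain: kmalloc() memory (PageSlab) is sized via ksize(), multi-page allocations are recognized as compound pages, and anything else is assumed to be a single page. A compound page groups 2^order physically contiguous pages behind one head page, so with 4 KiB pages an order-3 allocation reports 4096 << 3 = 32 KiB. The same lookup chain, sketched (alloc_size_of() is an illustrative name, not a kernel API):

	/* Illustrative only; mirrors the kobjsize() logic above. */
	static unsigned long alloc_size_of(const void *objp)
	{
		struct page *page = virt_to_head_page(objp);

		if (PageSlab(page))		/* kmalloc()ed object */
			return ksize(objp);
		if (PageCompound(page))		/* multi-page allocation */
			return PAGE_SIZE << compound_order(page);
		return PAGE_SIZE;		/* default: one order-0 page */
	}
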
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63835579323a..8e83f02cd2d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1396,6 +1396,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
&preferred_zone);
+ if (!preferred_zone)
+ return NULL;
+
classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
@@ -2804,7 +2807,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
alloc_size = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
- if (system_state == SYSTEM_BOOTING) {
+ if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
alloc_bootmem_node(pgdat, alloc_size);
} else {
@@ -3378,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
- memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+ memmap_pages =
+ PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
if (realsize >= memmap_pages) {
realsize -= memmap_pages;
printk(KERN_DEBUG
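
The memmap_pages fix above matters because the old right-shift truncated: any tail of the struct page array smaller than a page was not counted against the zone. A worked example, assuming 4 KiB pages and a 56-byte struct page (both figures illustrative):

	/*
	 * size = 1000 pages of memmap to account for:
	 *   1000 * 56 = 56000 bytes of struct page
	 *   56000 >> PAGE_SHIFT             = 13 pages (2752 bytes dropped)
	 *   PAGE_ALIGN(56000) >> PAGE_SHIFT = 14 pages (tail included)
	 */
	memmap_pages = PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
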
diff --git a/mm/slob.c b/mm/slob.c
index 6038cbadf796..a3ad6671adf1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -469,8 +469,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
return ZERO_SIZE_PTR;
m = slob_alloc(size + align, gfp, align, node);
- if (m)
- *m = size;
+ if (!m)
+ return NULL;
+ *m = size;
return (void *)m + align;
} else {
void *ret;
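
The slob fix above stops dereferencing the size header when slob_alloc() fails: slob's small-object kmalloc() path prepends the requested size to each allocation so ksize() can recover it later, and writing that header through a NULL return was an oops waiting to happen. The general pattern, sketched in plain userspace C (not slob's actual layout):

	#include <stdlib.h>

	/* Stash the size in a hidden header just before the payload. */
	void *sized_alloc(size_t size)
	{
		size_t *m = malloc(sizeof(size_t) + size);

		if (!m)			/* check before touching the header */
			return NULL;
		*m = size;		/* record the requested size */
		return m + 1;		/* payload starts after the header */
	}

	/* Read the size back, ksize()-style. */
	size_t sized_query(const void *p)
	{
		return ((const size_t *)p)[-1];
	}
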
diff --git a/mm/slub.c b/mm/slub.c
index a505a828ef41..0987d1cd943c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2726,9 +2726,10 @@ size_t ksize(const void *object)
page = virt_to_head_page(object);
- if (unlikely(!PageSlab(page)))
+ if (unlikely(!PageSlab(page))) {
+ WARN_ON(!PageCompound(page));
return PAGE_SIZE << compound_order(page);
-
+ }
s = page->slab;
#ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/swap.c b/mm/swap.c
index 91e194445a5e..45c9f25a8a3b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
local = &__get_cpu_var(committed_space);
*local += pages;
if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
- atomic_add(*local, &vm_committed_space);
+ atomic_long_add(*local, &vm_committed_space);
*local = 0;
}
preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
committed = &per_cpu(committed_space, (long)hcpu);
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- atomic_add(*committed, &vm_committed_space);
+ atomic_long_add(*committed, &vm_committed_space);
*committed = 0;
drain_cpu_pagevecs((long)hcpu);
}
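
Both mm/swap.c hunks follow from the atomic_long_t conversion: committed-space updates are batched in a per-CPU counter and folded into the global only when they exceed ACCT_THRESHOLD, or when a CPU goes offline, which keeps the hot path off a shared cache line. Because batches drain in arbitrary order, the global can dip below zero transiently; that is why __vm_enough_memory() reads it as a signed long. A condensed sketch of the pattern (acct_batched() is an illustrative name, not the kernel's code):

	/* Illustrative per-CPU batched counter flushed to a global atomic. */
	static void acct_batched(long *local, long pages, long threshold,
				 atomic_long_t *global)
	{
		*local += pages;	/* cheap, CPU-private update */
		if (*local > threshold || *local < -threshold) {
			atomic_long_add(*local, global);	/* rare flush */
			*local = 0;
		}
	}
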