author     Paul Mundt <lethal@linux-sh.org>   2011-08-26 16:49:51 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2011-08-26 16:49:51 +0900
commit     2d0b579a9831c927bcebf5f61055e7462f0af2dc
tree       3181c183b463e745601b1f4efda0f8fff3f1c08b /mm
parent     e80ca144ea902efa7aed446780fd9fad421fd8d3
parent     671ee7f0ce62e4b991b47fcf1c161c3f710dabbc
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into sh-fixes-for-linus
Diffstat (limited to 'mm')
-rw-r--r--  mm/highmem.c          4
-rw-r--r--  mm/memcontrol.c      38
-rw-r--r--  mm/page-writeback.c  15
-rw-r--r--  mm/slub.c            10
-rw-r--r--  mm/vmalloc.c          7
-rw-r--r--  mm/vmscan.c          19
6 files changed, 42 insertions, 51 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index 693394daa2ed..5ef672c07f75 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -326,7 +326,7 @@ static struct page_address_slot {
spinlock_t lock; /* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
-static struct page_address_slot *page_slot(struct page *page)
+static struct page_address_slot *page_slot(const struct page *page)
{
return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
@@ -337,7 +337,7 @@ static struct page_address_slot *page_slot(struct page *page)
*
* Returns the page's virtual address.
*/
-void *page_address(struct page *page)
+void *page_address(const struct page *page)
{
unsigned long flags;
void *ret;
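
Note on the highmem.c hunk above: the change only const-qualifies the struct page pointer, so callers that hold a const struct page * can look up its virtual address without casting. A minimal userspace sketch of the same const-propagation idea (the struct and accessor here are stand-ins, not the kernel's):

#include <stdio.h>

struct page { unsigned long flags; };

/* Accepting a const pointer documents that the lookup never modifies the
 * page and lets both const and non-const callers use the accessor. */
static const char *page_state(const struct page *page)
{
	return page->flags ? "dirty" : "clean";
}

int main(void)
{
	const struct page p = { .flags = 1 };

	printf("%s\n", page_state(&p));	/* no need to cast away const */
	return 0;
}
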
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ec4e7ca4cd..ebd1e86bef1c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1841,29 +1841,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
*/
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
- int lock_count = -1;
struct mem_cgroup *iter, *failed = NULL;
bool cond = true;
for_each_mem_cgroup_tree_cond(iter, mem, cond) {
- bool locked = iter->oom_lock;
-
- iter->oom_lock = true;
- if (lock_count == -1)
- lock_count = iter->oom_lock;
- else if (lock_count != locked) {
+ if (iter->oom_lock) {
/*
* this subtree of our hierarchy is already locked
* so we cannot give a lock.
*/
- lock_count = 0;
failed = iter;
cond = false;
- }
+ } else
+ iter->oom_lock = true;
}
if (!failed)
- goto done;
+ return true;
/*
* OK, we failed to lock the whole subtree so we have to clean up
@@ -1877,8 +1871,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
}
iter->oom_lock = false;
}
-done:
- return lock_count;
+ return false;
}
/*
@@ -2091,6 +2084,7 @@ struct memcg_stock_pcp {
#define FLUSHING_CACHED_CHARGE (0)
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static DEFINE_MUTEX(percpu_charge_mutex);
/*
* Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -2168,13 +2162,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
- /*
- * Get a hint for avoiding draining charges on the current cpu,
- * which must be exhausted by our charging. It is not required that
- * this be a precise check, so we use raw_smp_processor_id() instead of
- * getcpu()/putcpu().
- */
- curcpu = raw_smp_processor_id();
+ curcpu = get_cpu();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *mem;
@@ -2191,14 +2179,14 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
schedule_work_on(cpu, &stock->work);
}
}
+ put_cpu();
if (!sync)
goto out;
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
- if (mem_cgroup_same_or_subtree(root_mem, stock->cached) &&
- test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+ if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
flush_work(&stock->work);
}
out:
@@ -2213,14 +2201,22 @@ out:
*/
static void drain_all_stock_async(struct mem_cgroup *root_mem)
{
+ /*
+ * If someone calls draining, avoid adding more kworker runs.
+ */
+ if (!mutex_trylock(&percpu_charge_mutex))
+ return;
drain_all_stock(root_mem, false);
+ mutex_unlock(&percpu_charge_mutex);
}
/* This is a synchronous drain interface. */
static void drain_all_stock_sync(struct mem_cgroup *root_mem)
{
/* called when force_empty is called */
+ mutex_lock(&percpu_charge_mutex);
drain_all_stock(root_mem, true);
+ mutex_unlock(&percpu_charge_mutex);
}
/*
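
Note on the memcontrol.c hunks above: the new percpu_charge_mutex makes the asynchronous drain path a no-op when a drain is already in flight (mutex_trylock), while the synchronous path waits for its turn. A hedged userspace sketch of that trylock-to-deduplicate pattern, using pthreads as a stand-in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the percpu_charge_mutex pattern: an async
 * caller skips the work entirely if a drain is already running, while a
 * sync caller blocks until it can perform the drain itself. */
static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void drain_all(void)
{
	puts("draining stocks");
}

static void drain_async(void)
{
	if (pthread_mutex_trylock(&drain_mutex) != 0)
		return;			/* someone else is already draining */
	drain_all();
	pthread_mutex_unlock(&drain_mutex);
}

static void drain_sync(void)
{
	pthread_mutex_lock(&drain_mutex);	/* must not return until drained */
	drain_all();
	pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
	drain_async();
	drain_sync();
	return 0;
}
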
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d1960744f881..0e309cd1b5b9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -754,21 +754,10 @@ static void balance_dirty_pages(struct address_space *mapping,
* 200ms is typically more than enough to curb heavy dirtiers;
* (b) the pause time limit makes the dirtiers more responsive.
*/
- if (nr_dirty < dirty_thresh +
- dirty_thresh / DIRTY_MAXPAUSE_AREA &&
+ if (nr_dirty < dirty_thresh &&
+ bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
time_after(jiffies, start_time + MAX_PAUSE))
break;
- /*
- * pass-good area. When some bdi gets blocked (eg. NFS server
- * not responding), or write bandwidth dropped dramatically due
- * to concurrent reads, or dirty threshold suddenly dropped and
- * the dirty pages cannot be brought down anytime soon (eg. on
- * slow USB stick), at least let go of the good bdi's.
- */
- if (nr_dirty < dirty_thresh +
- dirty_thresh / DIRTY_PASSGOOD_AREA &&
- bdi_dirty < bdi_thresh)
- break;
/*
* Increase the delay for each loop, up to our previous
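
Note on the page-writeback.c hunk above: the "pass-good" escape is dropped and the remaining break condition requires the global dirty count to be under the global threshold and the bdi's dirty count to be under the midpoint of the per-task and per-bdi limits. A small restatement of that predicate with made-up numbers, just to show the arithmetic:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative restatement of the new break condition (values invented):
 * the dirtier is released only when globally under the dirty threshold,
 * this bdi is under the midpoint of the task and bdi limits, and the
 * maximum pause has already elapsed. */
static bool can_break(unsigned long nr_dirty, unsigned long dirty_thresh,
		      unsigned long bdi_dirty, unsigned long task_bdi_thresh,
		      unsigned long bdi_thresh, bool max_pause_elapsed)
{
	return nr_dirty < dirty_thresh &&
	       bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
	       max_pause_elapsed;
}

int main(void)
{
	printf("%d\n", can_break(900, 1000, 40, 60, 100, true));	/* 1: 40 < 80 */
	printf("%d\n", can_break(900, 1000, 90, 60, 100, true));	/* 0: 90 >= 80 */
	return 0;
}
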
diff --git a/mm/slub.c b/mm/slub.c
index eb5a8f93338a..9f662d70eb47 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
return check_bytes8(start, value, bytes);
value64 = value | value << 8 | value << 16 | value << 24;
- value64 = value64 | value64 << 32;
+ value64 = (value64 & 0xffffffff) | value64 << 32;
prefix = 8 - ((unsigned long)start) % 8;
if (prefix) {
@@ -1854,7 +1854,7 @@ redo:
new.frozen = 0;
- if (!new.inuse && n->nr_partial < s->min_partial)
+ if (!new.inuse && n->nr_partial > s->min_partial)
m = M_FREE;
else if (new.freelist) {
m = M_PARTIAL;
@@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
slab_empty:
if (prior) {
/*
- * Slab still on the partial list.
+ * Slab on the partial list.
*/
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- }
+ } else
+ /* Slab must be on the full list */
+ remove_full(s, page);
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
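
Note on the check_bytes() hunk above: the fix masks the low 32 bits before shifting, because the earlier byte-OR chain is evaluated in (signed) int arithmetic and a pattern byte of 0x80 or above sign-extends into the upper half of the 64-bit word. A small illustration of replicating a byte across all eight lanes; the helper name is made up and the shifts are done in a 64-bit type from the start, which sidesteps the promotion the masked fix works around:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Build a 64-bit word with the given byte repeated in every lane. */
static uint64_t spread_byte(uint8_t value)
{
	uint64_t v = value;

	v |= v << 8;
	v |= v << 16;
	v |= v << 32;
	return v;
}

int main(void)
{
	printf("%016" PRIx64 "\n", spread_byte(0x5a));	/* 5a5a5a5a5a5a5a5a */
	printf("%016" PRIx64 "\n", spread_byte(0xa5));	/* a5a5a5a5a5a5a5a5 */
	return 0;
}
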
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 464621d18eb2..7ef0903058ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -725,9 +725,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
-#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
- VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
- VMALLOC_PAGES / NR_CPUS / 16))
+#define VMAP_BBMAP_BITS \
+ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
+ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
+ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
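
Note on the vmalloc.c hunk above: dividing by roundup_pow_of_two(NR_CPUS) instead of NR_CPUS keeps the bitmap size a power of two even when the CPU count is not. A userspace sketch of the effect, with an invented page count and a hypothetical 6-CPU box; roundup_pow2() is a stand-in for the kernel helper:

#include <stdio.h>

/* Smallest power of two >= n, for n >= 1. */
static unsigned long roundup_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long vmalloc_pages = 1UL << 18;	/* made-up figure */
	unsigned long ncpus = 6;

	printf("naive:   %lu\n", vmalloc_pages / ncpus / 16);			/* 2730 */
	printf("rounded: %lu\n", vmalloc_pages / roundup_pow2(ncpus) / 16);	/* 2048 */
	return 0;
}
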
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ef69124fa3e..b7719ec10dc5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
.mem_cgroup = mem,
.memcg_record = rec,
};
- unsigned long start, end;
+ ktime_t start, end;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
sc.may_writepage,
sc.gfp_mask);
- start = sched_clock();
+ start = ktime_get();
/*
* NOTE: Although we can get the priority field, using it
* here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
* the priority and make it zero.
*/
shrink_zone(0, zone, &sc);
- end = sched_clock();
+ end = ktime_get();
if (rec)
- rec->elapsed += end - start;
+ rec->elapsed += ktime_to_ns(ktime_sub(end, start));
*scanned = sc.nr_scanned;
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
{
struct zonelist *zonelist;
unsigned long nr_reclaimed;
- unsigned long start, end;
+ ktime_t start, end;
int nid;
struct scan_control sc = {
.may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.gfp_mask = sc.gfp_mask,
};
- start = sched_clock();
+ start = ktime_get();
/*
* Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
* take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
sc.gfp_mask);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
- end = sched_clock();
+ end = ktime_get();
if (rec)
- rec->elapsed += end - start;
+ rec->elapsed += ktime_to_ns(ktime_sub(end, start));
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
@@ -2529,6 +2529,9 @@ loop_again:
high_wmark_pages(zone), 0, 0)) {
end_zone = i;
break;
+ } else {
+ /* If balanced, clear the congested flag */
+ zone_clear_flag(zone, ZONE_CONGESTED);
}
}
if (i < 0)
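
Note on the vmscan.c hunks above: the elapsed-time accounting switches from sched_clock() to ktime_get()/ktime_to_ns(ktime_sub(end, start)). A userspace analogue of that pattern, with CLOCK_MONOTONIC playing the role of ktime_get():

#include <stdio.h>
#include <time.h>

/* Difference between two monotonic timestamps, in nanoseconds. */
static long long elapsed_ns(const struct timespec *start, const struct timespec *end)
{
	return (end->tv_sec - start->tv_sec) * 1000000000LL +
	       (end->tv_nsec - start->tv_nsec);
}

int main(void)
{
	struct timespec start, end;
	long long total_ns = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... work being measured, e.g. the shrink pass in the patch ... */
	clock_gettime(CLOCK_MONOTONIC, &end);

	total_ns += elapsed_ns(&start, &end);
	printf("elapsed: %lld ns\n", total_ns);
	return 0;
}
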