Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	32
-rw-r--r--	mm/page_alloc.c	2
-rw-r--r--	mm/shmem.c	3
-rw-r--r--	mm/slab.c	2
-rw-r--r--	mm/vmscan.c	2
5 files changed, 38 insertions, 3 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e725..fd57442186cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
return ret;
}
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping: The address_space to search
+ * @index: The starting page index
+ * @nr_pages: The maximum number of pages
+ * @pages: Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+ unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ unsigned int ret;
+
+ read_lock_irq(&mapping->tree_lock);
+ ret = radix_tree_gang_lookup(&mapping->page_tree,
+ (void **)pages, index, nr_pages);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]->mapping == NULL || pages[i]->index != index)
+ break;
+
+ page_cache_get(pages[i]);
+ index++;
+ }
+ read_unlock_irq(&mapping->tree_lock);
+ return i;
+}
+
/*
* Like find_get_pages, except we only return pages which are tagged with
* `tag'. We update *index to index the next page for the traversal.
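For context, a minimal usage sketch of the new helper (not part of this commit): it assumes a caller that already has a mapping and a starting index, and the function name and array size here are arbitrary illustrations. Each page returned by the lookup carries a reference, so the caller drops it with page_cache_release() when done.

#include <linux/pagemap.h>

/*
 * Illustrative only: grab a run of contiguous pages starting at 'start'
 * and release the references afterwards.
 */
static void example_contig_lookup(struct address_space *mapping, pgoff_t start)
{
	struct page *pages[16];
	unsigned int nr, i;

	/* Returns only pages at consecutive indices start, start+1, ... */
	nr = find_get_pages_contig(mapping, start, 16, pages);

	for (i = 0; i < nr; i++) {
		/* ... work with pages[i] ... */
		page_cache_release(pages[i]);	/* drop the reference taken by the lookup */
	}
}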
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c6..4c5e68e4e9ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/migrate.h>
+
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
.prepare_write = shmem_prepare_write,
.commit_write = simple_commit_write,
#endif
+ .migratepage = migrate_page,
};
static struct file_operations shmem_file_operations = {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..af5c5237e11a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
#endif
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;
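To make the binding-restore pattern described in the comment above concrete, here is an illustrative sketch. It is not part of this patch and not the remainder of the truncated hunk; 'kswapd_task' and 'node_cpus' are hypothetical placeholders for the real per-node state, and it assumes the cpumask helpers of this kernel era.

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Illustrative only: when one of a node's CPUs comes back online,
 * re-bind that node's kswapd to the node's CPU mask.
 */
static void example_restore_binding(struct task_struct *kswapd_task,
				    cpumask_t node_cpus)
{
	/* any_online_cpu() returns NR_CPUS when the mask has no online CPU */
	if (any_online_cpu(node_cpus) != NR_CPUS)
		set_cpus_allowed(kswapd_task, node_cpus);
}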