Diffstat (limited to 'include')
-rw-r--r--  include/linux/cgroup.h          |  26
-rw-r--r--  include/linux/compaction.h      |  10
-rw-r--r--  include/linux/file.h            |   1
-rw-r--r--  include/linux/gfp.h             |   4
-rw-r--r--  include/linux/hugetlb.h         |   3
-rw-r--r--  include/linux/hugetlb_cgroup.h  |   1
-rw-r--r--  include/linux/kern_levels.h     |  13
-rw-r--r--  include/linux/kernel.h          |   1
-rw-r--r--  include/linux/memcontrol.h      |  50
-rw-r--r--  include/linux/mm_types.h        |   5
-rw-r--r--  include/linux/mmzone.h          |  12
-rw-r--r--  include/linux/page_cgroup.h     | 105
-rw-r--r--  include/linux/page_counter.h    |  51
-rw-r--r--  include/linux/percpu-refcount.h |  47
-rw-r--r--  include/linux/printk.h          |   1
-rw-r--r--  include/linux/ptrace.h          |   2
-rw-r--r--  include/linux/res_counter.h     | 223
-rw-r--r--  include/linux/slab.h            |   4
-rw-r--r--  include/linux/swap_cgroup.h     |  42
-rw-r--r--  include/net/sock.h              |  26
-rw-r--r--  include/uapi/linux/sysctl.h     |   1
21 files changed, 211 insertions(+), 417 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 27b0c9105da5..641e56494a92 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css)
 }
 
 /**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+	if (!(css->flags & CSS_NO_REF))
+		percpu_ref_get_many(&css->refcnt, n);
+}
+
+/**
  * css_tryget - try to obtain a reference on the specified css
  * @css: target css
  *
@@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css)
 		percpu_ref_put(&css->refcnt);
 }
 
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+	if (!(css->flags & CSS_NO_REF))
+		percpu_ref_put_many(&css->refcnt, n);
+}
+
 /* bits in struct cgroup flags field */
 enum {
 	/* Control Group requires release notifications to userspace */
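The new helpers batch what css_get()/css_put() do one reference at a time. A minimal usage sketch (hypothetical caller, not part of this commit), for a memcg that should hold one css reference per charged page:

	/* take one css reference per page in a single operation */
	css_get_many(&memcg->css, nr_pages);

	/* ... pages remain charged to memcg ... */

	/* drop them in bulk once the pages are uncharged */
	css_put_many(&memcg->css, nr_pages);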
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 60bdf8dc02a3..3238ffa33f68 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
 			enum migrate_mode mode, int *contended,
-			struct zone **candidate_zone);
+			int alloc_flags, int classzone_idx);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern unsigned long compaction_suitable(struct zone *zone, int order);
+extern unsigned long compaction_suitable(struct zone *zone, int order,
+					int alloc_flags, int classzone_idx);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
 			enum migrate_mode mode, int *contended,
-			struct zone **candidate_zone)
+			int alloc_flags, int classzone_idx)
 {
 	return COMPACT_CONTINUE;
 }
@@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
 {
 }
 
-static inline unsigned long compaction_suitable(struct zone *zone, int order)
+static inline unsigned long compaction_suitable(struct zone *zone, int order,
+					int alloc_flags, int classzone_idx)
 {
 	return COMPACT_SKIPPED;
 }
diff --git a/include/linux/file.h b/include/linux/file.h
index 4d69123377a2..f87d30882a24 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
 extern void put_filp(struct file *);
 extern int get_unused_fd_flags(unsigned flags);
-#define get_unused_fd() get_unused_fd_flags(0)
 extern void put_unused_fd(unsigned int fd);
 
 extern void fd_install(unsigned int fd, struct file *file);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 41b30fd4d041..07d2699cdb51 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -381,8 +381,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order);
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
-void drain_all_pages(void);
-void drain_local_pages(void *dummy);
+void drain_all_pages(struct zone *zone);
+void drain_local_pages(struct zone *zone);
 
 /*
  * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6e6d338641fe..cdd149ca5cc0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -311,7 +311,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
 {
 	if (!page_size_log)
 		return &default_hstate;
-	return size_to_hstate(1 << page_size_log);
+
+	return size_to_hstate(1UL << page_size_log);
 }
 
 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0129f89cf98d..bcc853eccc85 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -16,7 +16,6 @@
 #define _LINUX_HUGETLB_CGROUP_H
 
 #include <linux/mmdebug.h>
-#include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
 /*
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index 866caaa9e2bb..c2ce155d83cc 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -22,4 +22,17 @@
  */
 #define KERN_CONT	""
 
+/* integer equivalents of KERN_<LEVEL> */
+#define LOGLEVEL_SCHED		-2	/* Deferred messages from sched code
+					 * are set to this special level */
+#define LOGLEVEL_DEFAULT	-1	/* default (or last) loglevel */
+#define LOGLEVEL_EMERG		0	/* system is unusable */
+#define LOGLEVEL_ALERT		1	/* action must be taken immediately */
+#define LOGLEVEL_CRIT		2	/* critical conditions */
+#define LOGLEVEL_ERR		3	/* error conditions */
+#define LOGLEVEL_WARNING	4	/* warning conditions */
+#define LOGLEVEL_NOTICE		5	/* normal but significant condition */
+#define LOGLEVEL_INFO		6	/* informational */
+#define LOGLEVEL_DEBUG		7	/* debug-level messages */
+
 #endif
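The LOGLEVEL_* constants give the KERN_<LEVEL> prefix strings integer counterparts. An illustration of the correspondence (assuming the existing printk_get_level() helper from <linux/printk.h>, which returns the level character of a prefixed format string):

	const char *fmt = KERN_ERR "example message\n";
	int level = printk_get_level(fmt) - '0';	/* 3, i.e. LOGLEVEL_ERR */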
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 446d76a87ba1..233ea8107038 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -427,6 +427,7 @@ extern int panic_timeout;
 extern int panic_on_oops;
 extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
+extern int panic_on_warn;
 extern int sysctl_panic_on_stackoverflow;
 /*
  * Only to be used by arch init code. If the user over-wrote the default
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6b75640ef5ab..6ea9f919e888 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,7 +25,6 @@
 #include <linux/jump_label.h>
 
 struct mem_cgroup;
-struct page_cgroup;
 struct page;
 struct mm_struct;
 struct kmem_cache;
@@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
-bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-				  struct mem_cgroup *memcg);
-bool task_in_mem_cgroup(struct task_struct *task,
-			const struct mem_cgroup *memcg);
+bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+			      struct mem_cgroup *root);
+bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
 
-static inline
-bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
+static inline bool mm_match_cgroup(struct mm_struct *mm,
+				   struct mem_cgroup *memcg)
 {
 	struct mem_cgroup *task_memcg;
-	bool match;
+	bool match = false;
 
 	rcu_read_lock();
 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
+	if (task_memcg)
+		match = mem_cgroup_is_descendant(task_memcg, memcg);
 	rcu_read_unlock();
 	return match;
 }
@@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void)
 
 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
 					      unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
-			      unsigned long flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
+			      unsigned long *flags);
 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
 				 enum mem_cgroup_stat_index idx, int val);
 
@@ -174,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
 void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
 
-#ifdef CONFIG_DEBUG_VM
-bool mem_cgroup_bad_page_check(struct page *page);
-void mem_cgroup_print_bad_page(struct page *page);
-#endif
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
@@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 }
 
 static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
-					bool locked, unsigned long flags)
+					bool *locked, unsigned long *flags)
 {
 }
 
@@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 }
 #endif /* CONFIG_MEMCG */
 
-#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
-static inline bool
-mem_cgroup_bad_page_check(struct page *page)
-{
-	return false;
-}
-
-static inline void
-mem_cgroup_print_bad_page(struct page *page)
-{
-}
-#endif
-
 enum {
 	UNDER_LIMIT,
 	SOFT_LIMIT,
@@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 	/*
 	 * __GFP_NOFAIL allocations will move on even if charging is not
 	 * possible. Therefore we don't even try, and have this allocation
-	 * unaccounted. We could in theory charge it with
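The kmem charge protocol itself is unchanged by this patch; only the commit target moves from the page_cgroup to the page. A sketch of the documented reserve/allocate/commit sequence (kmem_alloc_charged() is a hypothetical wrapper; the real caller is the page allocator, not part of this diff):

	static struct page *kmem_alloc_charged(gfp_t gfp, int order)
	{
		struct mem_cgroup *memcg;
		struct page *page;

		if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
			return NULL;			/* charge declined */
		page = alloc_pages(gfp, order);
		/* commits @page to @memcg, or reverts the charge if NULL */
		memcg_kmem_commit_charge(page, memcg, order);
		return page;
	}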
-	 * res_counter_charge_nofail, but we hope those allocations are rare,
-	 * and won't be worth the trouble.
+	 * unaccounted. We could in theory charge it forcibly, but we hope
+	 * those allocations are rare, and won't be worth the trouble.
 	 */
 	if (gfp & __GFP_NOFAIL)
 		return true;
@@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
  * memcg_kmem_uncharge_pages: uncharge pages from memcg
  * @page: pointer to struct page being freed
  * @order: allocation order.
- *
- * there is no need to specify memcg here, since it is embedded in page_cgroup
  */
 static inline void
 memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order)
  *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. if @page is NULL, this function will revert the
- * charges. Otherwise, it will commit the memcg given by @memcg to the
- * corresponding page_cgroup.
+ * charges. Otherwise, it will commit @page to @memcg.
  */
 static inline void
 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
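With a mem_cgroup pointer embedded directly in struct page (added just below), the owning cgroup is reachable without the separate page_cgroup array that the next hunks delete. Illustrative comparison only (lookup_page_cgroup() is the API being removed):

	/* old: indirect lookup through the per-node/per-section array */
	memcg = lookup_page_cgroup(page)->mem_cgroup;

	/* new: one dependent load from the page itself */
	memcg = page->mem_cgroup;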
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 004e9d17b47e..bf9f57529dcf 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -22,6 +22,7 @@
 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
 
 struct address_space;
+struct mem_cgroup;
 
 #define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
@@ -167,6 +168,10 @@ struct page {
 		struct page *first_page;	/* Compound tail pages */
 	};
 
+#ifdef CONFIG_MEMCG
+	struct mem_cgroup *mem_cgroup;
+#endif
+
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ffe66e381c04..3879d7664dfc 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -722,9 +722,6 @@ typedef struct pglist_data {
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
 	struct page *node_mem_map;
-#ifdef CONFIG_MEMCG
-	struct page_cgroup *node_page_cgroup;
-#endif
 #endif
 #ifndef CONFIG_NO_BOOTMEM
 	struct bootmem_data *bdata;
@@ -1078,7 +1075,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
 
 struct page;
-struct page_cgroup;
 struct mem_section {
 	/*
 	 * This is, logically, a pointer to an array of struct
@@ -1096,14 +1092,6 @@ struct mem_section {
 
 	/* See declaration of similar field in struct zone */
 	unsigned long *pageblock_flags;
-#ifdef CONFIG_MEMCG
-	/*
-	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
-	 * section. (see memcontrol.h/page_cgroup.h about this.)
-	 */
-	struct page_cgroup *page_cgroup;
-	unsigned long pad;
-#endif
 	/*
 	 * WARNING: mem_section must be a power-of-2 in size for the
 	 * calculation and use of SECTION_ROOT_MASK to make sense.
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
deleted file mode 100644
index 5c831f1eca79..000000000000
--- a/include/linux/page_cgroup.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef __LINUX_PAGE_CGROUP_H
-#define __LINUX_PAGE_CGROUP_H
-
-enum {
-	/* flags for mem_cgroup */
-	PCG_USED = 0x01,	/* This page is charged to a memcg */
-	PCG_MEM = 0x02,		/* This page holds a memory charge */
-	PCG_MEMSW = 0x04,	/* This page holds a memory+swap charge */
-};
-
-struct pglist_data;
-
-#ifdef CONFIG_MEMCG
-struct mem_cgroup;
-
-/*
- * Page Cgroup can be considered as an extended mem_map.
- * A page_cgroup page is associated with every page descriptor. The
- * page_cgroup helps us identify information about the cgroup
- * All page cgroups are allocated at boot or memory hotplug event,
- * then the page cgroup for pfn always exists.
- */
-struct page_cgroup {
-	unsigned long flags;
-	struct mem_cgroup *mem_cgroup;
-};
-
-extern void pgdat_page_cgroup_init(struct pglist_data *pgdat);
-
-#ifdef CONFIG_SPARSEMEM
-static inline void page_cgroup_init_flatmem(void)
-{
-}
-extern void page_cgroup_init(void);
-#else
-extern void page_cgroup_init_flatmem(void);
-static inline void page_cgroup_init(void)
-{
-}
-#endif
-
-struct page_cgroup *lookup_page_cgroup(struct page *page);
-
-static inline int PageCgroupUsed(struct page_cgroup *pc)
-{
-	return !!(pc->flags & PCG_USED);
-}
-#else /* !CONFIG_MEMCG */
-struct page_cgroup;
-
-static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
-{
-}
-
-static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
-{
-	return NULL;
-}
-
-static inline void page_cgroup_init(void)
-{
-}
-
-static inline void page_cgroup_init_flatmem(void)
-{
-}
-#endif /* CONFIG_MEMCG */
-
-#include <linux/swap.h>
-
-#ifdef CONFIG_MEMCG_SWAP
-extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
-					unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
-extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
-extern int swap_cgroup_swapon(int type, unsigned long max_pages);
-extern void swap_cgroup_swapoff(int type);
-#else
-
-static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
-{
-	return 0;
-}
-
-static inline
-unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
-{
-	return 0;
-}
-
-static inline int
-swap_cgroup_swapon(int type, unsigned long max_pages)
-{
-	return 0;
-}
-
-static inline void swap_cgroup_swapoff(int type)
-{
-	return;
-}
-
-#endif /* CONFIG_MEMCG_SWAP */
-
-#endif /* __LINUX_PAGE_CGROUP_H */
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
new file mode 100644
index 000000000000..955421575d16
--- /dev/null
+++ b/include/linux/page_counter.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_PAGE_COUNTER_H
+#define _LINUX_PAGE_COUNTER_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+struct page_counter {
+	atomic_long_t count;
+	unsigned long limit;
+	struct page_counter *parent;
+
+	/* legacy */
+	unsigned long watermark;
+	unsigned long failcnt;
+};
+
+#if BITS_PER_LONG == 32
+#define PAGE_COUNTER_MAX LONG_MAX
+#else
+#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
+#endif
+
+static inline void page_counter_init(struct page_counter *counter,
+				     struct page_counter *parent)
+{
+	atomic_long_set(&counter->count, 0);
+	counter->limit = PAGE_COUNTER_MAX;
+	counter->parent = parent;
+}
+
+static inline unsigned long page_counter_read(struct page_counter *counter)
+{
+	return atomic_long_read(&counter->count);
+}
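page_counter is the lockless replacement for res_counter (deleted further down): counters form a hierarchy via @parent, charges propagate to the root, and a failed try-charge leaves nothing charged. A usage sketch under those semantics (names illustrative):

	struct page_counter parent, child, *fail;

	page_counter_init(&parent, NULL);	/* root counter */
	page_counter_init(&child, &parent);
	page_counter_limit(&parent, 512);	/* limit is in pages, not bytes */

	if (page_counter_try_charge(&child, 32, &fail) < 0)
		return -ENOMEM;			/* @fail is the counter at its limit */
	/* ... */
	page_counter_uncharge(&child, 32);	/* unwinds the whole ancestry */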
+
+void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_try_charge(struct page_counter *counter,
+			    unsigned long nr_pages,
+			    struct page_counter **fail);
+void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_limit(struct page_counter *counter, unsigned long limit);
+int page_counter_memparse(const char *buf, unsigned long *nr_pages);
+
+static inline void page_counter_reset_watermark(struct page_counter *counter)
+{
+	counter->watermark = page_counter_read(counter);
+}
+
+#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 51ce60c35f4c..530b249f7ea4 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -147,28 +147,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 }
 
 /**
- * percpu_ref_get - increment a percpu refcount
+ * percpu_ref_get_many - increment a percpu refcount
  * @ref: percpu_ref to get
+ * @nr: number of references to get
  *
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_add().
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline void percpu_ref_get(struct percpu_ref *ref)
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count))
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 	else
-		atomic_long_inc(&ref->count);
+		atomic_long_add(nr, &ref->count);
 
 	rcu_read_unlock_sched();
 }
 
 /**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+	percpu_ref_get_many(ref, 1);
+}
+
+/**
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
@@ -231,29 +245,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_put - decrement a percpu refcount
+ * percpu_ref_put_many - decrement a percpu refcount
  * @ref: percpu_ref to put
+ * @nr: number of references to put
  *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
-static inline void percpu_ref_put(struct percpu_ref *ref)
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count))
-		this_cpu_dec(*percpu_count);
-	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
+		this_cpu_sub(*percpu_count, nr);
+	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
 }
 
 /**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
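The _many variants perform the whole batch inside a single RCU-sched critical section, so taking n references costs the same as taking one. Illustrative use (the ref owner q is hypothetical):

	/* pin one reference per in-flight item, release them together */
	percpu_ref_get_many(&q->ref, nr_items);
	/* ... */
	percpu_ref_put_many(&q->ref, nr_items);	/* may invoke ->release() */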
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+	percpu_ref_put_many(ref, 1);
+}
+
+/**
  * percpu_ref_is_zero - test whether a percpu refcount reached zero
  * @ref: percpu_ref to test
  *
diff --git a/include/linux/printk.h b/include/linux/printk.h
index d78125f73ac4..3dd489f2dedc 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -118,7 +118,6 @@ int no_printk(const char *fmt, ...)
 #ifdef CONFIG_EARLY_PRINTK
 extern asmlinkage __printf(1, 2)
 void early_printk(const char *fmt, ...);
-void early_vprintk(const char *fmt, va_list ap);
 #else
 static inline __printf(1, 2) __cold
 void early_printk(const char *s, ...) { }
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index cc79eff4a1ad..987a73a40ef8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
 			  struct task_struct *new_parent);
 extern void __ptrace_unlink(struct task_struct *child);
-extern void exit_ptrace(struct task_struct *tracer);
+extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ	0x01
 #define PTRACE_MODE_ATTACH	0x02
 #define PTRACE_MODE_NOAUDIT	0x04
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
deleted file mode 100644
index 56b7bc32db4f..000000000000
--- a/include/linux/res_counter.h
+++ /dev/null
@@ -1,223 +0,0 @@
-#ifndef __RES_COUNTER_H__
-#define __RES_COUNTER_H__
-
-/*
- * Resource Counters
- * Contain common data types and routines for resource accounting
- *
- * Copyright 2007 OpenVZ SWsoft Inc
- *
- * Author: Pavel Emelianov <xemul@openvz.org>
- *
- * See Documentation/cgroups/resource_counter.txt for more
- * info about what this counter is.
- */
-
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-
-/*
- * The core object. the cgroup that wishes to account for some
- * resource may include this counter into its structures and use
- * the helpers described beyond
- */
-
-struct res_counter {
-	/*
-	 * the current resource consumption level
-	 */
-	unsigned long long usage;
-	/*
-	 * the maximal value of the usage from the counter creation
-	 */
-	unsigned long long max_usage;
-	/*
-	 * the limit that usage cannot exceed
-	 */
-	unsigned long long limit;
-	/*
-	 * the limit that usage can be exceed
-	 */
-	unsigned long long soft_limit;
-	/*
-	 * the number of unsuccessful attempts to consume the resource
-	 */
-	unsigned long long failcnt;
-	/*
-	 * the lock to protect all of the above.
-	 * the routines below consider this to be IRQ-safe
-	 */
-	spinlock_t lock;
-	/*
-	 * Parent counter, used for hierarchial resource accounting
-	 */
-	struct res_counter *parent;
-};
-
-#define RES_COUNTER_MAX ULLONG_MAX
-
-/**
- * Helpers to interact with userspace
- * res_counter_read_u64() - returns the value of the specified member.
- * res_counter_read/_write - put/get the specified fields from the
- * res_counter struct to/from the user
- *
- * @counter: the counter in question
- * @member:  the field to work with (see RES_xxx below)
- * @buf:     the buffer to opeate on,...
- * @nbytes:  its size...
- * @pos:     and the offset.
- */
-
-u64 res_counter_read_u64(struct res_counter *counter, int member);
-
-ssize_t res_counter_read(struct res_counter *counter, int member,
-		const char __user *buf, size_t nbytes, loff_t *pos,
-		int (*read_strategy)(unsigned long long val, char *s));
-
-int res_counter_memparse_write_strategy(const char *buf,
-					unsigned long long *res);
-
-/*
- * the field descriptors. one for each member of res_counter
- */
-
-enum {
-	RES_USAGE,
-	RES_MAX_USAGE,
-	RES_LIMIT,
-	RES_FAILCNT,
-	RES_SOFT_LIMIT,
-};
-
-/*
- * helpers for accounting
- */
-
-void res_counter_init(struct res_counter *counter, struct res_counter *parent);
-
-/*
- * charge - try to consume more resource.
- *
- * @counter: the counter
- * @val: the amount of the resource. each controller defines its own
- *       units, e.g. numbers, bytes, Kbytes, etc
- *
- * returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit
- *
- * charge_nofail works the same, except that it charges the resource
- * counter unconditionally, and returns < 0 if the after the current
- * charge we are over limit.
- */
-
-int __must_check res_counter_charge(struct res_counter *counter,
-		unsigned long val, struct res_counter **limit_fail_at);
-int res_counter_charge_nofail(struct res_counter *counter,
-		unsigned long val, struct res_counter **limit_fail_at);
-
-/*
- * uncharge - tell that some portion of the resource is released
- *
- * @counter: the counter
- * @val: the amount of the resource
- *
- * these calls check for usage underflow and show a warning on the console
- *
- * returns the total charges still present in @counter.
- */
-
-u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
-
-u64 res_counter_uncharge_until(struct res_counter *counter,
-			       struct res_counter *top,
-			       unsigned long val);
-/**
- * res_counter_margin - calculate chargeable space of a counter
- * @cnt: the counter
- *
- * Returns the difference between the hard limit and the current usage
- * of resource counter @cnt.
- */
-static inline unsigned long long res_counter_margin(struct res_counter *cnt)
-{
-	unsigned long long margin;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	if (cnt->limit > cnt->usage)
-		margin = cnt->limit - cnt->usage;
-	else
-		margin = 0;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return margin;
-}
-
-/**
- * Get the difference between the usage and the soft limit
- * @cnt: The counter
- *
- * Returns 0 if usage is less than or equal to soft limit
- * The difference between usage and soft limit, otherwise.
- */
-static inline unsigned long long
-res_counter_soft_limit_excess(struct res_counter *cnt)
-{
-	unsigned long long excess;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	if (cnt->usage <= cnt->soft_limit)
-		excess = 0;
-	else
-		excess = cnt->usage - cnt->soft_limit;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return excess;
-}
-
-static inline void res_counter_reset_max(struct res_counter *cnt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	cnt->max_usage = cnt->usage;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-}
-
-static inline void res_counter_reset_failcnt(struct res_counter *cnt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	cnt->failcnt = 0;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-}
-
-static inline int res_counter_set_limit(struct res_counter *cnt,
-		unsigned long long limit)
-{
-	unsigned long flags;
-	int ret = -EBUSY;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	if (cnt->usage <= limit) {
-		cnt->limit = limit;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
-static inline int
-res_counter_set_soft_limit(struct res_counter *cnt,
-				unsigned long long soft_limit)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	cnt->soft_limit = soft_limit;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return 0;
-}
-
-#endif
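For contrast with its replacement: every res_counter operation above serializes on the IRQ-safe spinlock and accounts in bytes, whereas page_counter (added earlier in this diff) accounts in pages with roughly one atomic operation per hierarchy level on the hot path. An illustrative before/after of a charge site (field names hypothetical):

	/* before: bytes, taken under cnt->lock, u64 arithmetic */
	ret = res_counter_charge(&memcg->res, nr_pages << PAGE_SHIFT, &fail_res);

	/* after: pages, lockless fast path */
	ret = page_counter_try_charge(&memcg->memory, nr_pages, &fail_cnt);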
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c265bec6a57d..8a2457d42fc8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -513,10 +513,6 @@ struct memcg_cache_params {
 
 int memcg_update_all_caches(int num_memcgs);
 
-struct seq_file;
-int cache_show(struct kmem_cache *s, struct seq_file *m);
-void print_slabinfo_header(struct seq_file *m);
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
new file mode 100644
index 000000000000..145306bdc92f
--- /dev/null
+++ b/include/linux/swap_cgroup.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_SWAP_CGROUP_H
+#define __LINUX_SWAP_CGROUP_H
+
+#include <linux/swap.h>
+
+#ifdef CONFIG_MEMCG_SWAP
+
+extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
+					unsigned short old, unsigned short new);
+extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+
+#else
+
+static inline
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
+{
+	return 0;
+}
+
+static inline
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
+{
+	return 0;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+	return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+	return;
+}
+
+#endif /* CONFIG_MEMCG_SWAP */
+
+#endif /* __LINUX_SWAP_CGROUP_H */
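The swap_cgroup declarations move verbatim from the deleted page_cgroup.h into their own header. A sketch of the intended round trip (simplified; @id would be the owning memcg's id):

	unsigned short old;

	old = swap_cgroup_record(entry, id);	/* swapout: remember the owner */
	/* ... */
	id = lookup_swap_cgroup_id(entry);	/* swapin/uncharge: find it again */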
diff --git a/include/net/sock.h b/include/net/sock.h
index e6f235ebf6c9..7ff44e062a38 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,8 +54,8 @@
 #include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/page_counter.h>
 #include <linux/memcontrol.h>
-#include <linux/res_counter.h>
 #include <linux/static_key.h>
 #include <linux/aio.h>
 #include <linux/sched.h>
@@ -1062,7 +1062,7 @@ enum cg_proto_flags {
 };
 
 struct cg_proto {
-	struct res_counter	memory_allocated;	/* Current allocated memory. */
+	struct page_counter	memory_allocated;	/* Current allocated memory. */
 	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
 	int			memory_pressure;
 	long			sysctl_mem[3];
@@ -1214,34 +1214,26 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 					      unsigned long amt,
 					      int *parent_status)
 {
-	struct res_counter *fail;
-	int ret;
+	page_counter_charge(&prot->memory_allocated, amt);
 
-	ret = res_counter_charge_nofail(&prot->memory_allocated,
-					amt << PAGE_SHIFT, &fail);
-	if (ret < 0)
+	if (page_counter_read(&prot->memory_allocated) >
+	    prot->memory_allocated.limit)
 		*parent_status = OVER_LIMIT;
 }
 
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
 					      unsigned long amt)
 {
-	res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
-}
-
-static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
-{
-	u64 ret;
-	ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
-	return ret >> PAGE_SHIFT;
+	page_counter_uncharge(&prot->memory_allocated, amt);
 }
 
 static inline long
 sk_memory_allocated(const struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;
+
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return memcg_memory_allocated_read(sk->sk_cgrp);
+		return page_counter_read(&sk->sk_cgrp->memory_allocated);
 
 	return atomic_long_read(prot->memory_allocated);
 }
@@ -1255,7 +1247,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
 		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
 		/* update the root cgroup regardless */
 		atomic_long_add_return(amt, prot->memory_allocated);
-		return memcg_memory_allocated_read(sk->sk_cgrp);
+		return page_counter_read(&sk->sk_cgrp->memory_allocated);
 	}
 
 	return atomic_long_add_return(amt, prot->memory_allocated);
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 43aaba1cc037..0956373b56db 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -153,6 +153,7 @@ enum
 	KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */
 	KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+	KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */
 };