author     Stephen Rothwell <sfr@canb.auug.org.au>  2014-01-10 13:33:39 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2014-01-10 13:33:39 +1100
commit     b8057368d6c29db538ff259266a4375200c3d029 (patch)
tree       456ef3d3c15af4ff4c213daae456855ac26a71c2 /include
parent     592410b1ba9daf61c3bde92762a43eac58000850 (diff)
parent     bb4d5c0664ffa18d5b4a6d68aead3b0d4a854ee1 (diff)
Merge remote-tracking branch 'tip/auto-latest'

Conflicts:
        drivers/acpi/acpi_extlog.c
        drivers/acpi/processor_idle.c
Diffstat (limited to 'include')
-rw-r--r--  include/linux/context_tracking.h         |  10
-rw-r--r--  include/linux/context_tracking_state.h   |  11
-rw-r--r--  include/linux/edac.h                     |  28
-rw-r--r--  include/linux/efi.h                      |  17
-rw-r--r--  include/linux/kernel.h                   |   9
-rw-r--r--  include/linux/perf_event.h               |   5
-rw-r--r--  include/linux/rculist.h                  |   4
-rw-r--r--  include/linux/rcupdate.h                 | 153
-rw-r--r--  include/linux/rcutiny.h                  |   2
-rw-r--r--  include/linux/rcutree.h                  |  36
-rw-r--r--  include/linux/sched.h                    |   1
-rw-r--r--  include/linux/sched/sysctl.h             |   1
-rw-r--r--  include/linux/spinlock.h                 |  10
-rw-r--r--  include/linux/tick.h                     |   8
-rw-r--r--  include/linux/uprobes.h                  |  52
-rw-r--r--  include/linux/vtime.h                    |   4
-rw-r--r--  include/trace/events/ras.h               |  10
-rw-r--r--  include/uapi/asm-generic/statfs.h        |   2
18 files changed, 217 insertions(+), 146 deletions(-)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 158158704c30..37b81bd51ec0 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
static inline void user_enter(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_enter();
}
static inline void user_exit(void)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
context_tracking_user_exit();
}
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (static_key_false(&context_tracking_enabled)) {
+ if (context_tracking_is_enabled()) {
if (prev_ctx == IN_USER)
context_tracking_user_enter();
}
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static inline void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next)
{
- if (static_key_false(&context_tracking_enabled))
+ if (context_tracking_is_enabled())
__context_tracking_task_switch(prev, next);
}
#else
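
An aside, not part of the patch: a minimal sketch of the call-site pattern this change establishes, where callers test the global static key through the new context_tracking_is_enabled() helper instead of open-coding static_key_false(). The hook below is hypothetical.

#include <linux/context_tracking.h>

/* Hypothetical call site: the static key keeps this check free on
 * kernels that never enable context tracking. */
static inline void my_user_return_hook(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_enter();
}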
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 0f1979d0674f..97a81225d037 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -22,15 +22,20 @@ struct context_tracking {
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return static_key_false(&context_tracking_enabled);
}
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
{
return __this_cpu_read(context_tracking.active);
}
+
+static inline bool context_tracking_in_user(void)
+{
+ return __this_cpu_read(context_tracking.state) == IN_USER;
+}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
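
An aside on how the renamed predicates divide the work (sketch only; my_cpu_tracks_user() is hypothetical): context_tracking_is_enabled() is the global static-key fast path, context_tracking_cpu_is_enabled() the per-CPU flag behind it.

#include <linux/context_tracking_state.h>

/* Hypothetical probe: global gate first, then the per-CPU flag
 * (__this_cpu_read, so call with preemption disabled). */
static bool my_cpu_tracks_user(void)
{
        return context_tracking_is_enabled() &&
               context_tracking_cpu_is_enabled();
}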
diff --git a/include/linux/edac.h b/include/linux/edac.h
index dbdffe8d4469..8e6c20af11a2 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -35,6 +35,34 @@ extern void edac_atomic_assert_error(void);
extern struct bus_type *edac_get_sysfs_subsys(void);
extern void edac_put_sysfs_subsys(void);
+enum {
+ EDAC_REPORTING_ENABLED,
+ EDAC_REPORTING_DISABLED,
+ EDAC_REPORTING_FORCE
+};
+
+extern int edac_report_status;
+#ifdef CONFIG_EDAC
+static inline int get_edac_report_status(void)
+{
+ return edac_report_status;
+}
+
+static inline void set_edac_report_status(int new)
+{
+ edac_report_status = new;
+}
+#else
+static inline int get_edac_report_status(void)
+{
+ return EDAC_REPORTING_DISABLED;
+}
+
+static inline void set_edac_report_status(int new)
+{
+}
+#endif
+
static inline void opstate_init(void)
{
switch (edac_op_state) {
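
An aside (sketch only; the my_* functions are hypothetical): the new enum and accessors give RAS code a global switch for EDAC reporting, e.g. so a platform where firmware owns error handling can mute or force it.

#include <linux/edac.h>
#include <linux/init.h>

/* Hypothetical RAS consumer: honor the global reporting switch. */
static void my_handle_mem_error(void)
{
        if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
                return;         /* e.g. firmware-first owns the error */
        /* ... hand the error to the EDAC core ... */
}

/* Hypothetical platform init: force reporting on. */
static void __init my_platform_ras_init(void)
{
        set_edac_report_status(EDAC_REPORTING_FORCE);
}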
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 11ce6784a196..0a819e7a60c9 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -556,6 +556,9 @@ extern struct efi {
unsigned long hcdp; /* HCDP table */
unsigned long uga; /* UGA table */
unsigned long uv_systab; /* UV system table */
+ unsigned long fw_vendor; /* fw_vendor */
+ unsigned long runtime; /* runtime table */
+ unsigned long config_table; /* config tables */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -653,6 +656,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
+#define EFI_ARCH_1 6 /* First arch-specific bit */
#ifdef CONFIG_EFI
# ifdef CONFIG_X86
@@ -872,4 +876,17 @@ int efivars_sysfs_init(void);
#endif /* CONFIG_EFI_VARS */
+#ifdef CONFIG_EFI_RUNTIME_MAP
+int efi_runtime_map_init(struct kobject *);
+void efi_runtime_map_setup(void *, int, u32);
+#else
+static inline int efi_runtime_map_init(struct kobject *kobj)
+{
+ return 0;
+}
+
+static inline void
+efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
+#endif
+
#endif /* _LINUX_EFI_H */
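
An aside (sketch only; the my_* names are hypothetical placeholders for data saved at boot): both new hooks compile away to stubs when CONFIG_EFI_RUNTIME_MAP is off.

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kobject.h>

static void *my_runtime_map;    /* hypothetical: descriptors saved at boot */
static int my_nr_entries;
static u32 my_desc_size;

static int __init my_efi_sysfs_init(struct kobject *efi_kobj)
{
        /* Publish the saved map, then expose it via sysfs. */
        efi_runtime_map_setup(my_runtime_map, my_nr_entries, my_desc_size);
        return efi_runtime_map_init(efi_kobj);
}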
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ecb87544cc5d..2aa3d4b000e6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -394,6 +394,15 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int sysctl_panic_on_stackoverflow;
+/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
extern const char *print_tainted(void);
enum lockdep_ok {
LOCKDEP_STILL_OK,
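
An aside (sketch only; the arch hook and timeout value are hypothetical): the intended caller passes its preferred timeout plus the generic default, so a user-supplied override of panic_timeout survives.

#include <linux/init.h>
#include <linux/kernel.h>

#define MY_ARCH_PANIC_TIMEOUT   180     /* hypothetical arch preference */

static void __init my_arch_panic_setup(void)
{
        /* No-op unless panic_timeout still equals the generic
         * CONFIG_PANIC_TIMEOUT default. */
        set_arch_panic_timeout(MY_ARCH_PANIC_TIMEOUT, CONFIG_PANIC_TIMEOUT);
}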
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e069d1288df..8f4a70f2eca8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -319,7 +319,10 @@ struct perf_event {
*/
struct list_head migrate_entry;
- struct hlist_node hlist_entry;
+ union {
+ struct hlist_node hlist_entry;
+ struct list_head active_entry;
+ };
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
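
An aside (sketch only; the driver names are hypothetical): the unioned active_entry lets a PMU driver keep its running events on a plain list, reusing the storage of hlist_entry since an event is never on both at once.

#include <linux/list.h>
#include <linux/perf_event.h>

static LIST_HEAD(my_active_events);     /* hypothetical per-PMU list */

static void my_pmu_start(struct perf_event *event, int flags)
{
        list_add_tail(&event->active_entry, &my_active_events);
        /* ... program the hardware counter ... */
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
        /* ... stop the hardware counter ... */
        list_del(&event->active_entry);
}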
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 45a0a9e81478..dbaf99084112 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -55,8 +55,8 @@ static inline void __list_add_rcu(struct list_head *new,
next->prev = new;
}
#else
-extern void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
+void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next);
#endif
/**
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 39cbb889e20d..3e355c688618 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -50,13 +50,13 @@ extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
-extern void rcutorture_record_test_transition(void);
-extern void rcutorture_record_progress(unsigned long vernum);
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void rcutorture_record_test_transition(void);
+void rcutorture_record_progress(unsigned long vernum);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -65,11 +65,11 @@ static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
-extern void do_trace_rcu_torture_read(const char *rcutorturename,
- struct rcu_head *rhp,
- unsigned long secs,
- unsigned long c_old,
- unsigned long c);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
do { } while (0)
@@ -118,8 +118,8 @@ extern void do_trace_rcu_torture_read(const char *rcutorturename,
* if CPU A and CPU B are the same CPU (but again only if the system has
* more than one CPU).
*/
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -149,8 +149,8 @@ extern void call_rcu(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -171,16 +171,16 @@ extern void call_rcu_bh(struct rcu_head *head,
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
+void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
+void synchronize_sched(void);
#ifdef CONFIG_PREEMPT_RCU
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void rcu_read_unlock_special(struct task_struct *t);
+void __rcu_read_lock(void);
+void __rcu_read_unlock(void);
+void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -216,19 +216,19 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
-extern void rcu_check_callbacks(int cpu, int user);
+void rcu_init(void);
+void rcu_sched_qs(int cpu);
+void rcu_bh_qs(int cpu);
+void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
-extern void rcu_idle_enter(void);
-extern void rcu_idle_exit(void);
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
#ifdef CONFIG_RCU_USER_QS
-extern void rcu_user_enter(void);
-extern void rcu_user_exit(void);
+void rcu_user_enter(void);
+void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
@@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
} while (0)
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-extern bool __rcu_is_watching(void);
+bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
/*
@@ -289,8 +289,8 @@ void wait_rcu_gp(call_rcu_func_t crf);
* initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-extern void init_rcu_head_on_stack(struct rcu_head *head);
-extern void destroy_rcu_head_on_stack(struct rcu_head *head);
+void init_rcu_head_on_stack(struct rcu_head *head);
+void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
@@ -325,6 +325,7 @@ static inline void rcu_lock_release(struct lockdep_map *map)
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
extern int debug_lockdep_rcu_enabled(void);
/**
@@ -362,7 +363,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-extern int rcu_read_lock_bh_held(void);
+int rcu_read_lock_bh_held(void);
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -448,7 +449,7 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU
-extern int rcu_my_thread_group_empty(void);
+int rcu_my_thread_group_empty(void);
/**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met
@@ -548,10 +549,48 @@ static inline void rcu_preempt_sleep_check(void)
smp_read_barrier_depends(); \
(_________p1); \
})
-#define __rcu_assign_pointer(p, v, space) \
+
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
+ */
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other visible only to sparse (__CHECKER__),
+ * neither of which actually execute the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) \
do { \
smp_wmb(); \
- (p) = (typeof(*v) __force space *)(v); \
+ ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
} while (0)
@@ -890,32 +929,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
}
/**
- * rcu_assign_pointer() - assign to RCU-protected pointer
- * @p: pointer to assign to
- * @v: value to assign (publish)
- *
- * Assigns the specified value to the specified RCU-protected
- * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
- *
- * Inserts memory barriers on architectures that require them
- * (which is most of them), and also prevents the compiler from
- * reordering the code that initializes the structure after the pointer
- * assignment. More importantly, this call documents which pointers
- * will be dereferenced by RCU read-side code.
- *
- * In some special cases, you may use RCU_INIT_POINTER() instead
- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- * to the fact that it does not constrain either the CPU or the compiler.
- * That said, using RCU_INIT_POINTER() when you should have used
- * rcu_assign_pointer() is a very bad thing that results in
- * impossible-to-diagnose memory corruption. So please be careful.
- * See the RCU_INIT_POINTER() comment header for details.
- */
-#define rcu_assign_pointer(p, v) \
- __rcu_assign_pointer((p), (v), __rcu)
-
-/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
*
* Initialize an RCU-protected pointer in special cases where readers
@@ -949,7 +962,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
- p = (typeof(*v) __force __rcu *)(v); \
+ p = RCU_INITIALIZER(v); \
} while (0)
/**
@@ -958,7 +971,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
#define RCU_POINTER_INITIALIZER(p, v) \
- .p = (typeof(*v) __force __rcu *)(v)
+ .p = RCU_INITIALIZER(v)
/*
* Does the specified offset indicate that the corresponding rcu_head
@@ -1005,7 +1018,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#ifdef CONFIG_RCU_NOCB_CPU
-extern bool rcu_is_nocb_cpu(int cpu);
+bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
@@ -1013,8 +1026,8 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-extern bool rcu_sys_is_idle(void);
-extern void rcu_sysidle_force_exit(void);
+bool rcu_sys_is_idle(void);
+void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
static inline bool rcu_sys_is_idle(void)
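
An aside (sketch only; struct my_config and the update path are hypothetical): the reworked macros cover three forms of initialization/publication for the same pointer.

#include <linux/rcupdate.h>

struct my_config {
        int threshold;
};

static struct my_config my_boot_cfg = { .threshold = 16 };

/* Static initialization: no readers exist yet, so no barrier needed. */
static struct my_config __rcu *my_cfg = RCU_INITIALIZER(&my_boot_cfg);

static void my_cfg_update(struct my_config *newcfg)
{
        /* Publication: the smp_wmb() in rcu_assign_pointer() orders
         * newcfg's initialization before the pointer becomes visible. */
        rcu_assign_pointer(my_cfg, newcfg);
}

static void my_cfg_clear(void)
{
        /* Documented special case: NULLing the pointer needs no barrier. */
        RCU_INIT_POINTER(my_cfg, NULL);
}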
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 09ebcbe9fd78..6f01771b571c 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -125,7 +125,7 @@ static inline void exit_rcu(void)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 4b9c81548742..72137ee8c603 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
-extern void rcu_cpu_stall_reset(void);
+void rcu_note_context_switch(int cpu);
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
@@ -44,9 +44,9 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched_expedited(void);
-extern void synchronize_rcu_expedited(void);
+void synchronize_rcu_bh(void);
+void synchronize_sched_expedited(void);
+void synchronize_rcu_expedited(void);
void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
@@ -71,25 +71,25 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched_expedited();
}
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+void rcu_barrier(void);
+void rcu_barrier_bh(void);
+void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-extern long rcu_batches_completed_sched(void);
+long rcu_batches_completed(void);
+long rcu_batches_completed_bh(void);
+long rcu_batches_completed_sched(void);
-extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
-extern void rcu_sched_force_quiescent_state(void);
+void rcu_force_quiescent_state(void);
+void rcu_bh_force_quiescent_state(void);
+void rcu_sched_force_quiescent_state(void);
-extern void exit_rcu(void);
+void exit_rcu(void);
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-extern bool rcu_is_watching(void);
+bool rcu_is_watching(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53f97eb8dbc7..3a1e9857b393 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -168,7 +168,6 @@ extern char ___assert_task_state[1 - 2*!!(
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 41467f8ff8ec..31e0193cb0c5 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 75f34949d9ab..3f2867ff0ced 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,6 +130,16 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif
+
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
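
An aside (sketch only; the locks and handoff function are hypothetical): this is the pattern the new primitive serves, and what RCU's tree code relies on; an UNLOCK of one lock followed by a LOCK of another on the same CPU is upgraded to a full barrier.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock_a);
static DEFINE_SPINLOCK(my_lock_b);

static void my_handoff(void)
{
        spin_lock(&my_lock_a);
        /* ... update state guarded by A ... */
        spin_unlock(&my_lock_a);

        spin_lock(&my_lock_b);
        smp_mb__after_unlock_lock();    /* UNLOCK(A)+LOCK(B) -> full barrier */
        /* ... accesses here are ordered after everything above ... */
        spin_unlock(&my_lock_b);
}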
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5128d33bbb39..0175d8663b6c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
- if (!static_key_false(&context_tracking_enabled))
+ if (!context_tracking_is_enabled())
return false;
return tick_nohz_full_running;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 319eae70fe84..e32251e00e62 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,16 +26,13 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
+#include <linux/types.h>
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif
-
#define UPROBE_HANDLER_REMOVE 1
#define UPROBE_HANDLER_MASK 1
@@ -60,6 +57,8 @@ struct uprobe_consumer {
};
#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
@@ -72,35 +71,28 @@ enum uprobe_task_state {
*/
struct uprobe_task {
enum uprobe_task_state state;
- struct arch_uprobe_task autask;
- struct return_instance *return_instances;
- unsigned int depth;
- struct uprobe *active_uprobe;
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ unsigned long vaddr;
+ };
+ struct {
+ struct callback_head dup_xol_work;
+ unsigned long dup_xol_addr;
+ };
+ };
+
+ struct uprobe *active_uprobe;
unsigned long xol_vaddr;
- unsigned long vaddr;
-};
-/*
- * On a breakpoint hit, thread contests for a slot. It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- atomic_t slot_count; /* number of in-use slots */
- unsigned long *bitmap; /* 0 = free slot */
- struct page *page;
-
- /*
- * We keep the vma's vm_start rather than a pointer to the vma
- * itself. The probed process or a naughty kernel module could make
- * the vma go away, and we must handle that reasonably gracefully.
- */
- unsigned long vaddr; /* Page(s) of instruction slots */
+ struct return_instance *return_instances;
+ unsigned int depth;
};
+struct xol_area;
+
struct uprobes_state {
struct xol_area *xol_area;
};
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{
return false;
}
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
- return 0;
-}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index f5b72b364bda..c5165fd256f9 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline bool vtime_accounting_enabled(void)
{
- if (static_key_false(&context_tracking_enabled)) {
- if (context_tracking_active())
+ if (context_tracking_is_enabled()) {
+ if (context_tracking_cpu_is_enabled())
return true;
}
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
index 88b878383797..1c875ad1ee5f 100644
--- a/include/trace/events/ras.h
+++ b/include/trace/events/ras.h
@@ -5,7 +5,7 @@
#define _TRACE_AER_H
#include <linux/tracepoint.h>
-#include <linux/edac.h>
+#include <linux/aer.h>
/*
@@ -63,10 +63,10 @@ TRACE_EVENT(aer_event,
TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
__get_str(dev_name),
- __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
- __entry->severity == HW_EVENT_ERR_FATAL ?
- "Fatal" : "Uncorrected",
- __entry->severity == HW_EVENT_ERR_CORRECTED ?
+ __entry->severity == AER_CORRECTABLE ? "Corrected" :
+ __entry->severity == AER_FATAL ?
+ "Fatal" : "Uncorrected, non-fatal",
+ __entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
__print_flags(__entry->status, "|", aer_uncorrectable_errors))
);
diff --git a/include/uapi/asm-generic/statfs.h b/include/uapi/asm-generic/statfs.h
index 0999647fca13..cb89cc730f0b 100644
--- a/include/uapi/asm-generic/statfs.h
+++ b/include/uapi/asm-generic/statfs.h
@@ -13,7 +13,7 @@
*/
#ifndef __statfs_word
#if __BITS_PER_LONG == 64
-#define __statfs_word long
+#define __statfs_word __kernel_long_t
#else
#define __statfs_word __u32
#endif
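
An aside (sketch only; the struct is a hypothetical mirror of statfs-style fields): __kernel_long_t tracks the kernel's ABI word, which matters on an ABI like x32 where user-space long is 32-bit but the syscall ABI uses 64-bit fields.

#include <linux/statfs.h>

/* Hypothetical mirror of two statfs fields: with __statfs_word equal
 * to __kernel_long_t, these stay kernel-ABI sized even where
 * user-space 'long' differs. */
struct my_statfs_words {
        __statfs_word f_type;
        __statfs_word f_bsize;
};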