Diffstat (limited to 'kernel')
-rw-r--r--  kernel/.gitignore             |  1
-rw-r--r--  kernel/futex.c                |  7
-rw-r--r--  kernel/kexec.c                |  4
-rw-r--r--  kernel/system_certificates.S  | 14
-rw-r--r--  kernel/system_keyring.c       |  4
-rw-r--r--  kernel/workqueue.c            | 32
6 files changed, 33 insertions, 29 deletions
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
config_data.gz
timeconst.h
hz.bc
+x509_certificate_list
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
return -EINVAL;
address -= key->both.offset;
+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+ return -EFAULT;
+
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is valid pointer but we must pin
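Note: the futex.c hunks above move the access_ok() check ahead of the private/shared split, so both paths validate the user address, and pass !ro to __get_user_pages_fast() so a read-only mapping is not pinned for write. A minimal sketch of that validate-then-pin pattern follows; it is not part of the patch, pin_futex_page() is a hypothetical helper, and the access_ok()/__get_user_pages_fast() signatures are the 3.x-era ones used in this diff:

#include <linux/uaccess.h>
#include <linux/mm.h>

/* Hypothetical helper: check the user address first, then pin exactly one
 * page, requesting write access only when the mapping is not read-only. */
static int pin_futex_page(u32 __user *uaddr, int rw, int ro, struct page **page)
{
	unsigned long address = (unsigned long)uaddr;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (__get_user_pages_fast(address, 1, !ro, page) != 1)
		return -EFAULT;	/* real code would fall back to a slow path */

	return 0;
}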
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..d0d8fca54065 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
} else
#endif
{
+ kexec_in_progress = true;
kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
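Note: the kexec.c hunks above add a kexec_in_progress flag that is set just before the shutdown sequence starts. A hedged sketch of how a consumer elsewhere in the kernel might use it; maybe_defer_housekeeping() is hypothetical, and <linux/kexec.h> is assumed to carry the extern declaration:

#include <linux/kexec.h>	/* assumed to declare kexec_in_progress */

/* Hypothetical consumer: skip non-essential work once a kexec reboot has
 * begun, since the new kernel is about to take over anyway. */
static void maybe_defer_housekeeping(void)
{
	if (kexec_in_progress)
		return;

	/* ... normal housekeeping path ... */
}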
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
index 4aef390671cb..3e9868d47535 100644
--- a/kernel/system_certificates.S
+++ b/kernel/system_certificates.S
@@ -3,8 +3,18 @@
__INITRODATA
+ .align 8
.globl VMLINUX_SYMBOL(system_certificate_list)
VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
.incbin "kernel/x509_certificate_list"
- .globl VMLINUX_SYMBOL(system_certificate_list_end)
-VMLINUX_SYMBOL(system_certificate_list_end):
+__cert_list_end:
+
+ .align 8
+ .globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+ .quad __cert_list_end - __cert_list_start
+#else
+ .long __cert_list_end - __cert_list_start
+#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 564dd93430a2..52ebc70263f4 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring;
EXPORT_SYMBOL_GPL(system_trusted_keyring);
extern __initconst const u8 system_certificate_list[];
-extern __initconst const u8 system_certificate_list_end[];
+extern __initconst const unsigned long system_certificate_list_size;
/*
* Load the compiled-in keys
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void)
pr_notice("Loading compiled-in X.509 certificates\n");
- end = system_certificate_list_end;
p = system_certificate_list;
+ end = p + system_certificate_list_size;
while (p < end) {
/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
* than 256 bytes in size.
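Note: taken together, the system_certificates.S and system_keyring.c hunks replace the end-marker symbol with an explicit size word, so the C side computes end = p + system_certificate_list_size and then walks the blob certificate by certificate. The walk relies on each certificate being a DER-encoded SEQUENCE with a two-byte long-form length; a minimal sketch of that length decode is below (der_cert_length() is a hypothetical helper, not from the patch):

#include <linux/types.h>

/* Hypothetical helper: return the total size (header + payload) of the DER
 * certificate starting at p, or 0 if it does not look like one. */
static size_t der_cert_length(const u8 *p, const u8 *end)
{
	size_t plen;

	if (end - p < 4 || p[0] != 0x30)	/* ASN.1 SEQUENCE tag */
		return 0;
	if (p[1] != 0x82)			/* expect a 2-byte long-form length */
		return 0;

	plen = (p[2] << 8) | p[3];		/* payload length */
	return plen + 4;			/* plus the 4-byte header */
}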
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c66912be990f..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2851,19 +2851,6 @@ already_gone:
return false;
}
-static bool __flush_work(struct work_struct *work)
-{
- struct wq_barrier barr;
-
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
-}
-
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
*/
bool flush_work(struct work_struct *work)
{
+ struct wq_barrier barr;
+
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- return __flush_work(work);
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
-
- /*
- * The work item is on-stack and can't lead to deadlock through
- * flushing. Use __flush_work() to avoid spurious lockdep warnings
- * when work_on_cpu()s are nested.
- */
- __flush_work(&wfc.work);
-
+ flush_work(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
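Note: the workqueue.c hunks fold __flush_work() back into flush_work() and switch work_on_cpu() to plain flush_work(). A minimal usage sketch of flush_work(), not part of the patch; my_handler() and my_work are hypothetical names:

#include <linux/workqueue.h>

static void my_handler(struct work_struct *work)
{
	/* ... deferred processing ... */
}

static DECLARE_WORK(my_work, my_handler);

static void example(void)
{
	schedule_work(&my_work);

	/* Block until the last queueing instance has finished executing;
	 * returns true if it had to wait, false if the work was already idle. */
	flush_work(&my_work);
}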