-rw-r--r--   drivers/cpufreq/intel_pstate.c           |  52
-rw-r--r--   drivers/gpu/drm/drm_crtc_helper.c        |  17
-rw-r--r--   drivers/gpu/drm/radeon/atombios_crtc.c   |   6
-rw-r--r--   drivers/gpu/drm/radeon/radeon_asic.c     |   4
-rw-r--r--   drivers/gpu/drm/radeon/radeon_device.c   |   4
-rw-r--r--   drivers/gpu/drm/radeon/radeon_pm.c       |   1
-rw-r--r--   drivers/gpu/drm/radeon/radeon_vm.c       |  11
-rw-r--r--   fs/proc/task_mmu.c                       |   2
-rw-r--r--   include/linux/percpu-refcount.h          |   6
-rw-r--r--   kernel/futex.c                           | 213
-rw-r--r--   kernel/sched/core.c                      |   5
-rw-r--r--   kernel/sched/deadline.c                  |  10
-rw-r--r--   kernel/sched/fair.c                      |  15
-rw-r--r--   kernel/sched/sched.h                     |   2
-rw-r--r--   mm/mempolicy.c                           |   6
-rw-r--r--   mm/rmap.c                                |   3
-rw-r--r--   tools/perf/util/dwarf-aux.c              |   7
-rw-r--r--   tools/perf/util/probe-finder.c           |   4
18 files changed, 259 insertions(+), 109 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eab8ccfe6beb..db2e45b4808e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -40,10 +40,10 @@ #define BYT_TURBO_VIDS 0x66d -#define FRAC_BITS 6 +#define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) -#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS) + static inline int32_t mul_fp(int32_t x, int32_t y) { @@ -59,8 +59,8 @@ struct sample { int32_t core_pct_busy; u64 aperf; u64 mperf; - unsigned long long tsc; int freq; + ktime_t time; }; struct pstate_data { @@ -98,9 +98,9 @@ struct cpudata { struct vid_data vid; struct _pid pid; + ktime_t last_sample_time; u64 prev_aperf; u64 prev_mperf; - unsigned long long prev_tsc; struct sample sample; }; @@ -200,7 +200,10 @@ static signed int pid_calc(struct _pid *pid, int32_t busy) pid->last_err = fp_error; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; - + if (result >= 0) + result = result + (1 << (FRAC_BITS-1)); + else + result = result - (1 << (FRAC_BITS-1)); return (signed int)fp_toint(result); } @@ -560,47 +563,42 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { - int32_t core_pct; - int32_t c0_pct; + int64_t core_pct; + int32_t rem; - core_pct = div_fp(int_tofp((sample->aperf)), - int_tofp((sample->mperf))); - core_pct = mul_fp(core_pct, int_tofp(100)); - FP_ROUNDUP(core_pct); + core_pct = int_tofp(sample->aperf) * int_tofp(100); + core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem); - c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc)); + if ((rem << 1) >= int_tofp(sample->mperf)) + core_pct += 1; sample->freq = fp_toint( mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); - sample->core_pct_busy = mul_fp(core_pct, c0_pct); + sample->core_pct_busy = (int32_t)core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) { u64 aperf, mperf; - unsigned long long tsc; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - tsc = native_read_tsc(); aperf = aperf >> FRAC_BITS; mperf = mperf >> FRAC_BITS; - tsc = tsc >> FRAC_BITS; + cpu->last_sample_time = cpu->sample.time; + cpu->sample.time = ktime_get(); cpu->sample.aperf = aperf; cpu->sample.mperf = mperf; - cpu->sample.tsc = tsc; cpu->sample.aperf -= cpu->prev_aperf; cpu->sample.mperf -= cpu->prev_mperf; - cpu->sample.tsc -= cpu->prev_tsc; intel_pstate_calc_busy(cpu, &cpu->sample); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; - cpu->prev_tsc = tsc; } static inline void intel_pstate_set_sample_time(struct cpudata *cpu) @@ -614,13 +612,25 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { - int32_t core_busy, max_pstate, current_pstate; + int32_t core_busy, max_pstate, current_pstate, sample_ratio; + u32 duration_us; + u32 sample_time; core_busy = cpu->sample.core_pct_busy; max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); - return FP_ROUNDUP(core_busy); + + sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); + duration_us = (u32) ktime_us_delta(cpu->sample.time, + cpu->last_sample_time); + if (duration_us > sample_time * 3) { + sample_ratio = div_fp(int_tofp(sample_time), + int_tofp(duration_us)); + core_busy = mul_fp(core_busy, sample_ratio); + } 
+ + return core_busy; } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index df281b54db01..872ba11c4533 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -29,6 +29,7 @@ * Jesse Barnes <jesse.barnes@intel.com> */ +#include <linux/kernel.h> #include <linux/export.h> #include <linux/moduleparam.h> @@ -88,7 +89,13 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) struct drm_connector *connector; struct drm_device *dev = encoder->dev; - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + /* + * We can expect this mutex to be locked if we are not panicking. + * Locking is currently fubar in the panic handler. + */ + if (!oops_in_progress) + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder) return true; @@ -112,7 +119,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) struct drm_encoder *encoder; struct drm_device *dev = crtc->dev; - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + /* + * We can expect this mutex to be locked if we are not panicking. + * Locking is currently fubar in the panic handler. + */ + if (!oops_in_progress) + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c31c12b4e666..e911898348f8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: radeon_crtc->enabled = true; - /* adjust pm to dpms changes BEFORE enabling crtcs */ - radeon_pm_compute_clocks(rdev); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* adjust pm to dpms changes AFTER disabling crtcs */ - radeon_pm_compute_clocks(rdev); break; } + /* adjust pm to dpms */ + radeon_pm_compute_clocks(rdev); } static void diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index be20e62dac83..e5f0177bea1e 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2049,8 +2049,8 @@ static struct radeon_asic ci_asic = { .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &cik_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &cik_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 14671406212f..2cd144c378d6 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1558,6 +1558,10 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) drm_kms_helper_poll_enable(dev); + /* set the power state here in case we are a PX system or headless */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && 
rdev->pm.dpm_enabled) + radeon_pm_compute_clocks(rdev); + if (fbcon) { radeon_fbdev_set_suspend(rdev, 0); console_unlock(); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 53d6e1bb48dc..2bdae61c0ac0 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1104,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) if (ret) goto dpm_resume_fail; rdev->pm.dpm_enabled = true; - radeon_pm_compute_clocks(rdev); return; dpm_resume_fail: diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 1f426696de36..c11b71d249e3 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -132,7 +132,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_cs_reloc *list; unsigned i, idx; - list = kmalloc_array(vm->max_pde_used + 1, + list = kmalloc_array(vm->max_pde_used + 2, sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (!list) return NULL; @@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, { static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; - uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); + struct radeon_bo *pd = vm->page_directory; + uint64_t pd_addr = radeon_bo_gpu_offset(pd); uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct radeon_ib ib; @@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, incr, R600_PTE_VALID); if (ib.length_dw != 0) { + radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { @@ -689,15 +691,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; + struct radeon_bo *pt = vm->page_tables[pt_idx].bo; unsigned nptes; uint64_t pte; + radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); + if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = RADEON_VM_PTE_COUNT - (addr & mask); - pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo); + pte = radeon_bo_gpu_offset(pt); pte += (addr & mask) * 8; if ((last_pte + 8 * count) != pte) { diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 442177b1119a..c4b2646b6d7c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1351,7 +1351,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, struct numa_maps *md; struct page *page; - if (pte_none(*pte)) + if (!pte_present(*pte)) return 0; page = pte_page(*pte); diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 95961f0bf62d..0afb48fd449d 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref) pcpu_count = ACCESS_ONCE(ref->pcpu_count); if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) - __this_cpu_inc(*pcpu_count); + this_cpu_inc(*pcpu_count); else atomic_inc(&ref->count); @@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) pcpu_count = ACCESS_ONCE(ref->pcpu_count); if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { - __this_cpu_inc(*pcpu_count); + this_cpu_inc(*pcpu_count); ret = true; } @@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref) pcpu_count = ACCESS_ONCE(ref->pcpu_count); if 
(likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) - __this_cpu_dec(*pcpu_count); + this_cpu_dec(*pcpu_count); else if (unlikely(atomic_dec_and_test(&ref->count))) ref->release(ref); diff --git a/kernel/futex.c b/kernel/futex.c index 81dbe773ce4c..de938d20df19 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -743,10 +743,58 @@ void exit_pi_state_list(struct task_struct *curr) raw_spin_unlock_irq(&curr->pi_lock); } +/* + * We need to check the following states: + * + * Waiter | pi_state | pi->owner | uTID | uODIED | ? + * + * [1] NULL | --- | --- | 0 | 0/1 | Valid + * [2] NULL | --- | --- | >0 | 0/1 | Valid + * + * [3] Found | NULL | -- | Any | 0/1 | Invalid + * + * [4] Found | Found | NULL | 0 | 1 | Valid + * [5] Found | Found | NULL | >0 | 1 | Invalid + * + * [6] Found | Found | task | 0 | 1 | Valid + * + * [7] Found | Found | NULL | Any | 0 | Invalid + * + * [8] Found | Found | task | ==taskTID | 0/1 | Valid + * [9] Found | Found | task | 0 | 0 | Invalid + * [10] Found | Found | task | !=taskTID | 0/1 | Invalid + * + * [1] Indicates that the kernel can acquire the futex atomically. We + * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. + * + * [2] Valid, if TID does not belong to a kernel thread. If no matching + * thread is found then it indicates that the owner TID has died. + * + * [3] Invalid. The waiter is queued on a non PI futex + * + * [4] Valid state after exit_robust_list(), which sets the user space + * value to FUTEX_WAITERS | FUTEX_OWNER_DIED. + * + * [5] The user space value got manipulated between exit_robust_list() + * and exit_pi_state_list() + * + * [6] Valid state after exit_pi_state_list() which sets the new owner in + * the pi_state but cannot access the user space value. + * + * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set. + * + * [8] Owner and user space value match + * + * [9] There is no transient state which sets the user space TID to 0 + * except exit_robust_list(), but this is indicated by the + * FUTEX_OWNER_DIED bit. See [4] + * + * [10] There is no transient state which leaves owner and user space + * TID out of sync. + */ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, - union futex_key *key, struct futex_pi_state **ps, - struct task_struct *task) + union futex_key *key, struct futex_pi_state **ps) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; @@ -756,12 +804,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, plist_for_each_entry_safe(this, next, &hb->chain, list) { if (match_futex(&this->key, key)) { /* - * Another waiter already exists - bump up - * the refcount and return its pi_state: + * Sanity check the waiter before increasing + * the refcount and attaching to it. */ pi_state = this->pi_state; /* - * Userspace might have messed up non-PI and PI futexes + * Userspace might have messed up non-PI and + * PI futexes [3] */ if (unlikely(!pi_state)) return -EINVAL; @@ -769,44 +818,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, WARN_ON(!atomic_read(&pi_state->refcount)); /* - * When pi_state->owner is NULL then the owner died - * and another waiter is on the fly. pi_state->owner - * is fixed up by the task which acquires - * pi_state->rt_mutex. - * - * We do not check for pid == 0 which can happen when - * the owner died and robust_list_exit() cleared the - * TID. + * Handle the owner died case: */ - if (pid && pi_state->owner) { + if (uval & FUTEX_OWNER_DIED) { /* - * Bail out if user space manipulated the - * futex value. 
+ * exit_pi_state_list sets owner to NULL and + * wakes the topmost waiter. The task which + * acquires the pi_state->rt_mutex will fixup + * owner. */ - if (pid != task_pid_vnr(pi_state->owner)) + if (!pi_state->owner) { + /* + * No pi state owner, but the user + * space TID is not 0. Inconsistent + * state. [5] + */ + if (pid) + return -EINVAL; + /* + * Take a ref on the state and + * return. [4] + */ + goto out_state; + } + + /* + * If TID is 0, then either the dying owner + * has not yet executed exit_pi_state_list() + * or some waiter acquired the rtmutex in the + * pi state, but did not yet fixup the TID in + * user space. + * + * Take a ref on the state and return. [6] + */ + if (!pid) + goto out_state; + } else { + /* + * If the owner died bit is not set, + * then the pi_state must have an + * owner. [7] + */ + if (!pi_state->owner) return -EINVAL; } /* - * Protect against a corrupted uval. If uval - * is 0x80000000 then pid is 0 and the waiter - * bit is set. So the deadlock check in the - * calling code has failed and we did not fall - * into the check above due to !pid. + * Bail out if user space manipulated the + * futex value. If pi state exists then the + * owner TID must be the same as the user + * space TID. [9/10] */ - if (task && pi_state->owner == task) - return -EDEADLK; + if (pid != task_pid_vnr(pi_state->owner)) + return -EINVAL; + out_state: atomic_inc(&pi_state->refcount); *ps = pi_state; - return 0; } } /* * We are the first waiter - try to look up the real owner and attach - * the new pi_state to it, but bail out when TID = 0 + * the new pi_state to it, but bail out when TID = 0 [1] */ if (!pid) return -ESRCH; @@ -839,6 +914,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, return ret; } + /* + * No existing pi state. First waiter. [2] + */ pi_state = alloc_pi_state(); /* @@ -910,10 +988,18 @@ retry: return -EDEADLK; /* - * Surprise - we got the lock. Just return to userspace: + * Surprise - we got the lock, but we do not trust user space at all. */ - if (unlikely(!curval)) - return 1; + if (unlikely(!curval)) { + /* + * We verify whether there is kernel state for this + * futex. If not, we can safely assume, that the 0 -> + * TID transition is correct. If state exists, we do + * not bother to fixup the user space state as it was + * corrupted already. + */ + return futex_top_waiter(hb, key) ? -EINVAL : 1; + } uval = curval; @@ -951,7 +1037,7 @@ retry: * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ - ret = lookup_pi_state(uval, hb, key, ps, task); + ret = lookup_pi_state(uval, hb, key, ps); if (unlikely(ret)) { switch (ret) { @@ -1044,6 +1130,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 uninitialized_var(curval), newval; + int ret = 0; if (!pi_state) return -EINVAL; @@ -1067,23 +1154,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) new_owner = this->task; /* - * We pass it to the next owner. (The WAITERS bit is always - * kept enabled while there is PI state around. We must also - * preserve the owner died bit.) + * We pass it to the next owner. The WAITERS bit is always + * kept enabled while there is PI state around. We cleanup the + * owner died bit, because we are the owner. 
*/ - if (!(uval & FUTEX_OWNER_DIED)) { - int ret = 0; - - newval = FUTEX_WAITERS | task_pid_vnr(new_owner); + newval = FUTEX_WAITERS | task_pid_vnr(new_owner); - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) - ret = -EFAULT; - else if (curval != uval) - ret = -EINVAL; - if (ret) { - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); - return ret; - } + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) + ret = -EFAULT; + else if (curval != uval) + ret = -EINVAL; + if (ret) { + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + return ret; } raw_spin_lock_irq(&pi_state->owner->pi_lock); @@ -1442,6 +1525,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, if (requeue_pi) { /* + * Requeue PI only works on two distinct uaddrs. This + * check is only valid for private futexes. See below. + */ + if (uaddr1 == uaddr2) + return -EINVAL; + + /* * requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ @@ -1479,6 +1569,15 @@ retry: if (unlikely(ret != 0)) goto out_put_key1; + /* + * The check above which compares uaddrs is not sufficient for + * shared futexes. We need to compare the keys: + */ + if (requeue_pi && match_futex(&key1, &key2)) { + ret = -EINVAL; + goto out_put_keys; + } + hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); @@ -1544,7 +1643,7 @@ retry_private: * rereading and handing potential crap to * lookup_pi_state. */ - ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL); + ret = lookup_pi_state(ret, hb2, &key2, &pi_state); } switch (ret) { @@ -2327,9 +2426,10 @@ retry: /* * To avoid races, try to do the TID -> 0 atomic transition * again. If it succeeds then we can return without waking - * anyone else up: + * anyone else up. We only try this if neither the waiters nor + * the owner died bit are set. */ - if (!(uval & FUTEX_OWNER_DIED) && + if (!(uval & ~FUTEX_TID_MASK) && cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) goto pi_faulted; /* @@ -2359,11 +2459,9 @@ retry: /* * No waiters - kernel unlocks the futex: */ - if (!(uval & FUTEX_OWNER_DIED)) { - ret = unlock_futex_pi(uaddr, uval); - if (ret == -EFAULT) - goto pi_faulted; - } + ret = unlock_futex_pi(uaddr, uval); + if (ret == -EFAULT) + goto pi_faulted; out_unlock: spin_unlock(&hb->lock); @@ -2525,6 +2623,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (ret) goto out_key2; + /* + * The check above which compares uaddrs is not sufficient for + * shared futexes. We need to compare the keys: + */ + if (match_futex(&q.key, &key2)) { + ret = -EINVAL; + goto out_put_keys; + } + /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ futex_wait_queue_me(hb, &q, to); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0a7251678982..084d17f89139 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3685,7 +3685,7 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, if (retval) return retval; - if (attr.sched_policy < 0) + if ((int)attr.sched_policy < 0) return -EINVAL; rcu_read_lock(); @@ -7751,8 +7751,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) /* restart the period timer (if active) to handle new period expiry */ if (runtime_enabled && cfs_b->timer_active) { /* force a reprogram */ - cfs_b->timer_active = 0; - __start_cfs_bandwidth(cfs_b); + __start_cfs_bandwidth(cfs_b, true); } raw_spin_unlock_irq(&cfs_b->lock); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 800e99b99075..14bc348ba3b4 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -513,9 +513,17 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) struct sched_dl_entity, dl_timer); struct task_struct *p = dl_task_of(dl_se); - struct rq *rq = task_rq(p); + struct rq *rq; +again: + rq = task_rq(p); raw_spin_lock(&rq->lock); + if (rq != task_rq(p)) { + /* Task was moved, retrying. */ + raw_spin_unlock(&rq->lock); + goto again; + } + /* * We need to take care of a possible races here. In fact, the * task might have changed its scheduling policy to something diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0fdb96de81a5..8cbe2d2c16de 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1707,18 +1707,19 @@ no_join: void task_numa_free(struct task_struct *p) { struct numa_group *grp = p->numa_group; - int i; void *numa_faults = p->numa_faults_memory; + unsigned long flags; + int i; if (grp) { - spin_lock_irq(&grp->lock); + spin_lock_irqsave(&grp->lock, flags); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] -= p->numa_faults_memory[i]; grp->total_faults -= p->total_numa_faults; list_del(&p->numa_entry); grp->nr_tasks--; - spin_unlock_irq(&grp->lock); + spin_unlock_irqrestore(&grp->lock, flags); rcu_assign_pointer(p->numa_group, NULL); put_numa_group(grp); } @@ -3129,7 +3130,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) */ if (!cfs_b->timer_active) { __refill_cfs_bandwidth_runtime(cfs_b); - __start_cfs_bandwidth(cfs_b); + __start_cfs_bandwidth(cfs_b, false); } if (cfs_b->runtime > 0) { @@ -3308,7 +3309,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) raw_spin_lock(&cfs_b->lock); list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); if (!cfs_b->timer_active) - __start_cfs_bandwidth(cfs_b); + __start_cfs_bandwidth(cfs_b, false); raw_spin_unlock(&cfs_b->lock); } @@ -3690,7 +3691,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) } /* requires cfs_b->lock, may release to reprogram timer */ -void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force) { /* * The timer may be active because we're trying to set a new bandwidth @@ -3705,7 +3706,7 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cpu_relax(); raw_spin_lock(&cfs_b->lock); /* if someone else restarted the timer then we're done */ - if (cfs_b->timer_active) + if (!force && cfs_b->timer_active) return; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 456e492a3dca..369b4d663c42 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -278,7 +278,7 @@ extern void init_cfs_bandwidth(struct 
cfs_bandwidth *cfs_b); extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); +extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); extern void free_rt_sched_group(struct task_group *tg); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 78e1472933ea..30cc47f8ffa0 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -526,9 +526,13 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma, int nid; struct page *page; spinlock_t *ptl; + pte_t entry; ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); - page = pte_page(huge_ptep_get((pte_t *)pmd)); + entry = huge_ptep_get((pte_t *)pmd); + if (!pte_present(entry)) + goto unlock; + page = pte_page(entry); nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) goto unlock; diff --git a/mm/rmap.c b/mm/rmap.c index 9c3e77396d1a..83bfafabb47b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1564,10 +1564,9 @@ void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; + anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); - - anon_vma_free(anon_vma); } static struct anon_vma *rmap_walk_anon_lock(struct page *page, diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 7defd77105d0..cc66c4049e09 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -747,14 +747,17 @@ struct __find_variable_param { static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) { struct __find_variable_param *fvp = data; + Dwarf_Attribute attr; int tag; tag = dwarf_tag(die_mem); if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && - die_compare_name(die_mem, fvp->name)) + die_compare_name(die_mem, fvp->name) && + /* Does the DIE have location information or external instance? */ + (dwarf_attr(die_mem, DW_AT_external, &attr) || + dwarf_attr(die_mem, DW_AT_location, &attr))) return DIE_FIND_CB_END; - if (dwarf_haspc(die_mem, fvp->addr)) return DIE_FIND_CB_CONTINUE; else diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 562762117639..9d8eb26f0533 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -511,12 +511,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, &pf->sp_die, pf->tvar); - if (ret == -ENOENT) + if (ret == -ENOENT || ret == -EINVAL) pr_err("Failed to find the location of %s at this address.\n" " Perhaps, it has been optimized out.\n", pf->pvar->var); else if (ret == -ENOTSUP) pr_err("Sorry, we don't support this variable location yet.\n"); - else if (pf->pvar->field) { + else if (ret == 0 && pf->pvar->field) { ret = convert_variable_fields(vr_die, pf->pvar->var, pf->pvar->field, &pf->tvar->ref, &die_mem); |
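
Note on the intel_pstate hunks above: the patch moves the driver from 6 to 8 fractional bits and has pid_calc() round to nearest before converting out of fixed point, instead of the old FP_ROUNDUP() which always rounded up. The following is a minimal standalone sketch of that adjustment (plain userspace C, not kernel code; the macros simply mirror the ones defined in the patch, and the sample values are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Same fixed-point layout as the patch: 8 fractional bits. */
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/*
 * Mirror of the adjustment pid_calc() now applies: add (or, for negative
 * results, subtract) half of one integer step before the truncating shift,
 * so positive results convert to the nearest integer instead of always
 * being rounded down.
 */
static int32_t fp_round(int32_t x)
{
	if (x >= 0)
		x += 1 << (FRAC_BITS - 1);
	else
		x -= 1 << (FRAC_BITS - 1);
	return (int32_t)fp_toint(x);
}

int main(void)
{
	int32_t a = (int32_t)(int_tofp(9) / 4);		/* 2.25 in 8.8 fixed point */
	int32_t b = (int32_t)(int_tofp(11) / 4);	/* 2.75 in 8.8 fixed point */

	/* Prints "2 3": 2.25 rounds down to 2, 2.75 rounds up to 3. */
	printf("%d %d\n", fp_round(a), fp_round(b));
	return 0;
}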