author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-13 12:38:26 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-13 12:38:26 -0800
commit | 8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree | a4ba543649219cbb28d91aab65b785d763f5d069 /drivers
parent | 6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent | 450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle are:
- Another attempt at enabling cross-release lockdep dependency
tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
with better performance and fewer false positives. (Byungchul Park)
- Introduce lockdep_assert_irqs_enabled()/disabled() and convert
  open-coded equivalents to the lockdep variants (Frederic Weisbecker;
  a usage sketch follows the commit list below)
- Add down_read_killable() and use it in the VFS's iterate_dir()
  method (Kirill Tkhai; see the sketch right after this message)
- Convert remaining uses of ACCESS_ONCE() to
  READ_ONCE()/WRITE_ONCE(); most of the conversion was
  Coccinelle-driven (Mark Rutland, Paul E. McKenney; an illustration
  of the new accessors appears just before the diff below)
- Get rid of lockless_dereference() by strengthening Alpha atomics and
  strengthening READ_ONCE() with smp_read_barrier_depends(), which
  makes it possible to convert users of lockless_dereference() to
  READ_ONCE(). (Will Deacon)
- Various micro-optimizations:
- better PV qspinlocks (Waiman Long)
- better x86 barriers (Michael S. Tsirkin)
- better x86 refcounts (Kees Cook)
- ... plus other fixes and enhancements. (Borislav Petkov, Juergen
Gross, Miguel Bernal Marin)"
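To make the down_read_killable() addition concrete, here is a minimal usage sketch. The struct and function are hypothetical illustrations, not code from this series; only down_read_killable()/up_read() are the real API:

```c
#include <linux/rwsem.h>
#include <linux/errno.h>

/* Hypothetical device with configuration protected by an rwsem. */
struct cfg_dev {
	struct rw_semaphore cfg_sem;
	int cfg;
};

/*
 * down_read_killable() behaves like down_read(), except that a task
 * sleeping on the semaphore can be terminated by a fatal signal, in
 * which case the call returns -EINTR instead of blocking forever in
 * uninterruptible state -- the problem iterate_dir() had.
 */
static int cfg_dev_read(struct cfg_dev *dev, int *out)
{
	if (down_read_killable(&dev->cfg_sem))
		return -EINTR;
	*out = dev->cfg;
	up_read(&dev->cfg_sem);
	return 0;
}
```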
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
rcu: Use lockdep to assert IRQs are disabled/enabled
netpoll: Use lockdep to assert IRQs are disabled/enabled
timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
irq_work: Use lockdep to assert IRQs are disabled/enabled
irq/timings: Use lockdep to assert IRQs are disabled/enabled
perf/core: Use lockdep to assert IRQs are disabled/enabled
x86: Use lockdep to assert IRQs are disabled/enabled
smp/core: Use lockdep to assert IRQs are disabled/enabled
timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
timers/nohz: Use lockdep to assert IRQs are disabled/enabled
workqueue: Use lockdep to assert IRQs are disabled/enabled
irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
locking/pvqspinlock: Implement hybrid PV queued/unfair locks
locking/rwlocks: Fix comments
x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
...
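Most of the entries above replace open-coded IRQ-state checks with the two new assertion APIs. A minimal before/after sketch follows; the function and the per-CPU detail are hypothetical, only the lockdep macros come from this series:

```c
#include <linux/lockdep.h>
#include <linux/bug.h>

/*
 * Hypothetical helper that must only run with interrupts disabled.
 * The old open-coded check costs an irqs_disabled() test on every
 * call in every build; the lockdep assertion compiles away entirely
 * unless lockdep's IRQ-state tracking is configured in.
 */
static void update_percpu_counters(void)
{
	/* before: WARN_ON_ONCE(!irqs_disabled()); */
	lockdep_assert_irqs_disabled();

	/* ... touch per-CPU state that an IRQ handler also touches ... */
}
```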
Diffstat (limited to 'drivers')
90 files changed, 229 insertions, 230 deletions
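The diff below is almost entirely the mechanical ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion (plus lockless_dereference() to READ_ONCE() in dm-mpath). Here is a minimal sketch of what the new accessors guarantee; the variable and functions are hypothetical:

```c
#include <linux/compiler.h>

static unsigned long shared_count;	/* shared without a lock */

/*
 * A marked WRITE_ONCE() store pairs with a marked READ_ONCE() load:
 * the compiler may not tear, fuse, or invent the access, which is
 * what the old ACCESS_ONCE() volatile cast provided for scalar types.
 * READ_ONCE(), strengthened in this series to imply
 * smp_read_barrier_depends(), also covers the pointer loads that
 * previously needed lockless_dereference().
 */
static void producer_bump(void)
{
	WRITE_ONCE(shared_count, READ_ONCE(shared_count) + 1);
}

static unsigned long consumer_snapshot(void)
{
	/* take one snapshot so every later use sees the same value */
	return READ_ONCE(shared_count);
}
```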
diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..4b8ba2a75a4d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev) * so be careful about accessing it. dev->bus and dev->class should * never change once they are set, so they don't need special care. */ - drv = ACCESS_ONCE(dev->driver); + drv = READ_ONCE(dev->driver); return drv ? drv->name : (dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "")); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..41d7c2b99f69 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) if (!dev->power.use_autosuspend) goto out; - autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); + autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) goto out; - last_busy = ACCESS_ONCE(dev->power.last_busy); + last_busy = READ_ONCE(dev->power.last_busy); elapsed = jiffies - last_busy; if (elapsed < 0) goto out; /* jiffies has wrapped around. */ diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..6c7ccac2679e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) return; retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); + entropy_count = orig = READ_ONCE(r->entropy_count); if (nfrac < 0) { /* Debit */ entropy_count += nfrac; @@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, /* Can we pull enough? */ retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); + entropy_count = orig = READ_ONCE(r->entropy_count); ibytes = nbytes; /* never pull more than available */ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 39e489a96ad7..60da2537bef9 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) if (readl_relaxed(timer->control) & timer->match_mask) { writel_relaxed(timer->match_mask, timer->control); - event_handler = ACCESS_ONCE(timer->evt.event_handler); + event_handler = READ_ONCE(timer->evt.event_handler); if (event_handler) event_handler(&timer->evt); return IRQ_HANDLED; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d258953ff488..f4f258075b89 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg) while (rd_reg32(&jrp->rregs->outring_used)) { - head = ACCESS_ONCE(jrp->head); + head = READ_ONCE(jrp->head); spin_lock(&jrp->outlock); @@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, spin_lock_bh(&jrp->inplock); head = jrp->head; - tail = ACCESS_ONCE(jrp->tail); + tail = READ_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 874ddf5e9087..0f20f5ec9617 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, ktime_t start = wmem->start, now = ktime_get(); ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); 
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { + while (!(READ_ONCE(csb->flags) & CSB_V)) { cpu_relax(); now = ktime_get(); if (ktime_after(now, timeout)) diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 346c4987b284..11d6419788c2 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file, /* * To trigger the error, we need to read the data back * (the data was written with errors above). - * The ACCESS_ONCE macros and printk are used to prevent the + * The READ_ONCE macros and printk are used to prevent the * the compiler optimizing these reads out. */ - reg = ACCESS_ONCE(ptemp[0]); - read_reg = ACCESS_ONCE(ptemp[1]); + reg = READ_ONCE(ptemp[0]); + read_reg = READ_ONCE(ptemp[1]); /* Force Read */ rmb(); @@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file, for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) { /* Read data so we're in the correct state */ rmb(); - if (ACCESS_ONCE(ptemp[i])) + if (READ_ONCE(ptemp[i])) result = -1; /* Toggle Error bit (it is latched), leave ECC enabled */ writel(error_mask, (drvdata->base + priv->set_err_ofst)); @@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file, /* Read out written data. ECC error caused here */ for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++) - if (ACCESS_ONCE(ptemp[i]) != i) + if (READ_ONCE(ptemp[i]) != i) edac_printk(KERN_ERR, EDAC_DEVICE, "Read doesn't match written data\n"); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267dc25..ccf52368a073 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, __le16 res_count, next_res_count; i = ar_first_buffer_index(ctx); - res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); + res_count = READ_ONCE(ctx->descriptors[i].res_count); /* A buffer that is not yet completely filled must be the last one. */ while (i != last && res_count == 0) { @@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, /* Peek at the next descriptor. */ next_i = ar_next_buffer_index(i); rmb(); /* read descriptors in order */ - next_res_count = ACCESS_ONCE( - ctx->descriptors[next_i].res_count); + next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); /* * If the next descriptor is still empty, we must stop at this * descriptor. 
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { next_i = ar_next_buffer_index(next_i); rmb(); - next_res_count = ACCESS_ONCE( - ctx->descriptors[next_i].res_count); + next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); if (next_res_count != cpu_to_le16(PAGE_SIZE)) goto next_buffer_is_active; } @@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context, u32 buffer_dma; req_count = le16_to_cpu(last->req_count); - res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); + res_count = le16_to_cpu(READ_ONCE(last->res_count)); completed = req_count - res_count; buffer_dma = le32_to_cpu(last->data_address); diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c index a01461d63f68..00de793e6423 100644 --- a/drivers/firmware/tegra/ivc.c +++ b/drivers/firmware/tegra/ivc.c @@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, { /* * This function performs multiple checks on the same values with - * security implications, so create snapshots with ACCESS_ONCE() to + * security implications, so create snapshots with READ_ONCE() to * ensure that these checks use the same values. */ - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * Perform an over-full check to prevent denial of service attacks @@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct tegra_ivc_header *header) { - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * Invalid cases where the counters indicate that the queue is over @@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc, static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct tegra_ivc_header *header) { - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * This function isn't expected to be used in scenarios where an @@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) { - ACCESS_ONCE(ivc->tx.channel->tx.count) = - ACCESS_ONCE(ivc->tx.channel->tx.count) + 1; + WRITE_ONCE(ivc->tx.channel->tx.count, + READ_ONCE(ivc->tx.channel->tx.count) + 1); if (ivc->tx.position == ivc->num_frames - 1) ivc->tx.position = 0; @@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) { - ACCESS_ONCE(ivc->rx.channel->rx.count) = - ACCESS_ONCE(ivc->rx.channel->rx.count) + 1; + WRITE_ONCE(ivc->rx.channel->rx.count, + READ_ONCE(ivc->rx.channel->rx.count) + 1); if (ivc->rx.position == ivc->num_frames - 1) ivc->rx.position = 0; @@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc) /* Copy the receiver's state out of shared memory. 
*/ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); - state = ACCESS_ONCE(ivc->rx.channel->tx.state); + state = READ_ONCE(ivc->rx.channel->tx.state); if (state == TEGRA_IVC_STATE_SYNC) { offset = offsetof(struct tegra_ivc_header, tx.count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 333bad749067..303b5e099a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); + uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); struct dma_fence *fence, **ptr; int r; @@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) amdgpu_fence_process(ring); emitted = 0x100000000ull; emitted -= atomic_read(&ring->fence_drv.last_seq); - emitted += ACCESS_ONCE(ring->fence_drv.sync_seq); + emitted += READ_ONCE(ring->fence_drv.sync_seq); return lower_32_bits(emitted); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..6149a47fe63d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) seq_printf(m, "\t0x%08x: %12ld byte %s", id, amdgpu_bo_size(bo), placement); - offset = ACCESS_ONCE(bo->tbo.mem.start); + offset = READ_ONCE(bo->tbo.mem.start); if (offset != AMDGPU_BO_INVALID_OFFSET) seq_printf(m, " @ 0x%010Lx", offset); - pin_count = ACCESS_ONCE(bo->pin_count); + pin_count = READ_ONCE(bo->pin_count); if (pin_count) seq_printf(m, " pin count %d", pin_count); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..a25f6c72f219 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) if (kfifo_is_empty(&entity->job_queue)) return false; - if (ACCESS_ONCE(entity->dependency)) + if (READ_ONCE(entity->dependency)) return false; return true; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..cf3deb283da5 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, else r = 0; - cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); + cur_placement = READ_ONCE(robj->tbo.mem.mem_type); args->domain = radeon_mem_type_to_domain(cur_placement); drm_gem_object_put_unlocked(gobj); return r; @@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, r = ret; /* Flush HDP cache via MMIO if necessary */ - cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); + cur_placement = READ_ONCE(robj->tbo.mem.mem_type); if (rdev->asic->mmio_hdp_flush && radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) robj->rdev->asic->mmio_hdp_flush(rdev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index a552e4ea5440..6ac094ee8983 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, if (unlikely(drm_is_render_client(file_priv))) 
require_exist = true; - if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { + if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { DRM_ERROR("Locked master refused legacy " "surface reference.\n"); return -EACCES; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..97bea2e1aa6a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, if (sc->flags & SCF_FROZEN) { wait_event_interruptible_timeout( dd->event_queue, - !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), + !(READ_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (dd->flags & HFI1_FROZEN) return -ENOLCK; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 7108a4b5e94c..75e740780285 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1423,14 +1423,14 @@ retry: goto done; } /* copy from receiver cache line and recalculate */ - sc->alloc_free = ACCESS_ONCE(sc->free); + sc->alloc_free = READ_ONCE(sc->free); avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); if (blocks > avail) { /* still no room, actively update */ sc_release_update(sc); - sc->alloc_free = ACCESS_ONCE(sc->free); + sc->alloc_free = READ_ONCE(sc->free); trycount++; goto retry; } @@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc) /* call sent buffer callbacks */ code = -1; /* code not yet set */ - head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */ + head = READ_ONCE(sc->sr_head); /* snapshot the head */ tail = sc->sr_tail; while (head != tail) { pbuf = &sc->sr[tail].pbuf; diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b3291f0fde9a..a7fc664f0d4e 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp) again: smp_read_barrier_depends(); /* see post_one_send() */ - if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) + if (sqp->s_last == READ_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 6781bcdb10b3..08346d25441c 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1725,7 +1725,7 @@ retry: swhead = sde->descq_head & sde->sdma_mask; /* this code is really bad for cache line trading */ - swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; cnt = sde->descq_cnt; if (swhead < swtail) @@ -1872,7 +1872,7 @@ retry: if ((status & sde->idle_mask) && !idle_check_done) { u16 swtail; - swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; if (swtail != hwhead) { hwhead = (u16)read_sde_csr(sde, SD(HEAD)); idle_check_done = 1; @@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) u16 len; head = sde->descq_head & sde->sdma_mask; - tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; seq_printf(s, SDE_FMT, sde->this_idx, sde->cpu, sdma_state_name(sde->state.current_state), @@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde) return -EINVAL; } while (1) { - nr = ffz(ACCESS_ONCE(sde->ahg_bits)); + nr = ffz(READ_ONCE(sde->ahg_bits)); if (nr > 31) { trace_hfi1_ahg_allocate(sde, 
-ENOSPC); return -ENOSPC; diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 107011d8613b..374c59784950 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde) { return sde->descq_cnt - (sde->descq_tail - - ACCESS_ONCE(sde->descq_head)) - 1; + READ_ONCE(sde->descq_head)) - 1; } static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 0b646173ca22..9a31c585427f 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { @@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* Check if send work queue is empty. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { + if (qp->s_cur == READ_ONCE(qp->s_head)) { clear_ahg(qp); goto bail; } diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 2ba74fdd6f15..7fec6b984e3e 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { @@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) /* see post_one_send() */ smp_read_barrier_depends(); - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c0c0e0445cbf..8ec6e8a8d6f7 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, /* Wait until all requests have been freed. 
*/ wait_event_interruptible( pq->wait, - (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); + (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); @@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (ret != -EBUSY) { req->status = ret; WRITE_ONCE(req->has_error, 1); - if (ACCESS_ONCE(req->seqcomp) == + if (READ_ONCE(req->seqcomp) == req->seqsubmitted - 1) goto free_req; return ret; @@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) */ if (req->data_len) { iovec = &req->iovs[req->iov_idx]; - if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { + if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { if (++req->iov_idx == req->data_iovs) { ret = -EFAULT; goto free_txreq; @@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) } else { if (status != SDMA_TXREQ_S_OK) req->status = status; - if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && + if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && (READ_ONCE(req->done) || READ_ONCE(req->has_error))) { user_sdma_free_request(req, false); diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 53efbb0b40c4..9a37e844d4c8 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) again: smp_read_barrier_depends(); /* see post_one_send() */ - if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) + if (sqp->s_last == READ_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 498e2202e72c..bddcc37ace44 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* Check if send work queue is empty. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; /* * Start a new request. diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index be4907453ac4..15962ed193ce 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. 
*/ if (atomic_read(&priv->s_dma_busy)) { @@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) /* see post_one_send() */ smp_read_barrier_depends(); - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 22df09ae809e..b670cb9d2006 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) rdi->driver_f.notify_error_qp(qp); /* Schedule the sending tasklet to drain the send work queue. */ - if (ACCESS_ONCE(qp->s_last) != qp->s_head) + if (READ_ONCE(qp->s_last) != qp->s_head) rdi->driver_f.schedule_send(qp); rvt_clear_mr_refs(qp, 0); @@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail( if (likely(qp->s_avail)) return 0; smp_read_barrier_depends(); /* see rc.c */ - slast = ACCESS_ONCE(qp->s_last); + slast = READ_ONCE(qp->s_last); if (qp->s_head >= slast) avail = qp->s_size - (qp->s_head - slast); else @@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, * ahead and kick the send engine into gear. Otherwise we will always * just schedule the send to happen later. */ - call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next; + call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; for (; wr; wr = wr->next) { err = rvt_post_one_wr(qp, wr, &call_send); diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c index 2e8f801932be..a1db1e5040dc 100644 --- a/drivers/input/misc/regulator-haptic.c +++ b/drivers/input/misc/regulator-haptic.c @@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev) haptic->suspended = false; - magnitude = ACCESS_ONCE(haptic->magnitude); + magnitude = READ_ONCE(haptic->magnitude); if (magnitude) regulator_haptic_set_voltage(haptic, magnitude); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..33bb074d6941 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -347,7 +347,7 @@ static void __cache_size_refresh(void) BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); BUG_ON(dm_bufio_client_count < 0); - dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size); + dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); /* * Use default if set to 0 and report the actual cache size used. 
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, { unsigned long buffers; - if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { + if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { if (mutex_trylock(&dm_bufio_clients_lock)) { __cache_size_refresh(); mutex_unlock(&dm_bufio_clients_lock); @@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) static unsigned long get_retain_buffers(struct dm_bufio_client *c) { - unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); + unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); } @@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); - return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); } /* @@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); static unsigned get_max_age_hz(void) { - unsigned max_age = ACCESS_ONCE(dm_bufio_max_age); + unsigned max_age = READ_ONCE(dm_bufio_max_age); if (max_age > UINT_MAX / HZ) max_age = UINT_MAX / HZ; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c50..8c5756e1df94 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -6,6 +6,7 @@ * This file is released under the GPL. */ +#include <linux/compiler.h> #include <linux/module.h> #include <linux/device-mapper.h> #include <linux/dm-io.h> @@ -80,13 +81,13 @@ struct journal_entry { #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) #if BITS_PER_LONG == 64 -#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0) +#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) #elif defined(CONFIG_LBDAF) -#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0) +#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) #else -#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0) +#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0) #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) #endif #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) @@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in static int dm_integrity_failed(struct dm_integrity_c *ic) { - return ACCESS_ONCE(ic->failed); + return READ_ONCE(ic->failed); } static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, @@ -1545,7 +1546,7 @@ retry_kmap: smp_mb(); if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) wake_up(&ic->copy_to_journal_wait); - if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { + 
if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { queue_work(ic->commit_wq, &ic->commit_work); } else { schedule_autocommit(ic); @@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w) ic->n_committed_sections += commit_sections; spin_unlock_irq(&ic->endio_wait.lock); - if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) + if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) queue_work(ic->writer_wq, &ic->writer_work); release_flush_bios: @@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (ACCESS_ONCE(ic->suspending)) + if (READ_ONCE(ic->suspending)) return; spin_lock_irq(&ic->endio_wait.lock); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index cf2c67e35eaf..eb45cc3df31d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t) try_again: spin_lock_irq(&throttle_spinlock); - throttle = ACCESS_ONCE(t->throttle); + throttle = READ_ONCE(t->throttle); if (likely(throttle >= 100)) goto skip_limit; @@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t) t->num_io_jobs--; - if (likely(ACCESS_ONCE(t->throttle) >= 100)) + if (likely(READ_ONCE(t->throttle) >= 100)) goto skip_limit; if (!t->num_io_jobs) { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018..3f88c9d32f7e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, pgpath = path_to_pgpath(path); - if (unlikely(lockless_dereference(m->current_pg) != pg)) { + if (unlikely(READ_ONCE(m->current_pg) != pg)) { /* Only update current_pgpath if pg changed */ spin_lock_irqsave(&m->lock, flags); m->current_pgpath = pgpath; @@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) } /* Were we instructed to switch PG? */ - if (lockless_dereference(m->next_pg)) { + if (READ_ONCE(m->next_pg)) { spin_lock_irqsave(&m->lock, flags); pg = m->next_pg; if (!pg) { @@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) /* Don't change PG until it has no remaining paths */ check_current_pg: - pg = lockless_dereference(m->current_pg); + pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) @@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, struct request *clone; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, nr_bytes); @@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m bool queue_io; /* Do we need to select a new pgpath? 
*/ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); if (!pgpath || !queue_io) pgpath = choose_pgpath(m, nr_bytes); @@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct pgpath *current_pgpath; int r; - current_pgpath = lockless_dereference(m->current_pgpath); + current_pgpath = READ_ONCE(m->current_pgpath); if (!current_pgpath) current_pgpath = choose_pgpath(m, 0); @@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } if (r == -ENOTCONN) { - if (!lockless_dereference(m->current_pg)) { + if (!READ_ONCE(m->current_pg)) { /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } @@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti) return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); /* Guess which priority_group will be used at next mapping time */ - pg = lockless_dereference(m->current_pg); - next_pg = lockless_dereference(m->next_pg); - if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) + pg = READ_ONCE(m->current_pg); + next_pg = READ_ONCE(m->next_pg); + if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) pg = next_pg; if (!pg) { diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index a7868503d135..29bc51084c82 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -432,7 +432,7 @@ do_sync_free: synchronize_rcu_expedited(); dm_stat_free(&s->rcu_head); } else { - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; + WRITE_ONCE(dm_stat_need_rcu_barrier, 1); call_rcu(&s->rcu_head, dm_stat_free); } return 0; @@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, */ last = raw_cpu_ptr(stats->last); stats_aux->merged = - (bi_sector == (ACCESS_ONCE(last->last_sector) && + (bi_sector == (READ_ONCE(last->last_sector) && ((bi_rw == WRITE) == - (ACCESS_ONCE(last->last_rw) == WRITE)) + (READ_ONCE(last->last_rw) == WRITE)) )); - ACCESS_ONCE(last->last_sector) = end_sector; - ACCESS_ONCE(last->last_rw) = bi_rw; + WRITE_ONCE(last->last_sector, end_sector); + WRITE_ONCE(last->last_rw, bi_rw); } rcu_read_lock(); @@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared for_each_possible_cpu(cpu) { p = &s->stat_percpu[cpu][x]; - shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); - shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); - shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]); - shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]); - shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]); - shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]); - shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]); - shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]); - shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]); - shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]); - shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total); - shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue); + shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); + shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); + shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]); + shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]); + shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]); + shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]); + shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]); + shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]); + 
shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]); + shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]); + shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total); + shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue); if (s->n_histogram_entries) { unsigned i; for (i = 0; i < s->n_histogram_entries + 1; i++) - shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]); + shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]); } } } diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 4c8de1ff78ca..8d0ba879777e 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long switch_get_position(sctx, region_nr, ®ion_index, &bit); - return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & + return (READ_ONCE(sctx->region_table[region_index]) >> bit) & ((1 << sctx->region_table_entry_bits) - 1); } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..89e5dff9b4cf 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) struct pool_c *pt = pool->ti->private; bool needs_check = dm_pool_metadata_needs_check(pool->pmd); enum pool_mode old_mode = get_pool_mode(pool); - unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; + unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ; /* * Never allow the pool to transition to PM_WRITE mode if user diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..fba93237a780 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work) verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); if (!i) { - unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); + unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster); cluster >>= v->data_dev_block_bits; if (unlikely(!cluster)) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..8aaffa19b29a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; static int __dm_get_module_param_int(int *module_param, int min, int max) { - int param = ACCESS_ONCE(*module_param); + int param = READ_ONCE(*module_param); int modified_param = 0; bool modified = true; @@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max) unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max) { - unsigned param = ACCESS_ONCE(*module_param); + unsigned param = READ_ONCE(*module_param); unsigned modified_param = 0; if (!param) diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..447ddcbc9566 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page) { char *sep = ","; size_t len = 0; - unsigned long flags = ACCESS_ONCE(rdev->flags); + unsigned long flags = READ_ONCE(rdev->flags); if (test_bit(Faulty, &flags) || (!test_bit(ExternalBbl, &flags) && diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..7d9a50eed9db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n */ rcu_read_lock(); 
for (i = 0; i < conf->raid_disks; i++) { - struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); + struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); if (rdev == NULL || test_bit(Faulty, &rdev->flags)) still_degraded = 1; diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index 2322af1b8742..53011629c9ad 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c @@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) { ssize_t free; - /* ACCESS_ONCE() to load read pointer on writer side + /* READ_ONCE() to load read pointer on writer side * this pairs with smp_store_release() in dvb_ringbuffer_read(), * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), * or dvb_ringbuffer_reset() */ - free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; + free = READ_ONCE(rbuf->pread) - rbuf->pwrite; if (free <= 0) free += rbuf->size; return free-1; @@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, - * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } @@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, - * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c index 637cc4686742..b665757ca89a 100644 --- a/drivers/misc/mic/scif/scif_rb.c +++ b/drivers/misc/mic/scif/scif_rb.c @@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb) * the read barrier in scif_rb_count(..) */ wmb(); - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si bug: For the case where a Core is performing an EXT_WR @@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. */ - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); #endif } @@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * scif_rb_space(..) */ mb(); - ACCESS_ONCE(*rb->read_ptr) = new_offset; + WRITE_ONCE(*rb->read_ptr, new_offset); #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si Bug: For the case where a Core is performing an EXT_WR @@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. */ - ACCESS_ONCE(*rb->read_ptr) = new_offset; + WRITE_ONCE(*rb->read_ptr, new_offset); #endif } diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c index e1ef8daedd5a..a036dbb4101e 100644 --- a/drivers/misc/mic/scif/scif_rma_list.c +++ b/drivers/misc/mic/scif/scif_rma_list.c @@ -277,7 +277,7 @@ retry: * Need to restart list traversal if there has been * an asynchronous list entry deletion. 
*/ - if (ACCESS_ONCE(ep->rma_info.async_list_del)) + if (READ_ONCE(ep->rma_info.async_list_del)) goto retry; } mutex_unlock(&ep->rma_info.rma_lock); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490..1ed9529e7bd1 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) unsigned int count; slaves = rcu_dereference(bond->slave_arr); - count = slaves ? ACCESS_ONCE(slaves->count) : 0; + count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) tx_slave = slaves->arr[hash_index % count]; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2db581131b2..08a4f57cf409 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) slave = bond_slave_get_rcu(skb->dev); bond = slave->bond; - recv_probe = ACCESS_ONCE(bond->recv_probe); + recv_probe = READ_ONCE(bond->recv_probe); if (recv_probe) { ret = recv_probe(skb, bond, slave); if (ret == RX_HANDLER_CONSUMED) { @@ -3811,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev else bond_xmit_slave_id(bond, skb, 0); } else { - int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + int slave_cnt = READ_ONCE(bond->slave_cnt); if (likely(slave_cnt)) { slave_id = bond_rr_gen_slave_id(bond); @@ -3973,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int count; slaves = rcu_dereference(bond->slave_arr); - count = slaves ? ACCESS_ONCE(slaves->count) : 0; + count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) { slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; bond_dev_queue_xmit(bond, skb, slave->dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c..43f52a8fe708 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? 
hw_cidx + q->size : hw_cidx; } @@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..c6e859a27ee6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) if (wrapped) newacc += 65536; - ACCESS_ONCE(*acc) = newacc; + WRITE_ONCE(*acc, newacc); } static void populate_erx_stats(struct be_adapter *adapter, diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 0cec06bec63e..340e28211135 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) unsigned int count; smp_rmb(); - count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); + count = tx_count(READ_ONCE(priv->tx_head), tx_tail); if (count == 0) goto out; @@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_addr_t phys; smp_rmb(); - count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); + count = tx_count(tx_head, READ_ONCE(priv->tx_tail)); if (count == (TX_DESC_NUM - 1)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..2cb9539c931e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); if (!rx_ring) continue; @@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? 
"dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..e9e04a485e0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_lock(); for (j = 0; j < vsi->num_queue_pairs; j++) { - tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + tx_ring = READ_ONCE(vsi->tx_rings[j]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..de1fcac7834d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; i40e_get_netdev_stats_struct_tx(tx_ring, stats); @@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ - p = ACCESS_ONCE(vsi->tx_rings[q]); + p = READ_ONCE(vsi->tx_rings[q]); do { start = u64_stats_fetch_begin_irq(&p->syncp); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); /* Force any pending update before accessing. */ - adj = ACCESS_ONCE(pf->ptp_base_adj); + adj = READ_ONCE(pf->ptp_base_adj); freq = adj; freq *= ppb; @@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - ACCESS_ONCE(pf->ptp_base_adj) = incval; + WRITE_ONCE(pf->ptp_base_adj, incval); smp_mb(); /* Force the above update. 
*/ } diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..31a3f09df9f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); /* write operations, indexed using DWORDS */ #define wr32(reg, val) \ do { \ - u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ if (!E1000_REMOVED(hw_addr)) \ writel((val), &hw_addr[(reg)]); \ } while (0) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d63..18b6c25d4705 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) u32 igb_rd32(struct e1000_hw *hw, u32 reg) { struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); - u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; if (E1000_REMOVED(hw_addr)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; @@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d5f31e94358..935a2f15b0b0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) */ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (ixgbe_removed(reg_addr)) @@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); u64 bytes, packets; unsigned int start; @@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, } for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); ixgbe_get_ring_stats64(stats, ring); } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); ixgbe_get_ring_stats64(stats, ring); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); - incval = ACCESS_ONCE(adapter->base_incval); + incval = READ_ONCE(adapter->base_incval); freq = incval; freq *= ppb; @@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) } /* update the base incval used to calculate frequency adjustment */ - ACCESS_ONCE(adapter->base_incval) = incval; + WRITE_ONCE(adapter->base_incval, incval); smp_mb(); /* need lock to prevent incorrect read while modifying cyclecounter */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..cacb30682434 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (IXGBE_REMOVED(reg_addr)) diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -182,7 +182,7 @@ struct ixgbevf_info { static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (IXGBE_REMOVED(reg_addr)) return; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..3541a7f9d12e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, index = cons_index & size_mask; cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; - last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); - ring_cons = ACCESS_ONCE(ring->cons); + last_nr_txbb = READ_ONCE(ring->last_nr_txbb); + ring_cons = READ_ONCE(ring->cons); ring_index = ring_cons & size_mask; stamp_index = ring_index; @@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, wmb(); /* we want to dirty this cache line once */ - ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; - ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); + WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); if (cq->type == TX_XDP) return done < budget; @@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; /* fetch ring->cons far ahead before needing it to avoid stall */ - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); @@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_rmb(); - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5dd5f61e1114 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2629,7 +2629,7 @@ static void 
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50ea69d88480..5dd5f61e1114 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
 		ring = &vdev->vpaths[i].ring;
 
 		/* Truncated to machine word size number of frames */
-		rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+		rx_frms = READ_ONCE(ring->stats.rx_frms);
 
 		/* Did this vpath received any packets */
 		if (ring->stats.prev_rx_frms == rx_frms) {
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 13f72f5b18d2..a95a46bcd339 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
 	netif_vdbg(efx, intr, efx->net_dev,
 		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
 
-	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
 		/* Note test interrupts */
 		if (context->index == efx->irq_level)
 			efx->last_irq_cpu = raw_smp_processor_id();
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	struct efx_channel *channel;
 	efx_dword_t reg;
 	u32 queues;
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	bool rx_cont;
 	u16 flags = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	/* Basic packet information */
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	unsigned int tx_ev_q_label;
 	int tx_descs = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
 	int i;
 
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
-		if (ACCESS_ONCE(table->entry[i].spec) &
+		if (READ_ONCE(table->entry[i].spec) &
 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
 			rc = efx_ef10_filter_remove_internal(efx,
 					1U << EFX_FILTER_PRI_AUTO, i, true);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b9cb697b2818..016616a63880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data)
 	unsigned long pending;
 	enum reset_type method;
 
-	pending = ACCESS_ONCE(efx->reset_pending);
+	pending = READ_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
 	if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (ACCESS_ONCE(efx->state) != STATE_READY)
+	if (READ_ONCE(efx->state) != STATE_READY)
 		return;
 
 	/* efx_process_channel() will no longer read events once a
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 29614da91cbf..7263275fde4a 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
 	unsigned long pending;
 	enum reset_type method;
 
-	pending = ACCESS_ONCE(efx->reset_pending);
+	pending = READ_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (ACCESS_ONCE(efx->state) != STATE_READY)
+	if (READ_ONCE(efx->state) != STATE_READY)
 		return;
 
 	queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 93c713c1f627..cd8bb472d758 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
 	ef4_oword_t reg;
 	int link_speed, isolate;
 
-	isolate = !!ACCESS_ONCE(efx->reset_pending);
+	isolate = !!READ_ONCE(efx->reset_pending);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 05916c710d8c..494884f6af4a 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
 	struct ef4_nic *efx = channel->efx;
 	int tx_packets = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
 	struct ef4_rx_queue *rx_queue;
 	struct ef4_nic *efx = channel->efx;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return;
 
 	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
 irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
 {
 	struct ef4_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	ef4_oword_t *int_ker = efx->irq_status.addr;
 	irqreturn_t result = IRQ_NONE;
 	struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
 		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index a4c4592f6023..54ca457cdb15 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
 					 unsigned int write_count)
 {
-	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
 
 	if (empty_read_count == 0)
 		return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
 
 static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
 {
-	return ACCESS_ONCE(channel->event_test_cpu);
+	return READ_ONCE(channel->event_test_cpu);
 }
 static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
 {
-	return ACCESS_ONCE(efx->last_irq_cpu);
+	return READ_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 6a75f4140a4b..6486814e97dc 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
 	 */
 	netif_tx_stop_queue(txq1->core_txq);
 	smp_mb();
-	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
-	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+	txq1->old_read_count = READ_ONCE(txq1->read_count);
+	txq2->old_read_count = READ_ONCE(txq2->read_count);
 
 	fill_level = max(txq1->insert_count - txq1->old_read_count,
 			 txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
 
 	/* Check whether the hardware queue is now empty */
 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
 		if (tx_queue->read_count == tx_queue->old_write_count) {
 			smp_mb();
 			tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index ba45150f53c7..86454d25a405 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	struct efx_nic *efx = channel->efx;
 	int tx_packets = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return;
 
 	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	efx_oword_t *int_ker = efx->irq_status.addr;
 	irqreturn_t result = IRQ_NONE;
 	struct efx_channel *channel;
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
 		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
 		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4d7fb8af880d..7b51b6371724 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
 static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
 					 unsigned int write_count)
 {
-	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
 
 	if (empty_read_count == 0)
 		return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
-	return ACCESS_ONCE(channel->event_test_cpu);
+	return READ_ONCE(channel->event_test_cpu);
 }
 static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 {
-	return ACCESS_ONCE(efx->last_irq_cpu);
+	return READ_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 60cdb97f58e2..56c2db398def 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
 
 	/* Write host time for specified period or until MC is done */
 	while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
-	       ACCESS_ONCE(*mc_running)) {
+	       READ_ONCE(*mc_running)) {
 		struct timespec64 update_time;
 		unsigned int host_time;
 
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
 		do {
 			pps_get_ts(&now);
 		} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
-			 ACCESS_ONCE(*mc_running));
+			 READ_ONCE(*mc_running));
 
 		/* Synchronise NIC with single word of time only */
 		host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 					ptp->start.dma_addr);
 
 	/* Clear flag that signals MC ready */
-	ACCESS_ONCE(*start) = 0;
+	WRITE_ONCE(*start, 0);
 	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
 				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
 	EFX_WARN_ON_ONCE_PARANOID(rc);
 
 	/* Wait for start from MCDI (or timeout) */
 	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
-	while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+	while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
 		udelay(20);	/* Usually start MCDI execution quickly */
 		loops++;
 	}
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 	if (!time_before(jiffies, timeout))
 		++ptp->sync_timeouts;
 
-	if (ACCESS_ONCE(*start))
+	if (READ_ONCE(*start))
 		efx_ptp_send_times(efx, &last_time);
 
 	/* Collect results */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 32bf1fecf864..efb66ea21f27 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 	 */
 	netif_tx_stop_queue(txq1->core_txq);
 	smp_mb();
-	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
-	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+	txq1->old_read_count = READ_ONCE(txq1->read_count);
+	txq2->old_read_count = READ_ONCE(txq2->read_count);
 
 	fill_level = max(txq1->insert_count - txq1->old_read_count,
 			 txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 
 	/* Check whether the hardware queue is now empty */
 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
 		if (tx_queue->read_count == tx_queue->old_write_count) {
 			smp_mb();
 			tx_queue->empty_read_count =
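The sfc tx.c hunks above pair the READ_ONCE() with an smp_mb(): the queue is stopped first, and only then is read_count re-sampled, closing the window in which a concurrent completion could slip between the fill-level check and the stop. A rough userspace analogue of that ordering follows, assuming GCC; atomic_thread_fence() stands in for smp_mb(), and struct txq, txq_maybe_stop() and TXQ_ENTRIES are hypothetical names, not the driver's. (Portable C11 would make these fields _Atomic; plain accesses plus a fence mirror the kernel idiom, not the C memory model.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define q_read_once(x)	(*(const volatile typeof(x) *)&(x))

#define TXQ_ENTRIES 256

struct txq {
	unsigned int insert_count;	/* written by the producer only */
	unsigned int read_count;	/* written by the consumer only */
	unsigned int old_read_count;	/* producer's cached snapshot */
	bool stopped;
};

/* Producer-side check mirroring efx_tx_maybe_stop_queue(): mark the
 * queue stopped, then re-sample read_count *after* a full fence so a
 * completion that raced with us is still observed and the queue can
 * be restarted instead of stalling forever. */
static void txq_maybe_stop(struct txq *q)
{
	unsigned int fill_level;

	q->stopped = true;
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	q->old_read_count = q_read_once(q->read_count);
	fill_level = q->insert_count - q->old_read_count;
	if (fill_level < TXQ_ENTRIES)
		q->stopped = false;	/* completions caught up; restart */
}

int main(void)
{
	struct txq q = { .insert_count = 300, .read_count = 100 };

	txq_maybe_stop(&q);
	printf("stopped=%d fill=%u\n", q.stopped,
	       q.insert_count - q.old_read_count);
	return 0;
}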
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6a4e8e1bbd90..8ab0fb6892d5 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np,
 
 	pkts = dropped = errors = bytes = 0;
 
-	rx_rings = ACCESS_ONCE(np->rx_rings);
+	rx_rings = READ_ONCE(np->rx_rings);
 	if (!rx_rings)
 		goto no_rings;
 
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np,
 
 	pkts = errors = bytes = 0;
 
-	tx_rings = ACCESS_ONCE(np->tx_rings);
+	tx_rings = READ_ONCE(np->tx_rings);
 	if (!tx_rings)
 		goto no_rings;
 
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 6c0c84c33e1f..b13890953ebb 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
 	 * and validate that the result isn't NULL - in case we are
 	 * racing against queue removal.
 	 */
-	int numvtaps = ACCESS_ONCE(tap->numvtaps);
+	int numvtaps = READ_ONCE(tap->numvtaps);
 	__u32 rxq;
 
 	if (!numvtaps)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42bb820a56c9..c1685a6d7883 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 	u32 numqueues = 0;
 
 	rcu_read_lock();
-	numqueues = ACCESS_ONCE(tun->numqueues);
+	numqueues = READ_ONCE(tun->numqueues);
 
 	txq = __skb_get_hash_symmetric(skb);
 	if (txq) {
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
-	numqueues = ACCESS_ONCE(tun->numqueues);
+	numqueues = READ_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
 	if (txq >= numqueues)
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index bd8d4392d68b..80f75139495f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 
 	tx_status = &desc->ud.ds_tx5212.tx_stat;
 
-	txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+	txstat1 = READ_ONCE(tx_status->tx_status_1);
 
 	/* No frame has been send or error */
 	if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
 		return -EINPROGRESS;
 
-	txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+	txstat0 = READ_ONCE(tx_status->tx_status_0);
 
 	/*
 	 * Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
 	u32 rxstat0, rxstat1;
 
 	rx_status = &desc->ud.ds_rx.rx_stat;
-	rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+	rxstat1 = READ_ONCE(rx_status->rx_status_1);
 
 	/* No frame received / not ready */
 	if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
 		return -EINPROGRESS;
 
 	memset(rs, 0, sizeof(struct ath5k_rx_status));
 
-	rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+	rxstat0 = READ_ONCE(rx_status->rx_status_0);
 
 	/*
 	 * Frame receive status
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 613caca7dc02..785a0f33b7e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
 
 	bus->dpc_running = true;
 	wmb();
-	while (ACCESS_ONCE(bus->dpc_triggered)) {
+	while (READ_ONCE(bus->dpc_triggered)) {
 		bus->dpc_triggered = false;
 		brcmf_sdio_dpc(bus);
 		bus->idlecount = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 231878969332..0f45f34e39d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	bool calibrating = ACCESS_ONCE(mvm->calibrating);
+	bool calibrating = READ_ONCE(mvm->calibrating);
 
 	if (state)
 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6f2e2af23219..6e9d3289b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			return -1;
 	} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 		   is_multicast_ether_addr(hdr->addr1)) {
-		u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
 
 		if (ap_sta_id != IWL_MVM_INVALID_STA)
 			sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
 		tcp_hdrlen(skb);
 
-	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+	dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
 
 	if (!sta->max_amsdu_len ||
 	    !ieee80211_is_data_qos(hdr->frame_control) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a06b6612b658..f25ce3a1ea50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1247,7 +1247,7 @@ restart:
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 	i = rxq->read;
 
 	/* W/A 9000 device step A0 wrap-around bug */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 2e3e013ec95a..9ad3f4fe5894 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 
 	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
 	txq = trans_pcie->txq[txq_idx];
-	wr_ptr = ACCESS_ONCE(txq->write_ptr);
+	wr_ptr = READ_ONCE(txq->write_ptr);
 
-	while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+	while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
 	       !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
-		u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+		u8 write_ptr = READ_ONCE(txq->write_ptr);
 
 		if (WARN_ONCE(wr_ptr != write_ptr,
 			      "WR pointer moved while flushing %d -> %d\n",
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 
-	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 
 	for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums;
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 		/* RBs */
-		num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+		num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
 			  & 0x0FFF;
 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
 		len += num_rbs * (sizeof(*data) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6467ffac9811..d2b3d6177a55 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 	mac80211_hwsim_monitor_rx(hw, skb, channel);
 
 	/* wmediumd mode check */
-	_portid = ACCESS_ONCE(data->wmediumd);
+	_portid = READ_ONCE(data->wmediumd);
 
 	if (_portid)
 		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 				    struct ieee80211_channel *chan)
 {
 	struct mac80211_hwsim_data *data = hw->priv;
-	u32 _pid = ACCESS_ONCE(data->wmediumd);
+	u32 _pid = READ_ONCE(data->wmediumd);
 
 	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
 		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f05cfc83c9c8..f946bf889015 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
 	if (logout_started) {
 		bool traced = false;
 
-		while (!ACCESS_ONCE(sess->logout_completed)) {
+		while (!READ_ONCE(sess->logout_completed)) {
 			if (!traced) {
 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
 				    "%s: waiting for sess %p logout\n",
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 942d094269fb..9469695f5871 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 	mb = udev->mb_addr;
 	tcmu_flush_dcache_range(mb, sizeof(*mb));
 
-	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
 		struct tcmu_cmd_entry *entry = (void *) mb +
 					CMDR_OFF + udev->cmdr_last_cleaned;
 		struct tcmu_cmd *cmd;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e865dbf878c..fbaa2a90d25d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -483,7 +483,7 @@ static ssize_t wdm_read
 		if (rv < 0)
 			return -ERESTARTSYS;
 
-		cntr = ACCESS_ONCE(desc->length);
+		cntr = READ_ONCE(desc->length);
 		if (cntr == 0) {
 			desc->read = 0;
 retry:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9326f31db8d..4ae667d8c238 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount)
 {
 	u64 lim;
 
-	lim = ACCESS_ONCE(usbfs_memory_mb);
+	lim = READ_ONCE(usbfs_memory_mb);
 	lim <<= 20;
 
 	atomic64_add(amount, &usbfs_memory_usage);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d930bfda4010..58d59c5f8592 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
 	char *string;
 
 	intf = to_usb_interface(dev);
-	string = ACCESS_ONCE(intf->cur_altsetting->string);
+	string = READ_ONCE(intf->cur_altsetting->string);
 	if (!string)
 		return 0;
 	return sprintf(buf, "%s\n", string);
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 
 	intf = to_usb_interface(dev);
 	udev = interface_to_usbdev(intf);
-	alt = ACCESS_ONCE(intf->cur_altsetting);
+	alt = READ_ONCE(intf->cur_altsetting);
 
 	return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
 			"ic%02Xisc%02Xip%02Xin%02X\n",
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1f9941145746..0b59fa50aa30 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
 	if (!req->last_desc)
 		return 0;
 
-	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
 		return 0; /* Not put in hardware buffers yet */
 
 	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
 	if (!req->curr_desc)
 		return 0;
 
-	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+	ctrl = READ_ONCE(req->curr_desc->ctrl);
 	if (ctrl & GR_DESC_OUT_CTRL_EN)
 		return 0; /* Not received yet */
 
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 44924824fa41..c86f89babd57 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci)
 	}
 
 	/* find the last TD processed by the controller. */
-	head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+	head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
 	td_start = td;
 	td_next = list_prepare_entry(td, &ed->td_list, td_list);
 	list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index d97f0d9b3ce6..f1cc47292a59 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -187,7 +187,7 @@ struct uhci_qh {
  * We need a special accessor for the element pointer because it is
  * subject to asynchronous updates by the controller.
  */
-#define qh_element(qh)		ACCESS_ONCE((qh)->element)
+#define qh_element(qh)		READ_ONCE((qh)->element)
 
 #define LINK_TO_QH(uhci, qh)	(UHCI_PTR_QH((uhci)) | \
 				cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -275,7 +275,7 @@ struct uhci_td {
  * subject to asynchronous updates by the controller.
 */
 #define td_status(uhci, td)	hc32_to_cpu((uhci), \
-					ACCESS_ONCE((td)->status))
+					READ_ONCE((td)->status))
 
 #define LINK_TO_TD(uhci, td)	(cpu_to_hc32((uhci), (td)->dma_handle))
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f5a86f651f38..2bc3705a99bd 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
 {
 	struct vfio_group *group = data;
 	struct vfio_device *device;
-	struct device_driver *drv = ACCESS_ONCE(dev->driver);
+	struct device_driver *drv = READ_ONCE(dev->driver);
 	struct vfio_unbound_dev *unbound;
 	int ret = -EINVAL;
 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 046f6d280af5..35e929f132e8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			continue;
 		}
 
-		tpg = ACCESS_ONCE(vs_tpg[*target]);
+		tpg = READ_ONCE(vs_tpg[*target]);
 		if (unlikely(!tpg)) {
 			/* Target does not exist, fail the request */
 			vhost_scsi_send_bad_target(vs, vq, head, out);
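Taken together, every hunk in this diffstat is the same mechanical substitution: an ACCESS_ONCE(x) read becomes READ_ONCE(x) and an ACCESS_ONCE(x) = y store becomes WRITE_ONCE(x, y), with no intended change in behaviour. Below is a compilable userspace sketch of why the annotation matters at all, assuming GCC and pthreads; MY_READ_ONCE()/MY_WRITE_ONCE() are stand-ins built from the same volatile-cast idea, not the kernel macros. Without the marked access, the compiler may legally hoist the flag load out of the loop, the bug class these macros prevent in polling loops such as brcmf_sdio_dataworker()'s wait on bus->dpc_triggered.

#include <pthread.h>
#include <stdio.h>

#define MY_READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define MY_WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

static int stop_requested;	/* shared flag, set by the main thread */

static void *worker(void *arg)
{
	unsigned long spins = 0;

	(void)arg;
	/* The volatile access forces a fresh load of stop_requested on
	 * every iteration; a plain read could be hoisted and cached. */
	while (!MY_READ_ONCE(stop_requested))
		spins++;

	printf("worker saw stop after %lu spins\n", spins);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, NULL))
		return 1;
	MY_WRITE_ONCE(stop_requested, 1);	/* one untorn store */
	pthread_join(tid, NULL);
	return 0;
}

Build with e.g. gcc -O2 -pthread once.c; at -O2 it is the volatile cast that keeps the load of stop_requested inside the loop. Note the sketch demonstrates only the compiler-barrier aspect; the kernel macros additionally interact with the memory-ordering primitives (smp_mb(), smp_rmb(), wmb()) visible alongside them in the hunks above.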