locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run
the coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of
the former. So far, there's been no reason to change most existing
uses of ACCESS_ONCE(), as these aren't harmful, and changing them
results in churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch

virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
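For reference, a minimal sketch of what the conversion means at a call
site. The shared_counter variable and example() function below are
hypothetical, for illustration only; they are not taken from the patch:

----
#include <linux/compiler.h>

static int shared_counter;	/* hypothetical shared variable */

static void example(void)
{
	int v;

	/* Old style: a single macro covered both loads and stores. */
	v = ACCESS_ONCE(shared_counter);
	ACCESS_ONCE(shared_counter) = v + 1;

	/* New style: reads and writes are syntactically distinct,
	 * which is what the script's two rules produce.
	 */
	v = READ_ONCE(shared_counter);
	WRITE_ONCE(shared_counter, v + 1);
}
----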
This commit is contained in:

parent b03a0fe0c5
commit 6aa7de0591
@@ -245,7 +245,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
	 * and read back old value
	 */
	do {
-		new = old = ACCESS_ONCE(*ipi_data_ptr);
+		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

@@ -71,7 +71,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
-		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	}

	smp_mb();

@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
	bool entered_lp2 = false;

	if (tegra_pending_sgi())
-		ACCESS_ONCE(abort_flag) = true;
+		WRITE_ONCE(abort_flag, true);

	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
{
	u32 seq;
repeat:
-	seq = ACCESS_ONCE(vdata->seq_count);
+	seq = READ_ONCE(vdata->seq_count);
	if (seq & 1) {
		cpu_relax();
		goto repeat;
@@ -61,7 +61,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
-	int tmp = ACCESS_ONCE(lock->lock);
+	int tmp = READ_ONCE(lock->lock);

	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;

@@ -73,19 +73,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	WRITE_ONCE(*p, (tmp + 2) & ~1);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);

	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
	u32 seq;

	while (true) {
-		seq = ACCESS_ONCE(data->seq_count);
+		seq = READ_ONCE(data->seq_count);
		if (likely(!(seq & 1))) {
			/* Paired with smp_wmb() in vdso_data_write_*(). */
			smp_rmb();

@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
-	ACCESS_ONCE(*nc_core_ready_count) = 0;
+	WRITE_ONCE(*nc_core_ready_count, 0);
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */

@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)

try_again:
	/* pull chars out of the hat */
-	ix = ACCESS_ONCE(port->rx_outp);
+	ix = READ_ONCE(port->rx_outp);
	if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
		if (push && !tport->low_latency)
			tty_flip_buffer_push(tport);

@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
	if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
		do {
			/* pull chars out of the hat */
-			ix = ACCESS_ONCE(port->rx_outp);
+			ix = READ_ONCE(port->rx_outp);
			if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
				return NO_POLL_CHAR;

@@ -260,7 +260,7 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))

@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
	if (!opal_memcons)
		return -ENODEV;

-	out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+	out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));

	/* Now we've read out_pos, put a barrier in before reading the new
	 * data it points to in conbuf. */

@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
	int old;

	do {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
			smp_yield_cpu(~owner);
			count = spin_retry;
		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))

@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
	int old;

	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))

@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
			smp_yield_cpu(~owner);
			count = spin_retry;
		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);

@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
			smp_yield_cpu(~owner);
			count = spin_retry;
		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;

@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
	int old;

	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
@@ -31,7 +31,7 @@ void atomic_set(atomic_t *, int);

#define atomic_set_release(v, i)	atomic_set((v), (i))

-#define atomic_read(v)          ACCESS_ONCE((v)->counter)
+#define atomic_read(v)          READ_ONCE((v)->counter)

#define atomic_add(i, v)	((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)atomic_add_return(-(int)(i), (v)))

@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
-		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+		if (READ_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

-	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+	return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);

@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
		 * if the result is LESS than "hw_complete_count".
		 */
		uint64_t complete;
-		complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+		complete = READ_ONCE(dma_queue->hw_complete_count);
		slot |= (complete & 0xffffffffff000000);
		if (slot < complete)
			slot += 0x1000000;

@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,

int do_syscall_trace_enter(struct pt_regs *regs)
{
-	u32 work = ACCESS_ONCE(current_thread_info()->flags);
+	u32 work = READ_ONCE(current_thread_info()->flags);

	if ((work & _TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

-	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
-	time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+	time_t result = READ_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;

@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
			event->destroy(event);
	}

-	if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+	if (READ_ONCE(x86_pmu.attr_rdpmc))
		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

	return err;

@@ -48,7 +48,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
	unsigned ret;

repeat:
-	ret = ACCESS_ONCE(s->seq);
+	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;

@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
-	stack_page = ACCESS_ONCE(espfix_pages[page]);
+	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
-	stack_page = ACCESS_ONCE(espfix_pages[page]);
+	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
		set_pte(&pte_p[n*PTE_STRIDE], pte);

	/* Job is done for this CPU and any CPU which shares this page */
-	ACCESS_ONCE(espfix_pages[page]) = stack_page;
+	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);

@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
-	u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+	u64 whole_msecs = READ_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)

static u64 __get_spte_lockless(u64 *sptep)
{
-	return ACCESS_ONCE(*sptep);
+	return READ_ONCE(*sptep);
}
#else
union split_spte {

@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
-	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	remote_flush = local_flush = false;

@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
-	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}

void kvm_page_track_cleanup(struct kvm *kvm)

@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
-		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+		mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

@@ -34,23 +34,23 @@
static void lcd_put_byte(u8 *addr, u8 data)
{
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-	ACCESS_ONCE(*addr) = data;
+	WRITE_ONCE(*addr, data);
#else
-	ACCESS_ONCE(*addr) = data & 0xf0;
-	ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+	WRITE_ONCE(*addr, data & 0xf0);
+	WRITE_ONCE(*addr, (data << 4) & 0xf0);
#endif
}

static int __init lcd_init(void)
{
-	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+	WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
	mdelay(5);
-	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+	WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
	udelay(200);
-	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+	WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
	udelay(50);
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+	WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
	udelay(50);
	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
	udelay(50);
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
-	u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
	 * so be careful about accessing it.  dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
-	drv = ACCESS_ONCE(dev->driver);
+	drv = READ_ONCE(dev->driver);
	return drv ? drv->name :
			(dev->bus ? dev->bus->name :
			(dev->class ? dev->class->name : ""));

@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
	if (!dev->power.use_autosuspend)
		goto out;

-	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

-	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	last_busy = READ_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
		return;

retry:
-	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;

@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,

	/* Can we pull enough? */
retry:
-	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
	if (readl_relaxed(timer->control) & timer->match_mask) {
		writel_relaxed(timer->match_mask, timer->control);

		event_handler = READ_ONCE(timer->evt.event_handler);
-		event_handler = ACCESS_ONCE(timer->evt.event_handler);
+		event_handler = READ_ONCE(timer->evt.event_handler);
		if (event_handler)
			event_handler(&timer->evt);
		return IRQ_HANDLED;

@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)

	while (rd_reg32(&jrp->rregs->outring_used)) {

-		head = ACCESS_ONCE(jrp->head);
+		head = READ_ONCE(jrp->head);

		spin_lock(&jrp->outlock);

@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
-	tail = ACCESS_ONCE(jrp->tail);
+	tail = READ_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {

@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
	ktime_t start = wmem->start, now = ktime_get();
	ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);

-	while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+	while (!(READ_ONCE(csb->flags) & CSB_V)) {
		cpu_relax();
		now = ktime_get();
		if (ktime_after(now, timeout))
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
-	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
-		next_res_count = ACCESS_ONCE(
-				ctx->descriptors[next_i].res_count);
+		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.

@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
		if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
			next_i = ar_next_buffer_index(next_i);
			rmb();
-			next_res_count = ACCESS_ONCE(
-					ctx->descriptors[next_i].res_count);
+			next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
			if (next_res_count != cpu_to_le16(PAGE_SIZE))
				goto next_buffer_is_active;
		}

@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
	u32 buffer_dma;

	req_count = le16_to_cpu(last->req_count);
-	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+	res_count = le16_to_cpu(READ_ONCE(last->res_count));
	completed = req_count - res_count;
	buffer_dma = le32_to_cpu(last->data_address);

@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
-	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
-	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

-	offset = ACCESS_ONCE(bo->tbo.mem.start);
+	offset = READ_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

-	pin_count = ACCESS_ONCE(bo->pin_count);
+	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
	if (kfifo_is_empty(&entity->job_queue))
		return false;

-	if (ACCESS_ONCE(entity->dependency))
+	if (READ_ONCE(entity->dependency))
		return false;

	return true;

@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
	else
		r = 0;

-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;

@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);

@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
	if (unlikely(drm_is_render_client(file_priv)))
		require_exist = true;

-	if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+	if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
		DRM_ERROR("Locked master refused legacy "
			  "surface reference.\n");
		return -EACCES;
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
-				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+				!(READ_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

@@ -1423,14 +1423,14 @@ retry:
		goto done;
	}
	/* copy from receiver cache line and recalculate */
-	sc->alloc_free = ACCESS_ONCE(sc->free);
+	sc->alloc_free = READ_ONCE(sc->free);
	avail =
		(unsigned long)sc->credits -
		(sc->fill - sc->alloc_free);
	if (blocks > avail) {
		/* still no room, actively update */
		sc_release_update(sc);
-		sc->alloc_free = ACCESS_ONCE(sc->free);
+		sc->alloc_free = READ_ONCE(sc->free);
		trycount++;
		goto retry;
	}

@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc)

	/* call sent buffer callbacks */
	code = -1;				/* code not yet set */
-	head = ACCESS_ONCE(sc->sr_head);	/* snapshot the head */
+	head = READ_ONCE(sc->sr_head);	/* snapshot the head */
	tail = sc->sr_tail;
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)

again:
	smp_read_barrier_depends(); /* see post_one_send() */
-	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

@@ -1725,7 +1725,7 @@ retry:

	swhead = sde->descq_head & sde->sdma_mask;
	/* this code is really bad for cache line trading */
-	swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+	swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
	cnt = sde->descq_cnt;

	if (swhead < swtail)

@@ -1872,7 +1872,7 @@ retry:
		if ((status & sde->idle_mask) && !idle_check_done) {
			u16 swtail;

-			swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+			swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
			if (swtail != hwhead) {
				hwhead = (u16)read_sde_csr(sde, SD(HEAD));
				idle_check_done = 1;

@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
-	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+	tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),

@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
		return -EINVAL;
	}
	while (1) {
-		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+		nr = ffz(READ_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;

@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
-		 ACCESS_ONCE(sde->descq_head)) - 1;
+		 READ_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {

@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
		goto bail;
	/* Check if send work queue is empty. */
	smp_read_barrier_depends(); /* see post_one_send() */
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+	if (qp->s_cur == READ_ONCE(qp->s_head)) {
		clear_ahg(qp);
		goto bail;
	}

@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {

@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)

	/* see post_one_send() */
	smp_read_barrier_depends();
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);

@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
-			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+			(READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);

@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
		if (ret != -EBUSY) {
			req->status = ret;
			WRITE_ONCE(req->has_error, 1);
-			if (ACCESS_ONCE(req->seqcomp) ==
+			if (READ_ONCE(req->seqcomp) ==
			    req->seqsubmitted - 1)
				goto free_req;
			return ret;

@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
-			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;

@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
	} else {
		if (status != SDMA_TXREQ_S_OK)
			req->status = status;
-		if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+		if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
		    (READ_ONCE(req->done) ||
		     READ_ONCE(req->has_error))) {
			user_sdma_free_request(req, false);
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)

again:
	smp_read_barrier_depends(); /* see post_one_send() */
-	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {

@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
		goto bail;
	/* Check if send work queue is empty. */
	smp_read_barrier_depends(); /* see post_one_send() */
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;
	/*
	 * Start a new request.

@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {

@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)

	/* see post_one_send() */
	smp_read_barrier_depends();
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);

@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
		rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
-	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail(
	if (likely(qp->s_avail))
		return 0;
	smp_read_barrier_depends(); /* see rc.c */
-	slast = ACCESS_ONCE(qp->s_last);
+	slast = READ_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else

@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
-	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)

	haptic->suspended = false;

-	magnitude = ACCESS_ONCE(haptic->magnitude);
+	magnitude = READ_ONCE(haptic->magnitude);
	if (magnitude)
		regulator_haptic_set_voltage(haptic, magnitude);

@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

-	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.

@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
	unsigned long buffers;

-	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+	if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
		if (mutex_trylock(&dm_bufio_clients_lock)) {
			__cache_size_refresh();
			mutex_unlock(&dm_bufio_clients_lock);

@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
-	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}

@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);

-	return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+	return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}

/*

@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
-	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
	spin_lock_irq(&throttle_spinlock);

-	throttle = ACCESS_ONCE(t->throttle);
+	throttle = READ_ONCE(t->throttle);

	if (likely(throttle >= 100))
		goto skip_limit;

@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)

	t->num_io_jobs--;

-	if (likely(ACCESS_ONCE(t->throttle) >= 100))
+	if (likely(READ_ONCE(t->throttle) >= 100))
		goto skip_limit;

	if (!t->num_io_jobs) {

@@ -431,7 +431,7 @@ do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
-		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
@@ -639,12 +639,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
-			(bi_sector == (ACCESS_ONCE(last->last_sector) &&
+			(bi_sector == (READ_ONCE(last->last_sector) &&
				       ((bi_rw == WRITE) ==
-					(ACCESS_ONCE(last->last_rw) == WRITE))
+					(READ_ONCE(last->last_rw) == WRITE))
				       ));
-		ACCESS_ONCE(last->last_sector) = end_sector;
-		ACCESS_ONCE(last->last_rw) = bi_rw;
+		WRITE_ONCE(last->last_sector, end_sector);
+		WRITE_ONCE(last->last_rw, bi_rw);
	}

	rcu_read_lock();

@@ -693,22 +693,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
-		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
-		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
-		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
-		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
-		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
-		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
-		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
-		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
-		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
-		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
-		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
-		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
-				shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long

	switch_get_position(sctx, region_nr, &region_index, &bit);

-	return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+	return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
		((1 << sctx->region_table_entry_bits) - 1);
}

@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
	struct pool_c *pt = pool->ti->private;
	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
	enum pool_mode old_mode = get_pool_mode(pool);
-	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+	unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;

	/*
	 * Never allow the pool to transition to PM_WRITE mode if user

@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work)
		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
		if (!i) {
-			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+			unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))

@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
-	int param = ACCESS_ONCE(*module_param);
+	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
-	unsigned param = ACCESS_ONCE(*module_param);
+	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page)
{
	char *sep = ",";
	size_t len = 0;
-	unsigned long flags = ACCESS_ONCE(rdev->flags);
+	unsigned long flags = READ_ONCE(rdev->flags);

	if (test_bit(Faulty, &flags) ||
	    (!test_bit(ExternalBbl, &flags) &&

@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
	 */
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
-		struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+		struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);

		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
			still_degraded = 1;
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
	 * the read barrier in scif_rb_count(..)
	 */
	wmb();
-	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR

@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
-	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}

@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
	 * scif_rb_space(..)
	 */
	mb();
-	ACCESS_ONCE(*rb->read_ptr) = new_offset;
+	WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR

@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
-	ACCESS_ONCE(*rb->read_ptr) = new_offset;
+	WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}

@@ -277,7 +277,7 @@ retry:
		 * Need to restart list traversal if there has been
		 * an asynchronous list entry deletion.
		 */
-		if (ACCESS_ONCE(ep->rma_info.async_list_del))
+		if (READ_ONCE(ep->rma_info.async_list_del))
			goto retry;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
			unsigned int count;

			slaves = rcu_dereference(bond->slave_arr);
-			count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+			count = slaves ? READ_ONCE(slaves->count) : 0;
			if (likely(count))
				tx_slave = slaves->arr[hash_index %
						       count];

@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

-	recv_probe = ACCESS_ONCE(bond->recv_probe);
+	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {

@@ -3810,7 +3810,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
		else
			bond_xmit_slave_id(bond, skb, 0);
	} else {
-		int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+		int slave_cnt = READ_ONCE(bond->slave_cnt);

		if (likely(slave_cnt)) {
			slave_id = bond_rr_gen_slave_id(bond);

@@ -3972,7 +3972,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
	unsigned int count;

	slaves = rcu_dereference(bond->slave_arr);
-	count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+	count = slaves ? READ_ONCE(slaves->count) : 0;
	if (likely(count)) {
		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
		bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 */
static inline int reclaimable(const struct sge_txq *q)
{
-	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

@@ -1375,7 +1375,7 @@ out_free:	dev_kfree_skb_any(skb);
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
-	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)

@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)

	if (wrapped)
		newacc += 65536;
-	ACCESS_ONCE(*acc) = newacc;
+	WRITE_ONCE(*acc, newacc);
}

static void populate_erx_stats(struct be_adapter *adapter,
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
	unsigned int count;

	smp_rmb();
-	count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	dma_addr_t phys;

	smp_rmb();
-	count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+		struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);

		if (!rx_ring)
			continue;

@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
			 ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
	}
	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);

		if (!tx_ring)
			continue;

@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
	}
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+		tx_ring = READ_ONCE(vsi->tx_rings[j]);

		if (!tx_ring)
			continue;

@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
		u64 bytes, packets;
		unsigned int start;

-		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		tx_ring = READ_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
-		p = ACCESS_ONCE(vsi->tx_rings[q]);
+		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);

@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
	}

	smp_mb(); /* Force any pending update before accessing. */
-	adj = ACCESS_ONCE(pf->ptp_base_adj);
+	adj = READ_ONCE(pf->ptp_base_adj);

	freq = adj;
	freq *= ppb;

@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
	wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);

	/* Update the base adjustement value. */
-	ACCESS_ONCE(pf->ptp_base_adj) = incval;
+	WRITE_ONCE(pf->ptp_base_adj, incval);
	smp_mb(); /* Force the above update. */
}
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
-	u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+	u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
	if (!E1000_REMOVED(hw_addr)) \
		writel((val), &hw_addr[(reg)]); \
} while (0)

@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
-	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)

static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);

	if (ixgbe_removed(reg_addr))
		return;

@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)

static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);

	if (ixgbe_removed(reg_addr))
		return;

@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))

@@ -8630,7 +8630,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

@@ -8646,12 +8646,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);

		ixgbe_get_ring_stats64(stats, ring);
	}
	for (i = 0; i < adapter->num_xdp_queues; i++) {
-		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+		struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);

		ixgbe_get_ring_stats64(stats, ring);
	}

@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
	}

	smp_mb();
-	incval = ACCESS_ONCE(adapter->base_incval);
+	incval = READ_ONCE(adapter->base_incval);

	freq = incval;
	freq *= ppb;

@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
	}

	/* update the base incval used to calculate frequency adjustment */
-	ACCESS_ONCE(adapter->base_incval) = incval;
+	WRITE_ONCE(adapter->base_incval, incval);
	smp_mb();

	/* need lock to prevent incorrect read while modifying cyclecounter */
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))

@@ -182,7 +182,7 @@ struct ixgbevf_info {

static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);

	if (IXGBE_REMOVED(reg_addr))
		return;
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
-	last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
-	ring_cons = ACCESS_ONCE(ring->cons);
+	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
	wmb();

	/* we want to dirty this cache line once */
-	ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
-	ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done < budget;

@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
		goto tx_drop;

	/* fetch ring->cons far ahead before needing it to avoid stall */
-	ring_cons = ACCESS_ONCE(ring->cons);
+	ring_cons = READ_ONCE(ring->cons);

	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
				  &inline_ok, &fragptr);

@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
	 */
	smp_rmb();

-	ring_cons = ACCESS_ONCE(ring->cons);
+	ring_cons = READ_ONCE(ring->cons);
	if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
		ring = &vdev->vpaths[i].ring;

		/* Truncated to machine word size number of frames */
-		rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+		rx_frms = READ_ONCE(ring->stats.rx_frms);

		/* Did this vpath received any packets */
		if (ring->stats.prev_rx_frms == rx_frms) {
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

-	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
	bool rx_cont;
	u16 flags = 0;

-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */

@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))

@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
	int i;

	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
-		if (ACCESS_ONCE(table->entry[i].spec) &
+		if (READ_ONCE(table->entry[i].spec) &
		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
			rc = efx_ef10_filter_remove_internal(efx,
					1U << EFX_FILTER_PRI_AUTO, i, true);
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data)
 	unsigned long pending;
 	enum reset_type method;
 
-	pending = ACCESS_ONCE(efx->reset_pending);
+	pending = READ_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
 	if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (ACCESS_ONCE(efx->state) != STATE_READY)
+	if (READ_ONCE(efx->state) != STATE_READY)
 		return;
 
 	/* efx_process_channel() will no longer read events once a
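efx_reset_work() must act on one consistent value of efx->reset_pending even though other contexts keep OR-ing bits into it; the READ_ONCE() snapshot feeds fls() so the highest-priority pending reset is chosen from a single read. Sketch (user-space stand-ins; fls_ul() is a hypothetical helper mirroring the kernel's 1-based fls()):

----
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long reset_pending;	/* bits OR-ed in concurrently */

/* Find-last-set, 1-based like the kernel's fls(); returns 0 for 0. */
static int fls_ul(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int pick_reset_method(void)
{
	/* One snapshot: fls() and any later tests see the same mask. */
	unsigned long pending = READ_ONCE(reset_pending);

	return fls_ul(pending) - 1;	/* -1 means nothing pending */
}
----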
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
 	unsigned long pending;
 	enum reset_type method;
 
-	pending = ACCESS_ONCE(efx->reset_pending);
+	pending = READ_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (ACCESS_ONCE(efx->state) != STATE_READY)
+	if (READ_ONCE(efx->state) != STATE_READY)
 		return;
 
 	queue_work(reset_workqueue, &efx->reset_work);
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 		  "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
 	ef4_oword_t reg;
 	int link_speed, isolate;
 
-	isolate = !!ACCESS_ONCE(efx->reset_pending);
+	isolate = !!READ_ONCE(efx->reset_pending);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
 	struct ef4_nic *efx = channel->efx;
 	int tx_packets = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
 	struct ef4_rx_queue *rx_queue;
 	struct ef4_nic *efx = channel->efx;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return;
 
 	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
 irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
 {
 	struct ef4_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	ef4_oword_t *int_ker = efx->irq_status.addr;
 	irqreturn_t result = IRQ_NONE;
 	struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
 		  "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Handle non-event-queue sources */
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
 					 unsigned int write_count)
 {
-	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
 
 	if (empty_read_count == 0)
 		return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
 
 static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
 {
-	return ACCESS_ONCE(channel->event_test_cpu);
+	return READ_ONCE(channel->event_test_cpu);
 }
 static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
 {
-	return ACCESS_ONCE(efx->last_irq_cpu);
+	return READ_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
 	 */
 	netif_tx_stop_queue(txq1->core_txq);
 	smp_mb();
-	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
-	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+	txq1->old_read_count = READ_ONCE(txq1->read_count);
+	txq2->old_read_count = READ_ONCE(txq2->read_count);
 
 	fill_level = max(txq1->insert_count - txq1->old_read_count,
 			 txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
 
 	/* Check whether the hardware queue is now empty */
 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
 		if (tx_queue->read_count == tx_queue->old_write_count) {
 			smp_mb();
 			tx_queue->empty_read_count =
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	struct efx_nic *efx = channel->efx;
 	int tx_packets = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return;
 
 	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	efx_oword_t *int_ker = efx->irq_status.addr;
 	irqreturn_t result = IRQ_NONE;
 	struct efx_channel *channel;
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
 		  "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Handle non-event-queue sources */
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
 static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
 					 unsigned int write_count)
 {
-	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
 
 	if (empty_read_count == 0)
 		return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
-	return ACCESS_ONCE(channel->event_test_cpu);
+	return READ_ONCE(channel->event_test_cpu);
 }
 static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 {
-	return ACCESS_ONCE(efx->last_irq_cpu);
+	return READ_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
 
 	/* Write host time for specified period or until MC is done */
 	while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
-	       ACCESS_ONCE(*mc_running)) {
+	       READ_ONCE(*mc_running)) {
 		struct timespec64 update_time;
 		unsigned int host_time;
 
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
 		do {
 			pps_get_ts(&now);
 		} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
-			 ACCESS_ONCE(*mc_running));
+			 READ_ONCE(*mc_running));
 
 		/* Synchronise NIC with single word of time only */
 		host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 				       ptp->start.dma_addr);
 
 	/* Clear flag that signals MC ready */
-	ACCESS_ONCE(*start) = 0;
+	WRITE_ONCE(*start, 0);
 	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
 				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
 	EFX_WARN_ON_ONCE_PARANOID(rc);
 
 	/* Wait for start from MCDI (or timeout) */
 	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
-	while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+	while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
 		udelay(20);	/* Usually start MCDI execution quickly */
 		loops++;
 	}
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 	if (!time_before(jiffies, timeout))
 		++ptp->sync_timeouts;
 
-	if (ACCESS_ONCE(*start))
+	if (READ_ONCE(*start))
 		efx_ptp_send_times(efx, &last_time);
 
 	/* Collect results */
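efx_ptp_synchronize() clears a word in DMA-shared memory with WRITE_ONCE() and then polls it with READ_ONCE() until the MC firmware sets it or the wait times out. Without the ONCE accessors the compiler would be free to hoist the load out of the loop and spin forever on a stale value. Sketch of the bounded poll (hypothetical names; udelay() elided):

----
#include <stdbool.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

/* Poll a flag that another agent (firmware via DMA) will set. */
static bool wait_for_start(unsigned int *start, unsigned long max_loops)
{
	unsigned long loops = 0;

	WRITE_ONCE(*start, 0);		/* clear the "MC ready" flag */

	/* READ_ONCE() forces a fresh load on every iteration. */
	while (!READ_ONCE(*start) && loops++ < max_loops)
		;			/* the driver udelay(20)s here */

	return READ_ONCE(*start) != 0;
}
----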
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 	 */
 	netif_tx_stop_queue(txq1->core_txq);
 	smp_mb();
-	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
-	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+	txq1->old_read_count = READ_ONCE(txq1->read_count);
+	txq2->old_read_count = READ_ONCE(txq2->read_count);
 
 	fill_level = max(txq1->insert_count - txq1->old_read_count,
 			 txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 
 	/* Check whether the hardware queue is now empty */
 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
 		if (tx_queue->read_count == tx_queue->old_write_count) {
 			smp_mb();
 			tx_queue->empty_read_count =
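efx_tx_maybe_stop_queue() is the classic stop-then-recheck pattern: stop the queue, issue smp_mb(), then re-read read_count with READ_ONCE(); the barrier pairs with the completion path so a consumer that advanced just before the stop is never missed. A C11 sketch using atomic_thread_fence() as the smp_mb() analogue (hypothetical queue layout, not the driver's code):

----
#include <stdatomic.h>
#include <stdbool.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct txq {
	unsigned int insert_count;	/* producer-private */
	unsigned int read_count;	/* advanced by the completion path */
	bool stopped;
};

static void maybe_stop_queue(struct txq *q, unsigned int max_fill)
{
	q->stopped = true;	/* netif_tx_stop_queue() analogue */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */

	/* Re-read AFTER the barrier: completions may have advanced. */
	unsigned int old_read_count = READ_ONCE(q->read_count);

	if (q->insert_count - old_read_count < max_fill)
		q->stopped = false;	/* queue has room again: restart */
}
----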
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np,
 
 	pkts = dropped = errors = bytes = 0;
 
-	rx_rings = ACCESS_ONCE(np->rx_rings);
+	rx_rings = READ_ONCE(np->rx_rings);
 	if (!rx_rings)
 		goto no_rings;
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np,
 
 	pkts = errors = bytes = 0;
 
-	tx_rings = ACCESS_ONCE(np->tx_rings);
+	tx_rings = READ_ONCE(np->tx_rings);
 	if (!tx_rings)
 		goto no_rings;
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
 	 * and validate that the result isn't NULL - in case we are
 	 * racing against queue removal.
 	 */
-	int numvtaps = ACCESS_ONCE(tap->numvtaps);
+	int numvtaps = READ_ONCE(tap->numvtaps);
 	__u32 rxq;
 
 	if (!numvtaps)
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 	u32 numqueues = 0;
 
 	rcu_read_lock();
-	numqueues = ACCESS_ONCE(tun->numqueues);
+	numqueues = READ_ONCE(tun->numqueues);
 
 	txq = __skb_get_hash_symmetric(skb);
 	if (txq) {
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
-	numqueues = ACCESS_ONCE(tun->numqueues);
+	numqueues = READ_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
 	if (txq >= numqueues)
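Both tun paths read tun->numqueues exactly once and then use that snapshot for every bounds check and modulo that follows, so a concurrent detach shrinking the count cannot invalidate an index the code already validated. Sketch (hypothetical structure; the RCU protection of the queue array itself is out of scope here):

----
#include <stdint.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct tundev {
	uint32_t numqueues;	/* shrunk by a concurrent detach path */
};

static uint32_t select_queue(struct tundev *tun, uint32_t hash)
{
	uint32_t numqueues = READ_ONCE(tun->numqueues);	/* one snapshot */

	if (numqueues == 0)
		return 0;
	return hash % numqueues;	/* same value the check above saw */
}
----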
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 
 	tx_status = &desc->ud.ds_tx5212.tx_stat;
 
-	txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+	txstat1 = READ_ONCE(tx_status->tx_status_1);
 
 	/* No frame has been send or error */
 	if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
 		return -EINPROGRESS;
 
-	txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+	txstat0 = READ_ONCE(tx_status->tx_status_0);
 
 	/*
 	 * Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
 	u32 rxstat0, rxstat1;
 
 	rx_status = &desc->ud.ds_rx.rx_stat;
-	rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+	rxstat1 = READ_ONCE(rx_status->rx_status_1);
 
 	/* No frame received / not ready */
 	if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
 		return -EINPROGRESS;
 
 	memset(rs, 0, sizeof(struct ath5k_rx_status));
-	rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+	rxstat0 = READ_ONCE(rx_status->rx_status_0);
 
 	/*
 	 * Frame receive status
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
 
 	bus->dpc_running = true;
 	wmb();
-	while (ACCESS_ONCE(bus->dpc_triggered)) {
+	while (READ_ONCE(bus->dpc_triggered)) {
 		bus->dpc_triggered = false;
 		brcmf_sdio_dpc(bus);
 		bus->idlecount = 0;
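The brcmfmac worker re-reads dpc_triggered with READ_ONCE() on every pass, so a trigger that lands while brcmf_sdio_dpc() is running causes another iteration instead of being optimized away. Sketch of the trigger/drain handshake (hypothetical names, single flag):

----
#include <stdbool.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static bool dpc_triggered;

static void trigger_dpc(void)		/* e.g. from an interrupt path */
{
	WRITE_ONCE(dpc_triggered, true);
}

static void dpc_worker(void)
{
	/* Fresh load each pass: a trigger during the work re-runs it. */
	while (READ_ONCE(dpc_triggered)) {
		WRITE_ONCE(dpc_triggered, false);
		/* ... drain pending events (brcmf_sdio_dpc() here) ... */
	}
}
----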
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	bool calibrating = ACCESS_ONCE(mvm->calibrating);
+	bool calibrating = READ_ONCE(mvm->calibrating);
 
 	if (state)
 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			return -1;
 	} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 		   is_multicast_ether_addr(hdr->addr1)) {
-		u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
 
 		if (ap_sta_id != IWL_MVM_INVALID_STA)
 			sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
 		tcp_hdrlen(skb);
 
-	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+	dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
 
 	if (!sta->max_amsdu_len ||
 	    !ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1247,7 +1247,7 @@ restart:
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 	i = rxq->read;
 
 	/* W/A 9000 device step A0 wrap-around bug */
Some files were not shown because too many files have changed in this diff.