This avoids potential issues if callers were to loop on these
routines without some kind of memory barrier. Currently there
are no such users in-tree, but it seems better safe than sorry.

Also, in the tilepro case we read "current" before "next",
which gives us a slightly better guarantee that the lock was
actually unlocked at least momentarily if we return claiming
that it is not locked. None of the callers actually rely on
this behavior, as far as I know, however.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
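
To illustrate the concern (no such caller exists in-tree today), here is a
hypothetical polling loop; the function name is made up for the example.
Without READ_ONCE() inside arch_spin_is_locked(), the compiler would be
within its rights to hoist the ticket load out of the loop and spin on a
stale value forever:

	/* Hypothetical caller: busy-wait until the lock looks free. */
	static void wait_for_unlock(arch_spinlock_t *lock)
	{
		while (arch_spin_is_locked(lock))
			;	/* empty body: nothing here forces a reload */
	}

With READ_ONCE() the ticket loads are redone on every iteration, so such a
loop is safe even without an explicit barrier in the caller.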
* to claim the lock is held, since it will be momentarily
* if not already. There's no need to wait for a "valid"
* lock->next_ticket to become available.
+ * Use READ_ONCE() to ensure that calling this in a loop is OK.
 */
- return lock->next_ticket != lock->current_ticket;
+ int curr = READ_ONCE(lock->current_ticket);
+ int next = READ_ONCE(lock->next_ticket);
+
+ return next != curr;
}
void arch_spin_lock(arch_spinlock_t *lock);
#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H
+#include <linux/compiler.h>
+
/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT 17
#define __ARCH_SPIN_NEXT_MASK 0x7fff
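
For context: in the 64-bit variant both tickets are packed into the single
32-bit lock word, so one READ_ONCE() of lock->lock below snapshots them
together (unlike the tilepro code above, which reads two separate fields).
The arch_spin_current()/arch_spin_next() helpers used below presumably
unpack that word roughly like this; this is a sketch inferred from the
shift and mask constants, not code quoted from the patch:

	/* Sketch: extract the "current" and "next" tickets from the lock word. */
	static inline u32 arch_spin_current(u32 val)
	{
		return val >> __ARCH_SPIN_CURRENT_SHIFT;
	}

	static inline u32 arch_spin_next(u32 val)
	{
		return val & __ARCH_SPIN_NEXT_MASK;
	}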
/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- u32 val = lock->lock;
+ /* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+ u32 val = READ_ONCE(lock->lock);
return arch_spin_current(val) != arch_spin_next(val);
}