// Spinlock: the spin_lock() call path, from the generic API down to the ARM ldrex/strex loop
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}
#define raw_spin_lock(lock) _raw_spin_lock(lock)
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();	// no preemption on this CPU while the lock is held
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);	// lockdep hook, a no-op here
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);	// -> do_raw_spin_lock()
}
#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)
#define inc_preempt_count() add_preempt_count(1)
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
#define preempt_count() (current_thread_info()->preempt_count)
#define barrier() __asm__ __volatile__("": : :"memory")	// compiler barrier only, no CPU barrier
# define spin_acquire(l, s, t, i) do { } while (0)	// no-op without CONFIG_LOCKDEP
#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)	// without CONFIG_LOCK_STAT this simply calls lock(_lock)
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);	// sparse annotation only
	arch_spin_lock(&lock->raw_lock);
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		// tmp = lock->lock (exclusive load)
"	teq	%0, #0\n"		// is the lock free (== 0)?
#ifdef CONFIG_CPU_32v6K
"	wfene\n"			// lock held: sleep until an event (SEV from the unlocker)
#endif
"	strexeq	%0, %2, [%1]\n"		// lock free: try to store 1; tmp = 0 on success
"	teqeq	%0, #0\n"		// did the exclusive store succeed?
"	bne	1b"			// no: spin in place and retry
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");
	smp_mb();	// critical-section accesses must not be reordered before the lock is taken
}
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define mb()		do { dsb(); outer_sync(); } while (0)	// full barrier, including the outer cache
#define rmb()		dsb()					// read barrier
#define wmb()		do { dsb(st); outer_sync(); } while (0)	// write barrier
#define dma_rmb()	dmb(osh)
#define dma_wmb()	dmb(oshst)
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
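
// Usage sketch (not part of the kernel source traced above): how the
// spin_lock() path is typically used. struct my_counter and my_counter_inc()
// are hypothetical names for illustration only; spin_unlock() is the
// standard kernel counterpart of spin_lock().
struct my_counter {
	spinlock_t lock;
	unsigned long count;
};

static void my_counter_inc(struct my_counter *c)
{
	spin_lock(&c->lock);	// preemption off, then ldrex/strex spin until acquired
	c->count++;		// short, non-sleeping critical section
	spin_unlock(&c->lock);	// release the lock and re-enable preemption
}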
// Read-write lock: the write_lock() and read_lock() call paths
#define write_lock(lock) _raw_write_lock(lock)
#define _raw_write_lock(lock) __raw_write_lock(lock)
static inline void __raw_write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);	// lockdep hook
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);	// -> do_raw_write_lock()
}
# define do_raw_write_lock(rwlock) do { __acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		// tmp = rw->lock (exclusive load)
"	teq	%0, #0\n"		// free only when there is no reader and no writer
	WFE("ne")			// busy: wait for an event
"	strexeq	%0, %2, [%1]\n"		// free: try to store the writer bit 0x80000000
"	teq	%0, #0\n"		// did the exclusive store succeed?
"	bne	1b"			// no: retry
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");
	smp_mb();
}
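
// Usage sketch (hypothetical example, not from the source above): a writer
// takes the whole rwlock exclusively. struct my_table and my_table_set() are
// made-up names; write_unlock() is the standard counterpart of write_lock().
struct my_table {
	rwlock_t lock;
	int value;
};

static void my_table_set(struct my_table *t, int v)
{
	write_lock(&t->lock);	// spins until readers and writer are gone, then sets bit 31
	t->value = v;		// exclusive critical section
	write_unlock(&t->lock);
}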
#define read_lock(lock) _raw_read_lock(lock)
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
static inline void __raw_read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");	// CONFIG_DEBUG_SPINLOCK sanity check
	arch_read_lock(&lock->raw_lock);
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;
	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		// tmp = rw->lock (exclusive load)
"	adds	%0, %0, #1\n"		// bump the reader count; negative if a writer holds bit 31
"	strexpl	%1, %0, [%2]\n"		// no writer: try to store; tmp2 = 0 on success
	WFE("mi")			// writer present: wait for an event
"	rsbpls	%0, %1, #0\n"		// tmp = -tmp2; negative (mi) if the exclusive store failed
"	bmi	1b"			// retry on a writer or a failed store
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
	smp_mb();
}
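
// Usage sketch (hypothetical, reusing struct my_table from above): readers
// only increment the reader count, so any number of them may run
// concurrently. read_unlock() is the standard counterpart of read_lock().
static int my_table_get(struct my_table *t)
{
	int v;
	read_lock(&t->lock);	// reader count++, waits only while a writer holds bit 31
	v = t->value;		// shared critical section
	read_unlock(&t->lock);	// reader count--
	return v;
}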
// Seqlock (sequence lock): the writer bumps a sequence counter; readers retry on change
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);	// writers serialize against each other with a spinlock
	write_seqcount_begin(&sl->seqcount);
}
static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);	// lockdep hook
}
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;	// sequence becomes odd: a write is in progress
	smp_wmb();	// order the increment before the data writes
}
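
// Usage sketch (hypothetical names, not from the source above): the writer
// side of a seqlock. struct my_time and my_time_update() are made up;
// write_sequnlock() is the standard counterpart of write_seqlock().
struct my_time {
	seqlock_t lock;
	u64 sec, nsec;
};

static void my_time_update(struct my_time *t, u64 sec, u64 nsec)
{
	write_seqlock(&t->lock);	// take the spinlock, sequence becomes odd
	t->sec = sec;
	t->nsec = nsec;
	write_sequnlock(&t->lock);	// sequence becomes even again
}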
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}
# define seqcount_lockdep_reader_access(x)	// no-op without CONFIG_LOCKDEP
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();	// order the sequence read before the data reads
	return ret;
}
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;
repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {	// odd: a write is in progress
		cpu_relax();
		goto repeat;		// wait for the writer to finish
	}
	return ret;
}
#define cpu_relax() barrier()
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();	// order the data reads before the sequence re-read
	return __read_seqcount_retry(s, start);
}
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);	// changed: the read raced a write, caller retries
}
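
// Usage sketch (hypothetical, reusing struct my_time from above): the
// lockless reader side. The reader never blocks the writer; it simply
// re-reads whenever read_seqretry() reports that the sequence changed
// during the read section.
static void my_time_read(struct my_time *t, u64 *sec, u64 *nsec)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&t->lock);	// spins while the sequence is odd
		*sec = t->sec;
		*nsec = t->nsec;
	} while (read_seqretry(&t->lock, seq));	// retry if a writer intervened
}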