author    Will Deacon <will.deacon@arm.com>  2017-10-16 10:27:20 +0100
committer Will Deacon <will.deacon@arm.com>  2017-10-16 10:27:20 +0100
commit    61483dd12ffeb625f48a6b2fedbf5eb9e1d58d12 (patch)
tree      7beca6c702288d07f45e842262ef6a619a34fca4
parent    658230d9740e71b66a065bc963a39c8633e12b4c (diff)
download  qrwlock-rmem-61483dd12ffeb625f48a6b2fedbf5eb9e1d58d12.tar.gz
Everything filled in...
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--  Makefile |   2
-rw-r--r--  kernel.h | 147
2 files changed, 126 insertions, 23 deletions
diff --git a/Makefile b/Makefile
index 2c2a6d1..bc17b90 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ CFLAGS=-Wall -O2 -fno-strict-aliasing
TARGET=qrwlock
OBJS= main.o qrwlock.o
-$(TARGET) : $(OBJS)
+$(TARGET) : $(OBJS) kernel.h
all: $(TARGET)
clean:
diff --git a/kernel.h b/kernel.h
index 8c061e3..66ba11e 100644
--- a/kernel.h
+++ b/kernel.h
@@ -222,6 +222,51 @@ do { \
#define smp_load_acquire(p) __smp_load_acquire(p)
/* smp_cond_load_acquire */
+#define __CMPWAIT_CASE(w, sz, name) \
+static inline void __cmpwait_case_##name(volatile void *ptr, \
+ unsigned long val) \
+{ \
+ unsigned long tmp; \
+ \
+ asm volatile( \
+ " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
+ " cbnz %" #w "[tmp], 1f\n" \
+/* " wfe\n" */ /* TODO: wfe support */ \
+ "1:" \
+ : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr) \
+ : [val] "r" (val)); \
+}
+
+__CMPWAIT_CASE(w, b, 1);
+__CMPWAIT_CASE(w, h, 2);
+__CMPWAIT_CASE(w, , 4);
+__CMPWAIT_CASE( , , 8);
+
+#define __CMPWAIT_GEN(sfx) \
+static inline void __cmpwait##sfx(volatile void *ptr, \
+ unsigned long val, \
+ int size) \
+{ \
+ switch (size) { \
+ case 1: \
+ return __cmpwait_case##sfx##_1(ptr, (u8)val); \
+ case 2: \
+ return __cmpwait_case##sfx##_2(ptr, (u16)val); \
+ case 4: \
+ return __cmpwait_case##sfx##_4(ptr, val); \
+ case 8: \
+ return __cmpwait_case##sfx##_8(ptr, val); \
+ default: \
+ abort(); /* hack for userspace */ \
+ } \
+}
+
+__CMPWAIT_GEN()
+
+#define __cmpwait_relaxed(ptr, val) \
+ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
@@ -230,7 +275,7 @@ do { \
VAL = smp_load_acquire(__PTR); \
if (cond_expr) \
break; \
-/* TODO __cmpwait_relaxed(__PTR, VAL); */ \
+ __cmpwait_relaxed(__PTR, VAL); \
} \
VAL; \
})
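
Note (not part of the commit): a minimal usage sketch of the smp_cond_load_acquire() enabled above, assuming this tree's kernel.h; the 'ready' flag and wait_for_ready() helper are invented for illustration.

#include "kernel.h"

static atomic_t ready;

/* Spin (eventually via wfe, once __cmpwait() emits it) until another
 * thread sets 'ready'. The final load has acquire semantics, so accesses
 * after the call cannot be reordered before it. VAL is the name the
 * macro gives to the most recently loaded value. */
static void wait_for_ready(void)
{
        smp_cond_load_acquire(&ready.counter, VAL != 0);
}
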
@@ -238,27 +283,48 @@ do { \
/* Atomics */
#define atomic_read(v) READ_ONCE((v)->counter)
-static inline void atomic_add(int i, atomic_t *v)
-{
- /* TODO */
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- /* TODO */
-}
+#define ATOMIC_OP(op, asm_op) \
+static inline void \
+atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+" prfm pstl1strm, %2\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
-static inline int atomic_add_return_acquire(int i, atomic_t *v)
-{
- /* TODO */
- return i;
-}
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+static inline int \
+atomic_##op##_return##name(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return" #name "\n" \
+" prfm pstl1strm, %2\n" \
+"1: ld" #acq "xr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" st" #rel "xr %w1, %w0, %2\n" \
+" cbnz %w1, 1b\n" \
+" " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : cl); \
+ \
+ return result; \
+} \
-static inline int atomic_sub_return_release(int i, atomic_t *v)
-{
- /* TODO */
- return i;
-}
+ATOMIC_OP(add, add)
+ATOMIC_OP(sub, sub)
+ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add)
+ATOMIC_OP_RETURN(_release, , , l, "memory", sub, sub)
#define atomic_cmpxchg_relaxed(v, old, new) \
cmpxchg_relaxed(&((v)->counter), (old), (new))
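
Note (not part of the commit): a rough sketch of how the generated return variants pair up, again assuming this tree's kernel.h; the refcount-style helpers are hypothetical.

#include "kernel.h"

static atomic_t refs;

static void get_ref(void)
{
        /* atomic_add_return_acquire(): ldaxr/stxr loop, so the returned
         * value comes from an acquire load. */
        (void)atomic_add_return_acquire(1, &refs);
}

static int put_ref(void)
{
        /* atomic_sub_return_release(): ldxr/stlxr loop, so accesses before
         * the call are ordered before the decrement becomes visible. */
        return atomic_sub_return_release(1, &refs) == 0;
}
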
@@ -268,14 +334,51 @@ static inline int atomic_sub_return_release(int i, atomic_t *v)
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
/* Ticket spinlock */
+#define TICKET_SHIFT 16
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- /* TODO */
+ unsigned int tmp;
+ arch_spinlock_t lockval, newval;
+
+ asm volatile(
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+/*" sevl\n" TODO: wfe/sev support */
+"2:"/* wfe\n" */
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- /* TODO */
+ unsigned long tmp;
+
+ asm volatile(
+ " ldrh %w1, %0\n"
+ " add %w1, %w1, #1\n"
+ " stlrh %w1, %0"
+ : "=Q" (lock->owner), "=&r" (tmp)
+ :
+ : "memory");
}
#endif /* __KERNEL_H */
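
Note (not part of the commit): the asm above is the classic arm64 ticket lock. The 32-bit lock word keeps the current owner in its low 16 bits and the next free ticket in the high 16 bits (TICKET_SHIFT); a locker atomically takes and increments 'next', spins until 'owner' equals its ticket, and unlock advances 'owner' with a release store (stlrh). Below is a portable C11 sketch of the same algorithm, using two separate fields rather than one packed word, for illustration only.

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
        _Atomic uint16_t owner;         /* low half of the packed lock word  */
        _Atomic uint16_t next;          /* high half of the packed lock word */
};

static void ticket_lock(struct ticket_lock *l)
{
        /* Take a ticket; the asm does this with one ldaxr/add/stxr loop
         * on the combined word, so ticket and owner are read together. */
        uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1,
                                                    memory_order_relaxed);

        /* Spin until we own the lock; the acquire load pairs with the
         * release store in ticket_unlock(). The arm64 version would
         * sevl/wfe here, which is stubbed out above as a TODO. */
        while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
                ;
}

static void ticket_unlock(struct ticket_lock *l)
{
        /* ldrh; add; stlrh */
        uint16_t cur = atomic_load_explicit(&l->owner, memory_order_relaxed);
        atomic_store_explicit(&l->owner, (uint16_t)(cur + 1),
                              memory_order_release);
}
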