@@ -10,37 +10,43 @@
 
 #define SEQLOCK_WRITER 1U
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if defined(__i386__) || defined(__x86_64__)
 #define spin_wait() atomic_thread_fence(memory_order_seq_cst)
+#elif defined(__aarch64__)
+#define spin_wait() __asm__ __volatile__("isb\n")
 #else
 #define spin_wait() ((void) 0)
 #endif
 
+#if defined(__aarch64__)
 #define SEVL() sevl()
 static inline void sevl(void)
 {
-    atomic_thread_fence(memory_order_seq_cst);
+    __asm__ volatile("sevl" : : :);
 }
 #define WFE() wfe()
 static inline int wfe(void)
 {
-    atomic_thread_fence(memory_order_seq_cst);
+    __asm__ volatile("wfe" : : : "memory");
     return 1;
 }
 #define LDX(a, b) ldx((a), (b))
-static inline uint32_t ldx(const _Atomic uint32_t *var, int mm)
+static inline uint32_t ldx(const uint8_t *var, int mm)
 {
     uint32_t old;
-
     if (mm == memory_order_acquire)
-        old = atomic_load_explicit(var, memory_order_acquire);
+        __asm volatile("ldaxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
     else if (mm == memory_order_relaxed)
-        old = atomic_load_explicit(var, memory_order_relaxed);
+        __asm volatile("ldxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
     else
         abort();
-
     return old;
 }
+#else /* generic */
+#define SEVL() (void) 0
+#define WFE() 1
+#define LDX(a, b) atomic_load_explicit((a), (b))
+#endif
 
 #define UNLIKELY(x) __builtin_expect(!!(x), 0)
 
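Not shown in this hunk is how these primitives are consumed. Below is a minimal sketch of the reader-side wait loop they are typically combined into, written against the aarch64 branch of this patch; the seqlock_t type, its seq field, and seqlock_acquire_rd() are illustrative names only, and the byte-wide counter is an assumption chosen to match the new ldx(const uint8_t *) signature. It presumes the definitions above (SEQLOCK_WRITER, spin_wait, SEVL, WFE, LDX, UNLIKELY) are in scope.

/* Sketch only: the type, field, and function names here are not part of the
 * patch.  Targets the aarch64 branch, where ldx() performs a byte load. */
typedef struct {
    uint8_t seq; /* low bit = SEQLOCK_WRITER while a write is in progress */
} seqlock_t;

static inline uint32_t seqlock_acquire_rd(const seqlock_t *lk)
{
    uint32_t seq = LDX(&lk->seq, memory_order_acquire);
    if (UNLIKELY(seq & SEQLOCK_WRITER)) {
        SEVL(); /* prime the event register so the first WFE falls through */
        while (WFE() &&
               ((seq = LDX(&lk->seq, memory_order_acquire)) & SEQLOCK_WRITER))
            spin_wait(); /* isb on aarch64, fence or no-op elsewhere */
    }
    return seq; /* caller re-reads the counter afterwards to detect a torn read */
}

The exclusive loads are what make the wait cheap: ldaxrb/ldxrb arm the core's exclusive monitor on the lock word, so the following wfe is woken by the writer's store to that word instead of by timed polling.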