/**
 * Copyright (c) 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
#define ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_

/* Included from <sys/atomic.h> */

/* Recent GCC versions actually do have working atomics support on
 * Xtensa (and so should work with CONFIG_ATOMIC_OPERATIONS_BUILTIN),
 * but existing versions of Xtensa's XCC do not.  So we define an
 * inline implementation here that is more or less identical to what
 * the builtins would provide.
 */
static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
{
	atomic_val_t ret;

	/* Actual Xtensa hardware seems to have only in-order
	 * pipelines, but the architecture does define a barrier load,
	 * so use it.  There is a matching s32ri instruction, but
	 * nothing in the Zephyr API requires a barrier store (all the
	 * atomic write ops have exchange semantics).
	 */
	__asm__ volatile("l32ai %0, %1, 0"
			 : "=r"(ret) : "r"(target) : "memory");
	return ret;
}

static ALWAYS_INLINE
atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
			atomic_val_t newval)
{
	__asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
			 : "+r"(newval), "+r"(oldval) : "r"(addr) : "memory");

	return newval; /* got swapped with the old memory by s32c1i */
}

static ALWAYS_INLINE
bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval)
{
	return oldval == xtensa_cas(target, oldval, newval);
}

static ALWAYS_INLINE
bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
{
	return (atomic_val_t) oldval
	       == xtensa_cas((atomic_t *) target, (atomic_val_t) oldval,
			     (atomic_val_t) newval);
}

/* Generates an atomic exchange sequence that swaps the value at
 * address "target", whose old value is read into "cur", with the
 * specified expression.  Evaluates to the old value which was
 * atomically replaced.
 */
#define Z__GEN_ATOMXCHG(expr) ({ \
	atomic_val_t res, cur; \
	do { \
		cur = *target; \
		res = xtensa_cas(target, cur, (expr)); \
	} while (res != cur); \
	res; })
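/* Illustrative only, NOT part of the Zephyr atomic API: any other
 * read-modify-write operation composes the same way, because the
 * macro binds "target" and "cur" at its expansion site.  For
 * example, a hypothetical atomic fetch-min, which retries the
 * s32c1i compare-and-swap until min(old, value) is installed and
 * then returns the old value:
 */
static ALWAYS_INLINE atomic_val_t xtensa_example_min(atomic_t *target,
						     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur < value ? cur : value);
}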
static ALWAYS_INLINE atomic_val_t atomic_set(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(value);
}

static ALWAYS_INLINE atomic_val_t atomic_add(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur + value);
}

static ALWAYS_INLINE atomic_val_t atomic_sub(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur - value);
}

static ALWAYS_INLINE atomic_val_t atomic_inc(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur + 1);
}

static ALWAYS_INLINE atomic_val_t atomic_dec(atomic_t *target)
{
	return Z__GEN_ATOMXCHG(cur - 1);
}

static ALWAYS_INLINE atomic_val_t atomic_or(atomic_t *target,
					    atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur | value);
}

static ALWAYS_INLINE atomic_val_t atomic_xor(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur ^ value);
}

static ALWAYS_INLINE atomic_val_t atomic_and(atomic_t *target,
					     atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(cur & value);
}

static ALWAYS_INLINE atomic_val_t atomic_nand(atomic_t *target,
					      atomic_val_t value)
{
	return Z__GEN_ATOMXCHG(~(cur & value));
}

static ALWAYS_INLINE void *atomic_ptr_get(const atomic_ptr_t *target)
{
	return (void *) atomic_get((atomic_t *) target);
}

static ALWAYS_INLINE void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
	return (void *) atomic_set((atomic_t *) target, (atomic_val_t) value);
}

static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target)
{
	return atomic_set(target, 0);
}

static ALWAYS_INLINE void *atomic_ptr_clear(atomic_ptr_t *target)
{
	return (void *) atomic_set((atomic_t *) target, 0);
}

#endif /* ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_ */
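/* Usage sketch (illustrative only; "once" and "do_init" are
 * hypothetical names, not part of this header): these ops compose
 * into the usual lock-free idioms.  For example, claiming a
 * one-time-init flag with compare-and-swap, where exactly one
 * caller observes the 0 -> 1 transition and runs the init:
 *
 *	static atomic_t once;
 *
 *	if (atomic_cas(&once, 0, 1)) {
 *		do_init();
 *	}
 */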