/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <arch/xtensa/xtensa_context.h>

/**
 *
 * @brief Atomically clear a memory location
 *
 * This routine atomically clears the contents of <target> and returns the old
 * value that was in <target>.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_clear
 * (
 *     atomic_t *target      /@ memory location to clear @/
 * )
 */
        .global atomic_clear
        .type atomic_clear,@function
        .align 4
atomic_clear:
        ENTRY(48)
.L_LoopClear:
        l32ai   a3, a2, 0               /* a3 = current value of *target */
        wsr     a3, scompare1           /* stage expected value for S32C1I */
        movi    a4, 0                   /* reload the zero on every pass: S32C1I overwrites a4 */
        s32c1i  a4, a2, 0               /* *target = 0 only if still equal to a3 */
        bne     a3, a4, .L_LoopClear    /* lost a race, retry */
        mov     a2, a3                  /* return the old value */
        RET(48)

/**
 *
 * @brief Atomically set a memory location
 *
 * This routine atomically sets the contents of <target> to <value> and returns
 * the old value that was in <target>.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_set
 * (
 *     atomic_t *target,     /@ memory location to set @/
 *     atomic_val_t value    /@ set with this value @/
 * )
 */
        .global atomic_set
        .type atomic_set,@function
        .align 4
atomic_set:
        ENTRY(48)
.L_LoopSet:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        mov     a5, a3                  /* store from a scratch copy: S32C1I overwrites its source */
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopSet
        mov     a2, a4                  /* return the old value */
        RET(48)

/**
 *
 * @brief Get the value of a shared memory location atomically
 *
 * This routine atomically retrieves the value in *target.
 *
 * long atomic_get
 * (
 *     atomic_t *target      /@ address of atom to be retrieved @/
 * )
 *
 * @return value read from address target.
 */
        .global atomic_get
        .type atomic_get,@function
        .align 4
atomic_get:
        ENTRY(48)
        l32ai   a2, a2, 0
        RET(48)
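/*
 * All of the read-modify-write routines below share the same SCOMPARE1/S32C1I
 * retry loop: L32AI reads the current value, WSR stages it in SCOMPARE1, and
 * S32C1I stores the new value only if the word is still unchanged, returning
 * the previous memory contents in its source register so a lost race can be
 * detected and retried.  As an illustrative sketch only (not part of this
 * file's API), the pattern corresponds roughly to the following C built on
 * the GCC __sync_val_compare_and_swap() builtin; atomic_add_sketch() is a
 * hypothetical name used purely for illustration:
 *
 *	static inline atomic_val_t atomic_add_sketch(atomic_t *target,
 *						     atomic_val_t value)
 *	{
 *		atomic_val_t old, prev;
 *
 *		do {
 *			old = *(volatile atomic_t *)target;
 *			prev = __sync_val_compare_and_swap(target, old,
 *							   old + value);
 *		} while (prev != old);	/@ lost the race, retry @/
 *
 *		return old;		/@ old value, as the routines below return @/
 *	}
 */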
/**
 *
 * @brief Atomically increment a memory location
 *
 * This routine atomically increments the value in <target>. The operation is
 * done using unsigned integer arithmetic. Various CPU architectures may
 * impose restrictions with regard to the alignment and cache attributes of
 * the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_inc
 * (
 *     atomic_t *target      /@ memory location to increment @/
 * )
 */
        .global atomic_inc
        .type atomic_inc,@function
        .align 4
atomic_inc:
        ENTRY(48)
.L_LoopInc:
        l32ai   a3, a2, 0
        wsr     a3, scompare1
        addi    a4, a3, 1               /* new value = old + 1 */
        s32c1i  a4, a2, 0
        bne     a3, a4, .L_LoopInc
        mov     a2, a3
        RET(48)

/**
 *
 * @brief Atomically add a value to a memory location
 *
 * This routine atomically adds the contents of <target> and <value>, placing
 * the result in <target>. The operation is done using signed integer
 * arithmetic. Various CPU architectures may impose restrictions with regard
 * to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_add
 * (
 *     atomic_t *target,     /@ memory location to add to @/
 *     atomic_val_t value    /@ value to add @/
 * )
 */
        .global atomic_add
        .type atomic_add,@function
        .align 4
atomic_add:
        ENTRY(48)
.L_LoopAdd:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        add     a5, a3, a4              /* new value = value + old */
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopAdd
        mov     a2, a5
        RET(48)

/**
 *
 * @brief Atomically decrement a memory location
 *
 * This routine atomically decrements the value in <target>. The operation is
 * done using unsigned integer arithmetic. Various CPU architectures may impose
 * restrictions with regard to the alignment and cache attributes of the
 * atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_dec
 * (
 *     atomic_t *target      /@ memory location to decrement @/
 * )
 */
        .global atomic_dec
        .type atomic_dec,@function
        .align 4
atomic_dec:
        ENTRY(48)
.L_LoopDec:
        l32ai   a3, a2, 0
        wsr     a3, scompare1
        addi    a4, a3, -1              /* new value = old - 1 */
        s32c1i  a4, a2, 0
        bne     a3, a4, .L_LoopDec
        mov     a2, a3
        RET(48)

/**
 *
 * @brief Atomically subtract a value from a memory location
 *
 * This routine atomically subtracts <value> from the contents of <target>,
 * placing the result in <target>. The operation is done using signed integer
 * arithmetic. Various CPU architectures may impose restrictions with regard
 * to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_sub
 * (
 *     atomic_t *target,     /@ memory location to subtract from @/
 *     atomic_val_t value    /@ value to subtract @/
 * )
 */
        .global atomic_sub
        .type atomic_sub,@function
        .align 4
atomic_sub:
        ENTRY(48)
.L_LoopSub:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        sub     a5, a4, a3              /* new value = old - value */
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopSub
        mov     a2, a5
        RET(48)
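/*
 * Note on the NAND routine below: it forms ~(value & *target) without loading
 * an all-ones constant, using the two's-complement identity -x == ~x + 1,
 * so NEG followed by ADDI -1 yields ~x.
 */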
/**
 *
 * @brief Atomically perform a bitwise NAND on a memory location
 *
 * This routine atomically performs a bitwise NAND operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regard to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_nand
 * (
 *     atomic_t *target,     /@ memory location to NAND @/
 *     atomic_val_t value    /@ NAND with this value @/
 * )
 */
        .global atomic_nand
        .type atomic_nand,@function
        .align 4
atomic_nand:
        ENTRY(48)
.L_LoopNand:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        and     a5, a3, a4
        neg     a5, a5
        addi    a5, a5, -1              /* a5 = ~(value & old), since -x - 1 == ~x */
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopNand
        mov     a2, a4
        RET(48)

/**
 *
 * @brief Atomically perform a bitwise AND on a memory location
 *
 * This routine atomically performs a bitwise AND operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regard to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_and
 * (
 *     atomic_t *target,     /@ memory location to AND @/
 *     atomic_val_t value    /@ AND with this value @/
 * )
 */
        .global atomic_and
        .type atomic_and,@function
        .align 4
atomic_and:
        ENTRY(48)
.L_LoopAnd:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        and     a5, a3, a4
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopAnd
        mov     a2, a4
        RET(48)

/**
 *
 * @brief Atomically perform a bitwise OR on a memory location
 *
 * This routine atomically performs a bitwise OR operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regard to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_or
 * (
 *     atomic_t *target,     /@ memory location to OR @/
 *     atomic_val_t value    /@ OR with this value @/
 * )
 */
        .global atomic_or
        .type atomic_or,@function
        .align 4
atomic_or:
        ENTRY(48)
.L_LoopOr:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        or      a5, a3, a4
        s32c1i  a5, a2, 0
        bne     a4, a5, .L_LoopOr
        mov     a2, a4
        RET(48)

/**
 *
 * @brief Atomically perform a bitwise XOR on a memory location
 *
 * This routine atomically performs a bitwise XOR operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regard to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_xor
 * (
 *     atomic_t *target,     /@ memory location to XOR @/
 *     atomic_val_t value    /@ XOR with this value @/
 * )
 */
        .global atomic_xor
        .type atomic_xor,@function
        .align 4
atomic_xor:
        ENTRY(48)
.L_LoopXor:
        l32ai   a4, a2, 0
        wsr     a4, scompare1
        xor     a5, a3, a4
        s32c1i  a5, a2, 0
        bne     a5, a4, .L_LoopXor
        mov     a2, a4
        RET(48)
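/*
 * The bitwise routines above are typically used on flag words, e.g.
 * (illustrative sketch only; FLAG_BUSY and my_flags are hypothetical names):
 *
 *	#define FLAG_BUSY (1 << 0)
 *
 *	static atomic_t my_flags;
 *
 *	static inline void set_busy(void)
 *	{
 *		(void)atomic_or(&my_flags, FLAG_BUSY);	/@ old value returned, unused @/
 *	}
 *
 *	static inline void clear_busy(void)
 *	{
 *		(void)atomic_and(&my_flags, ~FLAG_BUSY);
 *	}
 */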
/**
 *
 * @brief Atomically compare-and-swap the contents of a memory location
 *
 * This routine performs an atomic compare-and-swap, testing whether the
 * contents of <target> equal <oldValue> and, if they do, setting the value of
 * <target> to <newValue>. Various CPU architectures may impose restrictions
 * with regard to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return 1 if the swap is actually executed, 0 otherwise.
 *
 * int atomic_cas
 * (
 *     atomic_t *target,         /@ memory location to compare-and-swap @/
 *     atomic_val_t oldValue,    /@ compare to this value @/
 *     atomic_val_t newValue     /@ swap with this value @/
 * )
 */
        .global atomic_cas
        .type atomic_cas,@function
        .align 4
atomic_cas:
        ENTRY(48)
        l32ai   a5, a2, 0               /* a5 = current value of *target */
        beq     a5, a3, 2f              /* matches oldValue: attempt the swap */
1:
        movi    a2, 0                   /* no swap performed */
        j       3f
2:
        wsr     a5, scompare1
        s32c1i  a4, a2, 0               /* *target = newValue only if still == oldValue */
        bne     a4, a5, 1b              /* changed under us: report failure */
        movi    a2, 1                   /* swap committed */
3:
        RET(48)
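/*
 * atomic_cas() returns 1 only when <target> still held <oldValue> and the
 * swap was committed, which makes it a building block for simple lock-free
 * constructs, e.g. (illustrative sketch only; my_lock, try_lock() and
 * unlock() are hypothetical names):
 *
 *	static atomic_t my_lock;
 *
 *	static inline int try_lock(void)
 *	{
 *		return atomic_cas(&my_lock, 0, 1);	/@ 1 on success, 0 if held @/
 *	}
 *
 *	static inline void unlock(void)
 *	{
 *		(void)atomic_clear(&my_lock);
 *	}
 */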