1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 | /* * Copyright (c) 2018 Intel Corporation. * * SPDX-License-Identifier: Apache-2.0 */ #include <zephyr.h> #include <tc_util.h> #include <ztest.h> #include <kernel.h> #include <spinlock.h> BUILD_ASSERT(CONFIG_MP_NUM_CPUS > 1); #define CPU1_STACK_SIZE 1024 K_THREAD_STACK_DEFINE(cpu1_stack, CPU1_STACK_SIZE); struct k_thread cpu1_thread; static struct k_spinlock bounce_lock; volatile int bounce_owner, bounce_done; /** * @brief Tests for spinlock * * @defgroup kernel_spinlock_tests Spinlock Tests * * @ingroup all_tests * * @{ * @} */ /** * @brief Test basic spinlock * * @ingroup kernel_spinlock_tests * * @see k_spin_lock(), k_spin_unlock() */ void test_spinlock_basic(void) { k_spinlock_key_t key; static struct k_spinlock l; zassert_true(!l.locked, "Spinlock initialized to locked"); key = k_spin_lock(&l); zassert_true(l.locked, "Spinlock failed to lock"); k_spin_unlock(&l, key); zassert_true(!l.locked, "Spinlock failed to unlock"); } void bounce_once(int id) { int i, locked; k_spinlock_key_t key; /* Take the lock, check last owner and release if it was us. * Wait for us to get the lock "after" another CPU */ locked = 0; for (i = 0; i < 10000; i++) { key = k_spin_lock(&bounce_lock); if (bounce_owner != id) { locked = 1; break; } k_spin_unlock(&bounce_lock, key); k_busy_wait(100); } if (!locked && bounce_done) { return; } zassert_true(locked, "Other cpu did not get lock in 10000 tries"); /* Mark us as the owner, spin for a while validating that we * never see another owner write to the protected data. 
*/ bounce_owner = id; for (i = 0; i < 100; i++) { zassert_true(bounce_owner == id, "Locked data changed"); } /* Release the lock */ k_spin_unlock(&bounce_lock, key); } void cpu1_fn(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); while (1) { bounce_once(4321); } } /** * @brief Test spinlock with bounce * * @ingroup kernel_spinlock_tests * * @see arch_start_cpu() */ void test_spinlock_bounce(void) { int i; k_thread_create(&cpu1_thread, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, NULL, NULL, NULL, 0, 0, K_NO_WAIT); k_busy_wait(10); for (i = 0; i < 10000; i++) { bounce_once(1234); } bounce_done = 1; } void test_main(void) { ztest_test_suite(spinlock, ztest_unit_test(test_spinlock_basic), ztest_unit_test(test_spinlock_bounce)); ztest_run_test_suite(spinlock); } |