/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @addtogroup t_stack_api
 * @{
 * @defgroup t_stack_api_basic test_stack_api_basic
 * @brief TestPurpose: verify Zephyr stack APIs under different contexts
 * - API coverage
 *   -# k_stack_init K_STACK_DEFINE
 *   -# k_stack_push
 *   -# k_stack_pop
 * @}
 */

#include <ztest.h>
#include <irq_offload.h>

#define STACK_SIZE 512
#define STACK_LEN 2

/**TESTPOINT: init via K_STACK_DEFINE*/
K_STACK_DEFINE(kstack, STACK_LEN);
__kernel struct k_stack stack;

K_THREAD_STACK_DEFINE(threadstack, STACK_SIZE);
__kernel struct k_thread thread_data;
static u32_t data[STACK_LEN] = { 0xABCD, 0x1234 };
__kernel struct k_sem end_sema;

static void tstack_push(struct k_stack *pstack)
{
	for (int i = 0; i < STACK_LEN; i++) {
		/**TESTPOINT: stack push*/
		k_stack_push(pstack, data[i]);
	}
}

static void tstack_pop(struct k_stack *pstack)
{
	u32_t rx_data;

	for (int i = STACK_LEN - 1; i >= 0; i--) {
		/**TESTPOINT: stack pop*/
		zassert_false(k_stack_pop(pstack, &rx_data, K_NO_WAIT), NULL);
		zassert_equal(rx_data, data[i], NULL);
	}
}

/* entry points for the ISR and thread contexts */
static void tIsr_entry_push(void *p)
{
	tstack_push((struct k_stack *)p);
}

static void tIsr_entry_pop(void *p)
{
	tstack_pop((struct k_stack *)p);
}

static void tThread_entry(void *p1, void *p2, void *p3)
{
	/* pop the data pushed by the parent thread, then push it back */
	tstack_pop((struct k_stack *)p1);
	k_sem_give(&end_sema);
	tstack_push((struct k_stack *)p1);
	k_sem_give(&end_sema);
}

static void tstack_thread_thread(struct k_stack *pstack)
{
	k_sem_init(&end_sema, 0, 1);
	/**TESTPOINT: thread-thread data passing via stack*/
	k_tid_t tid = k_thread_create(&thread_data, threadstack, STACK_SIZE,
				      tThread_entry, pstack, NULL, NULL,
				      K_PRIO_PREEMPT(0),
				      K_USER | K_INHERIT_PERMS, 0);

	/* push data for the child thread to pop, then wait until it has
	 * popped and pushed the data back before popping it here
	 */
	tstack_push(pstack);
	k_sem_take(&end_sema, K_FOREVER);
	k_sem_take(&end_sema, K_FOREVER);
	tstack_pop(pstack);

	/* clear the spawned thread to avoid side effects */
	k_thread_abort(tid);
}

static void tstack_thread_isr(struct k_stack *pstack)
{
	k_sem_init(&end_sema, 0, 1);
	/**TESTPOINT: thread-isr data passing via stack*/
	/* ISR pushes and the thread pops, then the thread pushes and the
	 * ISR pops
	 */
	irq_offload(tIsr_entry_push, pstack);
	tstack_pop(pstack);
	tstack_push(pstack);
	irq_offload(tIsr_entry_pop, pstack);
}

/* test cases */
void test_stack_thread2thread(void)
{
	/**TESTPOINT: test k_stack_init stack*/
	k_stack_init(&stack, data, STACK_LEN);
	tstack_thread_thread(&stack);

	/**TESTPOINT: test K_STACK_DEFINE stack*/
	tstack_thread_thread(&kstack);
}

void test_stack_thread2isr(void)
{
	/**TESTPOINT: test k_stack_init stack*/
	k_stack_init(&stack, data, STACK_LEN);
	tstack_thread_isr(&stack);

	/**TESTPOINT: test K_STACK_DEFINE stack*/
	tstack_thread_isr(&kstack);
}
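
/*
 * The sketch below is not part of the test source above: it is a minimal,
 * assumed example of how these two test cases could be registered with the
 * classic ztest framework from a separate test_main() (typically kept in a
 * dedicated main.c). The suite name test_stack_api is illustrative. On
 * builds with CONFIG_USERSPACE enabled, the thread-to-thread case may
 * instead need ztest_user_unit_test() plus k_thread_access_grant() for the
 * kernel objects declared above.
 */
#include <ztest.h>

extern void test_stack_thread2thread(void);
extern void test_stack_thread2isr(void);

void test_main(void)
{
	/* register both test cases in one suite and run it */
	ztest_test_suite(test_stack_api,
			 ztest_unit_test(test_stack_thread2thread),
			 ztest_unit_test(test_stack_thread2isr));
	ztest_run_test_suite(test_stack_api);
}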