/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Thread Tests
 * @defgroup kernel_thread_tests Threads
 * @ingroup all_tests
 * @{
 * @}
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <string.h>

#include <ksched.h>

struct k_thread tdata;
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
size_t tstack_size = K_THREAD_STACK_SIZEOF(tstack);

/* local variables */
static K_THREAD_STACK_DEFINE(tstack_custom, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack_name, STACK_SIZE);
static struct k_thread tdata_custom;
static struct k_thread tdata_name;

static int main_prio;
static ZTEST_DMEM int tp = 10;

/**
 * @ingroup kernel_thread_tests
 * @brief Verify main thread
 */
ZTEST(threads_lifecycle, test_systhreads_main)
{
	zassert_true(main_prio == CONFIG_MAIN_THREAD_PRIORITY);
}

/**
 * @ingroup kernel_thread_tests
 * @brief Verify idle thread
 */
ZTEST(threads_lifecycle, test_systhreads_idle)
{
	k_msleep(100);
	/** TESTPOINT: the working thread's priority should be higher
	 * (numerically lower) than the idle thread's
	 */
	zassert_true(k_thread_priority_get(k_current_get()) < K_IDLE_PRIO,
		     NULL);
}
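/*
 * Entry point shared by the custom-data tests below: repeatedly stores an
 * incrementing value via k_thread_custom_data_set(), relinquishes the CPU,
 * and verifies the stored value survives rescheduling.
 */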
static void customdata_entry(void *p1, void *p2, void *p3)
{
	long data = 1U;

	zassert_is_null(k_thread_custom_data_get(), NULL);
	while (1) {
		k_thread_custom_data_set((void *)data);
		/* relinquish cpu for a while */
		k_msleep(50);
		/** TESTPOINT: custom data comparison */
		zassert_equal(data, (long)k_thread_custom_data_get());
		data++;
	}
}

/**
 * @ingroup kernel_thread_tests
 * @brief test thread custom data get/set from coop thread
 *
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST(threads_lifecycle_1cpu, test_customdata_get_set_coop)
{
	k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
				      customdata_entry, NULL, NULL, NULL,
				      K_PRIO_COOP(1), 0, K_NO_WAIT);

	k_msleep(500);

	/* cleanup environment */
	k_thread_abort(tid);
}

static void thread_name_entry(void *p1, void *p2, void *p3)
{
	/* Do nothing and exit */
}

/**
 * @ingroup kernel_thread_tests
 * @brief test thread name get/set from supervisor thread
 * @see k_thread_name_get(), k_thread_name_copy(), k_thread_name_set()
 */
ZTEST(threads_lifecycle, test_thread_name_get_set)
{
	int ret;
	const char *thread_name;
	char thread_buf[CONFIG_THREAD_MAX_NAME_LEN];

	/* Set and get current thread's name */
	ret = k_thread_name_set(NULL, "parent_thread");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	thread_name = k_thread_name_get(k_current_get());
	zassert_true(thread_name != NULL, "thread name was null");
	ret = strcmp(thread_name, "parent_thread");
	zassert_equal(ret, 0, "parent thread name does not match");

	/* Set and get child thread's name */
	k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
				      thread_name_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	ret = k_thread_name_set(tid, "customdata");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(tid, thread_buf, sizeof(thread_buf));
	zassert_equal(ret, 0, "couldn't get copied thread name");
	ret = strcmp(thread_buf, "customdata");
	zassert_equal(ret, 0, "child thread name does not match");

	/* cleanup environment */
	k_thread_abort(tid);
}
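/*
 * Fixtures for the user-mode error paths below: a string the user thread
 * has no permission to read, a buffer it may not write, and a semaphore
 * that stands in for a non-thread kernel object.
 */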
#ifdef CONFIG_USERSPACE
static char unreadable_string[64];
static char not_my_buffer[CONFIG_THREAD_MAX_NAME_LEN];
struct k_sem sem;
#endif /* CONFIG_USERSPACE */

/**
 * @ingroup kernel_thread_tests
 * @brief test thread name get/set from user thread
 * @see k_thread_name_copy(), k_thread_name_set()
 */
ZTEST_USER(threads_lifecycle, test_thread_name_user_get_set)
{
#ifdef CONFIG_USERSPACE
	int ret;
	char thread_name[CONFIG_THREAD_MAX_NAME_LEN];
	char too_small[2];

	/* Some memory-related error cases for k_thread_name_set() */
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
	/* Non-Secure images cannot normally access memory outside the image
	 * flash and ram.
	 */
	ret = k_thread_name_set(NULL, (const char *)0xFFFFFFF0);
	zassert_equal(ret, -EFAULT, "accepted nonsense string (%d)", ret);
#endif
	ret = k_thread_name_set(NULL, unreadable_string);
	zassert_equal(ret, -EFAULT, "accepted unreadable string");
	ret = k_thread_name_set((struct k_thread *)&sem, "some name");
	zassert_equal(ret, -EINVAL, "accepted non-thread object");
	ret = k_thread_name_set(&z_main_thread, "some name");
	zassert_equal(ret, -EINVAL, "no permission on thread object");

	/* Set and get current thread's name */
	ret = k_thread_name_set(NULL, "parent_thread");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(k_current_get(), thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, 0, "k_thread_name_copy() failed");
	ret = strcmp(thread_name, "parent_thread");
	zassert_equal(ret, 0, "parent thread name does not match");

	/* memory-related cases for k_thread_name_get() */
	ret = k_thread_name_copy(k_current_get(), too_small,
				 sizeof(too_small));
	zassert_equal(ret, -ENOSPC, "wrote to too-small buffer");
	ret = k_thread_name_copy(k_current_get(), not_my_buffer,
				 sizeof(not_my_buffer));
	zassert_equal(ret, -EFAULT, "wrote to buffer without permission");
	ret = k_thread_name_copy((struct k_thread *)&sem, thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, -EINVAL, "not a thread object");
	ret = k_thread_name_copy(&z_main_thread, thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, 0, "couldn't get main thread name");
	printk("Main thread name is '%s'\n", thread_name);

	/* Set and get child thread's name */
	k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
				      thread_name_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), K_USER, K_NO_WAIT);
	ret = k_thread_name_set(tid, "customdata");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(tid, thread_name, sizeof(thread_name));
	zassert_equal(ret, 0, "couldn't get copied thread name");
	ret = strcmp(thread_name, "customdata");
	zassert_equal(ret, 0, "child thread name does not match");

	/* cleanup environment */
	k_thread_abort(tid);
#else
	ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/**
 * @ingroup kernel_thread_tests
 * @brief test thread custom data get/set from preempt thread
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST_USER(threads_lifecycle_1cpu, test_customdata_get_set_preempt)
{
	/** TESTPOINT: custom data of preempt thread */
	k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
				      customdata_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);

	k_msleep(500);

	/* cleanup environment */
	k_thread_abort(tid);
}

static void umode_entry(void *thread_id, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	if (!z_is_thread_essential() &&
	    (k_current_get() == (k_tid_t)thread_id)) {
		ztest_test_pass();
	} else {
		zassert_unreachable("User thread is essential or thread"
				    " structure is corrupted\n");
	}
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test k_thread_user_mode_enter() to cover when userspace
 * is not supported/enabled
 * @see k_thread_user_mode_enter()
 */
static void enter_user_mode_entry(void *p1, void *p2, void *p3)
{
	z_thread_essential_set();

	zassert_true(z_is_thread_essential(), "Thread isn't set"
		     " as essential\n");

	k_thread_user_mode_enter((k_thread_entry_t)umode_entry,
				 k_current_get(), NULL, NULL);
}

ZTEST_USER(threads_lifecycle, test_user_mode)
{
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      enter_user_mode_entry, NULL, NULL, NULL,
				      main_prio, K_INHERIT_PERMS, K_NO_WAIT);

	k_msleep(100);
	k_thread_abort(tid);
}
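/*
 * k_thread_join() scenario machinery: join_thread either sleeps forever or
 * exits immediately, depending on the control_method passed to join_entry().
 * control_thread and do_join_from_isr() drive the abort-from-another-thread
 * and join-from-ISR variants.
 */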
struct k_thread join_thread;
K_THREAD_STACK_DEFINE(join_stack, STACK_SIZE);

struct k_thread control_thread;
K_THREAD_STACK_DEFINE(control_stack, STACK_SIZE);

enum control_method {
	TIMEOUT,
	NO_WAIT,
	SELF_ABORT,
	OTHER_ABORT,
	OTHER_ABORT_TIMEOUT,
	ALREADY_EXIT,
	ISR_ALREADY_EXIT,
	ISR_RUNNING
};

void join_entry(void *p1, void *p2, void *p3)
{
	enum control_method m = (enum control_method)(intptr_t)p1;

	switch (m) {
	case TIMEOUT:
	case NO_WAIT:
	case OTHER_ABORT:
	case OTHER_ABORT_TIMEOUT:
	case ISR_RUNNING:
		printk("join_thread: sleeping forever\n");
		k_sleep(K_FOREVER);
		break;
	case SELF_ABORT:
	case ALREADY_EXIT:
	case ISR_ALREADY_EXIT:
		printk("join_thread: self-exiting\n");
		return;
	}
}

void control_entry(void *p1, void *p2, void *p3)
{
	printk("control_thread: killing join thread\n");
	k_thread_abort(&join_thread);
}

void do_join_from_isr(const void *arg)
{
	int *ret = (int *)arg;

	zassert_true(k_is_in_isr());
	printk("isr: joining join_thread\n");
	*ret = k_thread_join(&join_thread, K_NO_WAIT);
	printk("isr: k_thread_join() returned with %d\n", *ret);
}

#define JOIN_TIMEOUT_MS 100

int join_scenario_interval(enum control_method m, int64_t *interval)
{
	k_timeout_t timeout = K_FOREVER;
	int ret;

	printk("ztest_thread: method %d, create join_thread\n", m);
	k_thread_create(&join_thread, join_stack, STACK_SIZE, join_entry,
			(void *)m, NULL, NULL, K_PRIO_PREEMPT(1),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	switch (m) {
	case ALREADY_EXIT:
	case ISR_ALREADY_EXIT:
		/* Let join_thread run first */
		k_msleep(50);
		break;
	case OTHER_ABORT_TIMEOUT:
		timeout = K_MSEC(JOIN_TIMEOUT_MS);
		__fallthrough;
	case OTHER_ABORT:
		printk("ztest_thread: create control_thread\n");
		k_thread_create(&control_thread, control_stack, STACK_SIZE,
				control_entry, NULL, NULL, NULL,
				K_PRIO_PREEMPT(2),
				K_USER | K_INHERIT_PERMS, K_NO_WAIT);
		break;
	case TIMEOUT:
		timeout = K_MSEC(50);
		break;
	case NO_WAIT:
		timeout = K_NO_WAIT;
		break;
	default:
		break;
	}

	if (m == ISR_ALREADY_EXIT || m == ISR_RUNNING) {
		irq_offload(do_join_from_isr, (const void *)&ret);
	} else {
		printk("ztest_thread: joining join_thread\n");

		if (interval != NULL) {
			*interval = k_uptime_get();
		}

		ret = k_thread_join(&join_thread, timeout);

		if (interval != NULL) {
			*interval = k_uptime_get() - *interval;
		}

		printk("ztest_thread: k_thread_join() returned with %d\n",
		       ret);
	}

	if (ret != 0) {
		k_thread_abort(&join_thread);
	}
	if (m == OTHER_ABORT || m == OTHER_ABORT_TIMEOUT) {
		k_thread_join(&control_thread, K_FOREVER);
	}

	return ret;
}

static inline int join_scenario(enum control_method m)
{
	return join_scenario_interval(m, NULL);
}

ZTEST_USER(threads_lifecycle, test_thread_join)
{
	int64_t interval;

#ifdef CONFIG_USERSPACE
	/* scenario: thread never started */
	zassert_equal(k_thread_join(&join_thread, K_FOREVER), 0,
		      "failed case thread never started");
#endif
	zassert_equal(join_scenario(TIMEOUT), -EAGAIN, "failed timeout case");
	zassert_equal(join_scenario(NO_WAIT), -EBUSY, "failed no-wait case");
	zassert_equal(join_scenario(SELF_ABORT), 0, "failed self-abort case");
	zassert_equal(join_scenario(OTHER_ABORT), 0,
		      "failed other-abort case");

	zassert_equal(join_scenario_interval(OTHER_ABORT_TIMEOUT, &interval),
		      0, "failed other-abort case with timeout");
	zassert_true(interval < JOIN_TIMEOUT_MS,
		     "join took too long (%lld ms)", interval);
	zassert_equal(join_scenario(ALREADY_EXIT), 0,
		      "failed already exit case");
}

ZTEST(threads_lifecycle, test_thread_join_isr)
{
	zassert_equal(join_scenario(ISR_RUNNING), -EBUSY,
		      "failed isr running");
	zassert_equal(join_scenario(ISR_ALREADY_EXIT), 0,
		      "failed isr exited");
}
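/*
 * Mutual-join deadlock fixture: deadlock1_thread and deadlock2_thread
 * join each other, so exactly one of the two joins must fail with
 * -EDEADLK while the other completes normally.
 */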
struct k_thread deadlock1_thread;
K_THREAD_STACK_DEFINE(deadlock1_stack, STACK_SIZE);

struct k_thread deadlock2_thread;
K_THREAD_STACK_DEFINE(deadlock2_stack, STACK_SIZE);

void deadlock1_entry(void *p1, void *p2, void *p3)
{
	int ret;

	k_msleep(500);

	ret = k_thread_join(&deadlock2_thread, K_FOREVER);
	zassert_equal(ret, -EDEADLK, "failed mutual join case");
}

void deadlock2_entry(void *p1, void *p2, void *p3)
{
	int ret;

	/* deadlock1_thread is active but currently sleeping */
	ret = k_thread_join(&deadlock1_thread, K_FOREVER);
	zassert_equal(ret, 0, "couldn't join deadlock1_thread");
}

ZTEST_USER(threads_lifecycle, test_thread_join_deadlock)
{
	/* Deadlock scenarios */
	zassert_equal(k_thread_join(k_current_get(), K_FOREVER), -EDEADLK,
		      "failed self-deadlock case");

	k_thread_create(&deadlock1_thread, deadlock1_stack, STACK_SIZE,
			deadlock1_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);
	k_thread_create(&deadlock2_thread, deadlock2_stack, STACK_SIZE,
			deadlock2_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	zassert_equal(k_thread_join(&deadlock1_thread, K_FOREVER), 0,
		      "couldn't join deadlock1_thread");
	zassert_equal(k_thread_join(&deadlock2_thread, K_FOREVER), 0,
		      "couldn't join deadlock2_thread");
}

#define WAIT_TO_START_MS 100

/*
 * Entry point for a delayed thread; does nothing. After the thread is
 * created, the test checks how many ticks have expired and how many
 * remain before the thread starts.
 */
static void user_start_thread(void *p1, void *p2, void *p3)
{
	/* do nothing */
}

ZTEST_USER(threads_lifecycle, test_thread_timeout_remaining_expires)
{
	k_ticks_t r, e, r1, ticks, expected_expires_ticks;

	ticks = k_ms_to_ticks_ceil32(WAIT_TO_START_MS);
	expected_expires_ticks = k_uptime_ticks() + ticks;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      user_start_thread, k_current_get(),
				      NULL, NULL, 0, K_USER,
				      K_MSEC(WAIT_TO_START_MS));

	k_msleep(10);
	e = k_thread_timeout_expires_ticks(tid);
	TC_PRINT("thread_expires_ticks: %d, expect: %d\n", (int)e,
		 (int)expected_expires_ticks);
	zassert_true(e >= expected_expires_ticks);

	k_msleep(10);
	r = k_thread_timeout_remaining_ticks(tid);
	zassert_true(r < ticks);
	r1 = r;

	k_msleep(10);
	r = k_thread_timeout_remaining_ticks(tid);
	zassert_true(r < r1);

	k_thread_abort(tid);
}
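/*
 * k_thread_foreach() callback: skips idle threads, exercises the -EINVAL
 * paths of k_thread_runtime_stats_get(), then accumulates each thread's
 * execution_cycles into the caller-provided stats struct.
 */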
static void foreach_callback(const struct k_thread *thread, void *user_data)
{
	k_thread_runtime_stats_t stats;
	int ret;

	if (z_is_idle_thread_object((k_tid_t)thread)) {
		return;
	}

	/* Check NULL parameters */
	ret = k_thread_runtime_stats_get(NULL, &stats);
	zassert_true(ret == -EINVAL);
	ret = k_thread_runtime_stats_get((k_tid_t)thread, NULL);
	zassert_true(ret == -EINVAL);

	k_thread_runtime_stats_get((k_tid_t)thread, &stats);

	((k_thread_runtime_stats_t *)user_data)->execution_cycles +=
		stats.execution_cycles;
}

/* This case accumulates every thread's execution_cycles first, then
 * gets the total execution_cycles from a global
 * k_thread_runtime_stats_t to see that all time is reflected in the
 * total.
 */
ZTEST(threads_lifecycle, test_thread_runtime_stats_get)
{
	k_thread_runtime_stats_t stats, stats_all;
	int ret;

	stats.execution_cycles = 0;

	k_thread_foreach(foreach_callback, &stats);

	/* Check NULL parameters */
	ret = k_thread_runtime_stats_all_get(NULL);
	zassert_true(ret == -EINVAL);

	k_thread_runtime_stats_all_get(&stats_all);

	zassert_true(stats.execution_cycles <= stats_all.execution_cycles);
}

ZTEST(threads_lifecycle, test_k_busy_wait)
{
	uint64_t cycles, dt;
	k_thread_runtime_stats_t test_stats;

	k_thread_runtime_stats_get(k_current_get(), &test_stats);
	cycles = test_stats.execution_cycles;

	k_busy_wait(0);

	k_thread_runtime_stats_get(k_current_get(), &test_stats);

	/* execution_cycles doesn't increase significantly after 0
	 * usec (10ms slop experimentally determined,
	 * non-deterministic software emulators are VERY slow wrt
	 * their cycle rate)
	 */
	dt = test_stats.execution_cycles - cycles;
	zassert_true(dt < k_ms_to_cyc_ceil64(10));

	cycles = test_stats.execution_cycles;
	k_busy_wait(100);
	k_thread_runtime_stats_get(k_current_get(), &test_stats);

	/* execution_cycles increases correctly */
	dt = test_stats.execution_cycles - cycles;
	zassert_true(dt >= k_us_to_cyc_floor64(100));
}

static void tp_entry(void *p1, void *p2, void *p3)
{
	tp = 100;
}

ZTEST_USER(threads_lifecycle_1cpu, test_k_busy_wait_user)
{
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      tp_entry, NULL, NULL, NULL, 0,
				      K_USER, K_NO_WAIT);

	k_busy_wait(1000);
	/* This is a 1cpu test case; the new thread has no chance to be
	 * scheduled, so the value of tp is unchanged.
	 */
	zassert_false(tp == 100);

	/* Give up the cpu; the new thread will change the value of tp
	 * to 100.
	 */
	k_msleep(100);
	zassert_true(tp == 100);
	k_thread_abort(tid);
}

#define INT_ARRAY_SIZE 128
int large_stack(size_t *space)
{
	/* use "volatile" to protect this variable from being optimized out */
	volatile int a[INT_ARRAY_SIZE];

	/* to avoid unused variable error */
	a[0] = 1;
	return k_thread_stack_space_get(k_current_get(), space);
}

int small_stack(size_t *space)
{
	return k_thread_stack_space_get(k_current_get(), space);
}

/* Test k_thread_stack_space_get(): the unused stack space reported in
 * large_stack() is smaller than in small_stack() because the former
 * function has a large local variable.
 */
ZTEST_USER(threads_lifecycle, test_k_thread_stack_space_get_user)
{
	size_t a, b;

	small_stack(&a);
	large_stack(&b);

	/* FIXME: Ideally, the following condition would assert true:
	 * (a - b) == INT_ARRAY_SIZE * sizeof(int)
	 * but it is not the case in native_posix, qemu_leon3 and
	 * qemu_cortex_a53. Relax the check condition here.
	 */
	zassert_true(b <= a);
}

void *thread_test_setup(void)
{
	k_thread_access_grant(k_current_get(), &tdata, tstack,
			      &tdata_custom, tstack_custom,
			      &tdata_name, tstack_name,
			      &join_thread, join_stack,
			      &control_thread, control_stack,
			      &deadlock1_thread, deadlock1_stack,
			      &deadlock2_thread, deadlock2_stack);

	main_prio = k_thread_priority_get(k_current_get());
#ifdef CONFIG_USERSPACE
	strncpy(unreadable_string, "unreadable string",
		sizeof(unreadable_string));
#endif

	return NULL;
}

ZTEST_SUITE(threads_lifecycle, NULL, thread_test_setup, NULL, NULL, NULL);
ZTEST_SUITE(threads_lifecycle_1cpu, NULL, thread_test_setup,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);