/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include "test_sched.h"
/* nRF51 has less RAM, so create fewer threads */
#if CONFIG_SRAM_SIZE <= 24
#define NUM_THREAD 2
#elif (CONFIG_SRAM_SIZE <= 32) \
	|| defined(CONFIG_SOC_EMSK_EM7D)
#define NUM_THREAD 3
#else
#define NUM_THREAD 10
#endif
#define BASE_PRIORITY 0
#define ITERATION_COUNT 5
BUILD_ASSERT(NUM_THREAD <= MAX_NUM_THREAD);
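/* never create more threads than the shared stack array can hold */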
/* slice size in milliseconds */
#define SLICE_SIZE 200
/* stay busy for longer than one slice */
#define BUSY_MS (SLICE_SIZE + 20)
static struct k_thread t[NUM_THREAD];
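/* each worker gives this semaphore once per consumed slice */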
static K_SEM_DEFINE(sema1, 0, NUM_THREAD);
/* reference time used to measure the slice consumed by the previous thread */
static s64_t elapsed_slice;
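/* index of the thread expected to run in the next slice */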
static int thread_idx;
static void thread_tslice(void *p1, void *p2, void *p3)
{
	/* Print a newline for the last thread, a letter for the rest */
	int thread_parameter = ((int)p1 == (NUM_THREAD - 1)) ? '\n' :
			       ((int)p1 + 'A');
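	/* The slice is programmed in ticks, so converting the requested
	 * millisecond value to ticks and back yields the actual slice
	 * length; allow one extra tick of tolerance for timer alignment.
	 */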
	s64_t expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE));
	s64_t expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE) + 1);
	while (1) {
		s64_t tdelta = k_uptime_delta(&elapsed_slice);

		TC_PRINT("%c", thread_parameter);
		/* Test fails if the thread exceeds its allotted time slice
		 * or if any thread is scheduled out of order.
		 */
		zassert_true(((tdelta >= expected_slice_min) &&
			      (tdelta <= expected_slice_max) &&
			      ((int)p1 == thread_idx)), NULL);
		thread_idx = (thread_idx + 1) % NUM_THREAD;
		/* Keep the current thread busy for more than one slice,
		 * so that when the time slice runs out the next thread
		 * is scheduled in.
		 */
		spin_for_ms(BUSY_MS);
		k_sem_give(&sema1);
	}
}
/* test cases */
/**
 * @brief Check the behavior of preemptive threads when the
 * time slice is disabled and enabled
 *
 * @details Create multiple preemptive threads with equal
 * priorities and enable the time slice. Ensure that each
 * thread is given the time slice period to execute.
 *
 * @ingroup kernel_sched_tests
 */
void test_slice_scheduling(void)
{
	k_tid_t tid[NUM_THREAD];
	int old_prio = k_thread_priority_get(k_current_get());
	int count = 0;

	/* disable time slicing */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));

	/* run the current thread at the same preemptive priority as the
	 * worker threads
	 */
	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(BASE_PRIORITY));

	/* create threads with equal preemptive priority */
	for (int i = 0; i < NUM_THREAD; i++) {
		tid[i] = k_thread_create(&t[i], tstacks[i], STACK_SIZE,
					 thread_tslice, (void *)(intptr_t)i,
					 NULL, NULL,
					 K_PRIO_PREEMPT(BASE_PRIORITY), 0, 0);
	}
	/* enable time slicing */
	k_sched_time_slice_set(SLICE_SIZE, K_PRIO_PREEMPT(BASE_PRIORITY));
	while (count < ITERATION_COUNT) {
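		/* reset the slice reference time; the return value is
		 * ignored here
		 */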
		k_uptime_delta(&elapsed_slice);
		/* Keep the current thread busy for more than one slice,
		 * so that when the time slice runs out the next thread
		 * is scheduled in.
		 */
		spin_for_ms(BUSY_MS);

		/* relinquish the CPU and wait for each thread to complete */
		for (int i = 0; i < NUM_THREAD; i++) {
			k_sem_take(&sema1, K_FOREVER);
		}
		count++;
	}
	/* test case teardown */
	for (int i = 0; i < NUM_THREAD; i++) {
		k_thread_abort(tid[i]);
	}

	/* disable time slicing */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));
	k_thread_priority_set(k_current_get(), old_prio);
}