/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel semaphore object.
 *
 * The semaphores are of the 'counting' type: each 'give' operation
 * increments the internal count by 1 if no thread is pending on the
 * semaphore, and 'init' sets the count to a caller-supplied initial value.
 * Following multiple 'give' operations, the same number of 'take'
 * operations can be performed without the calling thread having to pend on
 * the semaphore or poll.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <ksched.h>
#include <init.h>
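
/*
 * Illustrative usage sketch (not part of this file): the counting
 * behavior described above, seen from application code through the
 * public k_sem API. The semaphore name, the thread roles, and the 100 ms
 * timeout are hypothetical.
 */
#if 0
static struct k_sem items_sem;	/* assumed initialized via k_sem_init() */

static void producer(void)
{
	/* signal one item: wakes the first pending thread if there is one,
	 * otherwise increments the count (up to the limit)
	 */
	k_sem_give(&items_sem);
}

static void consumer(void)
{
	/* take one item, blocking for up to 100 ms if none is available */
	if (k_sem_take(&items_sem, 100) == 0) {
		/* got an item */
	}
}
#endif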

#ifdef CONFIG_SEMAPHORE_GROUPS
struct sem_desc {

	/* node in list of semaphores */
	sys_dnode_t semg_node;

	/* thread waiting for semaphores */
	struct k_thread *thread;

	/* semaphore on which to wait */
	struct k_sem *sem;
};

struct sem_thread {

	/* dummy thread, only the thread base */
	struct _thread_base dummy;

	/* descriptor containing the real thread, semaphore, and group info */
	struct sem_desc desc;
};
#endif

extern struct k_sem _k_sem_list_start[];
extern struct k_sem _k_sem_list_end[];

struct k_sem *_trace_list_k_sem;

#ifdef CONFIG_OBJECT_TRACING

/*
 * Complete initialization of statically defined semaphores.
 */
static int init_sem_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_sem *sem;

	for (sem = _k_sem_list_start; sem < _k_sem_list_end; sem++) {
		SYS_TRACING_OBJ_INIT(k_sem, sem);
	}
	return 0;
}

SYS_INIT(init_sem_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */

void k_sem_init(struct k_sem *sem, unsigned int initial_count,
		unsigned int limit)
{
	__ASSERT(limit != 0, "limit cannot be zero");

	sem->count = initial_count;
	sem->limit = limit;
	sys_dlist_init(&sem->wait_q);

	_INIT_OBJ_POLL_EVENT(sem);

	SYS_TRACING_OBJ_INIT(k_sem, sem);
}
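
/*
 * Illustrative sketch (not part of this file) of the two k_sem_init()
 * parameters: initial_count is how many 'take' operations can succeed
 * immediately, and limit caps how far repeated 'give' operations can
 * raise the count (see increment_count_up_to_limit() below). The name
 * is hypothetical.
 */
#if 0
static struct k_sem ready_sem;

static void init_example(void)
{
	/* starts unavailable; at most one 'give' is remembered, which
	 * yields binary-semaphore behavior
	 */
	k_sem_init(&ready_sem, 0, 1);
}
#endif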

#ifdef CONFIG_SEMAPHORE_GROUPS
int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
		     int32_t timeout)
{
	unsigned int key;
	struct k_sem *item = *sem_array;
	int num = 0;

	__ASSERT(sem_array[0] != K_END, "Empty semaphore list");

	key = irq_lock();

	do {
		if (item->count > 0) {
			item->count--;       /* Available semaphore found */
			irq_unlock(key);
			*sem = item;
			return 0;
		}
		num++;
		item = sem_array[num];
	} while (item != K_END);

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		*sem = NULL;
		return -EBUSY;
	}

	struct sem_thread wait_objects[num];
	int32_t priority = k_thread_priority_get(_current);
	sys_dlist_t list;

	sys_dlist_init(&list);
	_current->base.swap_data = &list;

	for (int i = 0; i < num; i++) {

		_init_thread_base(&wait_objects[i].dummy, priority,
				  _THREAD_DUMMY, 0);

		sys_dlist_append(&list, &wait_objects[i].desc.semg_node);
		wait_objects[i].desc.thread = _current;
		wait_objects[i].desc.sem = sem_array[i];

		_pend_thread((struct k_thread *)&wait_objects[i].dummy,
			     &sem_array[i]->wait_q, timeout);
	}

	/*
	 * Pend the current thread on a dummy wait queue, adding it _after_ all
	 * the dummy threads on the _timeout_q, but expiring on the same tick,
	 * which will cause it to be _prepended_ to the dummy threads. See
	 * description of _add_timeout() for details.
	 */

	_wait_q_t wait_q;

	sys_dlist_init(&wait_q);
	_pend_current_thread(&wait_q, timeout);

	if (_Swap(key) != 0) {
		*sem = NULL;
		return -EAGAIN;
	}

	/* The accepted semaphore is the only one left on the list */

	struct sem_desc *desc = (struct sem_desc *)sys_dlist_get(&list);

	*sem = desc->sem;
	return 0;
}
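
/*
 * Illustrative sketch (not part of this file): waiting on a group. Per
 * the assertion above, sem_array is terminated by K_END, and on success
 * *sem reports which member was taken. The semaphores and the 100 ms
 * timeout are hypothetical.
 */
#if 0
static struct k_sem sem_a, sem_b;	/* assumed initialized */

static void group_take_example(void)
{
	struct k_sem *group[] = { &sem_a, &sem_b, K_END };
	struct k_sem *taken;

	if (k_sem_group_take(group, &taken, 100) == 0) {
		/* exactly one member was taken; 'taken' says which one */
	}
}
#endif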

/* cancel all but the specified semaphore in the list if it is part of a semaphore group */
static void handle_sem_group(struct k_sem *sem, struct sem_thread *sem_thread)
{
	struct sem_desc *desc = NULL;
	sys_dlist_t *list;
	sys_dnode_t *node;
	sys_dnode_t *next;

	list = (sys_dlist_t *)sem_thread->desc.thread->base.swap_data;
	node = sys_dlist_peek_head(list);

	__ASSERT(node != NULL, "");

	do {
		next = sys_dlist_peek_next(list, node);
		desc = (struct sem_desc *)node;

		sem_thread = CONTAINER_OF(desc, struct sem_thread, desc);
		struct k_thread *dummy = (struct k_thread *)&sem_thread->dummy;

		/*
		 * This is tricky: timeouts expiring on the same tick are
		 * queued in the _reverse_ order of arrival, and the real
		 * thread was queued after the dummy threads, making it the
		 * first to be unpended. Since the caller of this function
		 * has already verified that the real thread's timeout has
		 * not expired, the dummy threads' timeouts cannot have
		 * expired either, so that case does not need handling here.
		 */
		_abort_thread_timeout(dummy);
		_unpend_thread(dummy);

		if (desc->sem != sem) {
			sys_dlist_remove(node);
		}

		node = next;
	} while (node != NULL);

	/* the loop ran at least once (node was non-NULL), so desc was assigned */

	/*
	 * As this code may be executed several times by a semaphore group give
	 * operation, it is important to ensure that the attempt to ready the
	 * master thread is done only once.
	 */

	if (!_is_thread_ready(desc->thread)) {
		_abort_thread_timeout(desc->thread);
		_mark_thread_as_not_pending(desc->thread);
		if (_is_thread_ready(desc->thread)) {
			_add_thread_to_ready_q(desc->thread);
		}
	}
	_set_thread_return_value(desc->thread, 0);
}

#else
#define handle_sem_group(sem, thread) 0
#endif

/* returns 1 if a reschedule must take place, 0 otherwise */
static inline int handle_poll_event(struct k_sem *sem)
{
#ifdef CONFIG_POLL
	uint32_t state = K_POLL_STATE_SEM_AVAILABLE;

	return sem->poll_event ?
	       _handle_obj_poll_event(&sem->poll_event, state) : 0;
#else
	return 0;
#endif
}

/*
 * Saturating increment: the comparison is 1 while the count is below the
 * limit and 0 once it reaches it, so the count never exceeds sem->limit.
 */
static inline void increment_count_up_to_limit(struct k_sem *sem)
{
	sem->count += (sem->count != sem->limit);
}

/* returns 1 if _Swap() will need to be invoked, 0 otherwise */
static int do_sem_give(struct k_sem *sem)
{
#ifdef CONFIG_SEMAPHORE_GROUPS
	struct k_thread *thread = NULL;

again:
	thread = _find_first_thread_to_unpend(&sem->wait_q, thread);
	if (!thread) {
		increment_count_up_to_limit(sem);
		return handle_poll_event(sem);
	}

	if (unlikely(_is_thread_dummy(thread))) {
		/*
		 * The awakened thread is a dummy struct sem_thread and thus
		 * was involved in a semaphore group operation.
		 */
		struct sem_thread *sem_thread = (struct sem_thread *)thread;
		struct k_thread *real_thread = sem_thread->desc.thread;

		/*
		 * This is an extremely tricky way of handling the fact that
		 * the current sem_give might have happened from an ISR while
		 * the timeout handling code is running, going through the list
		 * of expired timeouts.
		 *
		 * We have to be able to handle all timeouts on a
		 * k_sem_group_take operation as one. We do that by checking if
		 * the timeout of the real thread has expired or not. We can do
		 * this, because of the way the timeouts are queued in the
		 * kernel's timeout_q: timeouts expiring on the same tick are
		 * queued in the _reverse_ order that they arrive. It is done
		 * this way to save time with interrupts locked. By knowing
		 * this, and by adding the real thread _last_ to the timeout_q,
		 * we know that it is queued _before_ all the dummy threads
		 * from the k_sem_group_take operation. This allows us to check
		 * that, if the real thread's timeout has not expired, then all
		 * dummy threads' timeouts have not expired either. If the real
		 * thread's timeout has expired, then the dummy threads'
		 * timeouts will expire or have expired already during the
		 * current handling of timeouts, and the timeout code will take
		 * care of signalling the waiter that its operation has
		 * timed out. In that case, we look for the next thread not part
		 * of the same k_sem_group_take operation to give it the
		 * semaphore.
		 */
		if (_is_thread_timeout_expired(real_thread)) {
			goto again;
		}

		/*
		 * Do not dequeue the dummy thread: that will be done when
		 * looping through the list of dummy waiters in
		 * handle_sem_group().
		 */
		handle_sem_group(sem, sem_thread);
	} else {
		_unpend_thread(thread);
		(void)_abort_thread_timeout(thread);
		_ready_thread(thread);
		_set_thread_return_value(thread, 0);
	}
#else
	struct k_thread *thread = _unpend_first_thread(&sem->wait_q);

	if (!thread) {
		increment_count_up_to_limit(sem);
		return handle_poll_event(sem);
	}
	(void)_abort_thread_timeout(thread);
	_ready_thread(thread);
	_set_thread_return_value(thread, 0);
#endif

	return !_is_in_isr() && _must_switch_threads();
}

/*
 * This function is meant to be called only by
 * _sys_event_logger_put_non_preemptible(), which itself is really meant to be
 * called only by _sys_k_event_logger_context_switch(), used within a context
 * switch to log the event.
 *
 * WARNING:
 * It must be called with interrupts already locked.
 * It cannot be called for a semaphore that is part of a group.
 */
void _sem_give_non_preemptible(struct k_sem *sem)
{
	struct k_thread *thread;

	thread = _unpend_first_thread(&sem->wait_q);
	if (!thread) {
		increment_count_up_to_limit(sem);
		return;
	}

	_abort_thread_timeout(thread);

	_ready_thread(thread);
	_set_thread_return_value(thread, 0);
}

#ifdef CONFIG_SEMAPHORE_GROUPS
void k_sem_group_give(struct k_sem *sem_array[])
{
	int swap_needed = 0;
	unsigned int key;

	__ASSERT(sem_array[0] != K_END, "Empty semaphore list");

	key = irq_lock();

	for (int i = 0; sem_array[i] != K_END; i++) {
		swap_needed |= do_sem_give(sem_array[i]);
	}

	if (swap_needed) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}

void k_sem_group_reset(struct k_sem *sem_array[])
{
	unsigned int key;

	key = irq_lock();
	for (int i = 0; sem_array[i] != K_END; i++) {
		sem_array[i]->count = 0;
	}
	irq_unlock(key);
}
#endif
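
/*
 * Illustrative sketch (not part of this file): giving every member of a
 * K_END-terminated group, then forcing all counts back to zero with
 * k_sem_group_reset(). The semaphore names are hypothetical.
 */
#if 0
static struct k_sem sem_c, sem_d;	/* assumed initialized */

static void group_give_example(void)
{
	struct k_sem *group[] = { &sem_c, &sem_d, K_END };

	k_sem_group_give(group);	/* wake waiters / bump each count */
	k_sem_group_reset(group);	/* zero every member's count */
}
#endif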

void k_sem_give(struct k_sem *sem)
{
	unsigned int key;

	key = irq_lock();

	if (do_sem_give(sem)) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
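
/*
 * Illustrative sketch (not part of this file): k_sem_give() from an ISR.
 * do_sem_give() checks _is_in_isr() and skips the direct _Swap(), so an
 * interrupt handler can safely signal a waiting thread; the reschedule is
 * deferred to interrupt exit. The handler and semaphore are hypothetical.
 */
#if 0
static struct k_sem rx_sem;	/* assumed initialized */

static void uart_rx_isr(void *arg)
{
	ARG_UNUSED(arg);

	/* wake a thread blocked in k_sem_take(&rx_sem, ...) */
	k_sem_give(&rx_sem);
}
#endif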

int k_sem_take(struct k_sem *sem, int32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();

	if (likely(sem->count > 0)) {
		sem->count--;
		irq_unlock(key);
		return 0;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return -EBUSY;
	}

	_pend_current_thread(&sem->wait_q, timeout);

	return _Swap(key);
}
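
/*
 * Illustrative sketch (not part of this file): the k_sem_take() return
 * values visible above. 0 means the semaphore was obtained, -EBUSY means
 * it was unavailable under K_NO_WAIT, and a non-zero _Swap() result
 * (conventionally -EAGAIN) means the wait was aborted by timeout. Names
 * and timeouts are hypothetical.
 */
#if 0
static struct k_sem work_sem;	/* assumed initialized */

static void take_example(void)
{
	int ret = k_sem_take(&work_sem, 50);

	if (ret == 0) {
		/* obtained within 50 ms */
	} else {
		/* wait aborted; conventionally -EAGAIN on timeout */
	}

	if (k_sem_take(&work_sem, K_NO_WAIT) == -EBUSY) {
		/* unavailable, and we chose not to wait */
	}
}
#endif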