path: root/kernel/sched.c
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <atomic.h>
#include <ksched.h>
#include <wait_q.h>
#include <misc/util.h>

/* the only struct _kernel instance */
struct _kernel _kernel = {0};

/* set the bit corresponding to prio in ready q bitmap */
#ifdef CONFIG_MULTITHREADING
static void _set_ready_q_prio_bit(int prio)
{
	int bmap_index = _get_ready_q_prio_bmap_index(prio);
	uint32_t *bmap = &_ready_q.prio_bmap[bmap_index];

	*bmap |= _get_ready_q_prio_bit(prio);
}
#endif

/* clear the bit corresponding to prio in ready q bitmap */
#ifdef CONFIG_MULTITHREADING
static void _clear_ready_q_prio_bit(int prio)
{
	int bmap_index = _get_ready_q_prio_bmap_index(prio);
	uint32_t *bmap = &_ready_q.prio_bmap[bmap_index];

	*bmap &= ~_get_ready_q_prio_bit(prio);
}
#endif
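
/*
 * The _get_ready_q_prio_bmap_index()/_get_ready_q_prio_bit() helpers are
 * defined elsewhere (ksched.h); a minimal sketch of the mapping they are
 * assumed to implement, with priorities offset so the most negative
 * (cooperative) priority lands on bit 0 of word 0:
 *
 *	index = (prio + CONFIG_NUM_COOP_PRIORITIES) >> 5;
 *	bit   = 1 << ((prio + CONFIG_NUM_COOP_PRIORITIES) & 0x1f);
 *
 * Under that layout, marking a priority ready or idle touches exactly one
 * bit of one 32-bit word in prio_bmap[].
 */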

#ifdef CONFIG_MULTITHREADING
/*
 * Find the highest-priority ready thread; callers use the result to refresh
 * the ready queue cache when the cached thread is removed or re-queued.
 */
static struct k_thread *_get_ready_q_head(void)
{
	int prio = _get_highest_ready_prio();
	int q_index = _get_ready_q_q_index(prio);
	sys_dlist_t *list = &_ready_q.q[q_index];

	__ASSERT(!sys_dlist_is_empty(list),
		 "no thread to run (prio: %d, queue index: %u)!\n",
		 prio, q_index);

	struct k_thread *thread =
		(struct k_thread *)sys_dlist_peek_head_not_empty(list);

	return thread;
}
#endif

/*
 * Add thread to the ready queue, in the slot for its priority; the thread
 * must not be on a wait queue.
 *
 * This function and _move_thread_to_end_of_prio_q() are the _only_ places
 * where a thread is put on the ready queue.
 *
 * Interrupts must be locked when calling this function.
 */

void _add_thread_to_ready_q(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
	int q_index = _get_ready_q_q_index(thread->base.prio);
	sys_dlist_t *q = &_ready_q.q[q_index];

	_set_ready_q_prio_bit(thread->base.prio);
	sys_dlist_append(q, &thread->base.k_q_node);

	struct k_thread **cache = &_ready_q.cache;

	*cache = _is_t1_higher_prio_than_t2(thread, *cache) ? thread : *cache;
#else
	sys_dlist_append(&_ready_q.q[0], &thread->base.k_q_node);
	_ready_q.prio_bmap[0] = 1;
	_ready_q.cache = thread;
#endif
}

/*
 * This function and _move_thread_to_end_of_prio_q() are the _only_ places
 * where a thread is taken off the ready queue.
 *
 * Interrupts must be locked when calling this function.
 */

void _remove_thread_from_ready_q(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
	int q_index = _get_ready_q_q_index(thread->base.prio);
	sys_dlist_t *q = &_ready_q.q[q_index];

	sys_dlist_remove(&thread->base.k_q_node);
	if (sys_dlist_is_empty(q)) {
		_clear_ready_q_prio_bit(thread->base.prio);
	}

	struct k_thread **cache = &_ready_q.cache;

	*cache = *cache == thread ? _get_ready_q_head() : *cache;
#else
	_ready_q.prio_bmap[0] = 0;
	_ready_q.cache = NULL;
	sys_dlist_remove(&thread->base.k_q_node);
#endif
}
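
/*
 * Both queue operations above assume the caller already holds the interrupt
 * lock. A minimal sketch of the calling pattern (illustrative only; the
 * priority change shown is roughly what _thread_priority_set(), defined
 * elsewhere, is expected to do for a ready thread):
 *
 *	unsigned int key = irq_lock();
 *
 *	_remove_thread_from_ready_q(thread);
 *	thread->base.prio = new_prio;
 *	_add_thread_to_ready_q(thread);
 *
 *	irq_unlock(key);
 */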

/* reschedule threads if the scheduler is not locked */
/* not callable from ISR */
/* must be called with interrupts locked */
void _reschedule_threads(int key)
{
#ifdef CONFIG_PREEMPT_ENABLED
	K_DEBUG("rescheduling threads\n");

	if (_must_switch_threads()) {
		K_DEBUG("context-switching out %p\n", _current);
		_Swap(key);
	} else {
		irq_unlock(key);
	}
#else
	irq_unlock(key);
#endif
}
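
/*
 * A minimal sketch of the intended use of _reschedule_threads() after making
 * another thread runnable (wait queue handling elided; 'some_thread' is
 * illustrative):
 *
 *	unsigned int key = irq_lock();
 *
 *	_ready_thread(some_thread);
 *	_reschedule_threads(key);
 *
 * The call either swaps to a higher-priority thread or simply restores the
 * interrupt state, so the caller must not reuse 'key' afterwards.
 */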

void k_sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(!_is_in_isr(), "");

	atomic_inc(&_current->base.sched_locked);

	K_DEBUG("scheduler locked (%p:%d)\n",
		_current, _current->base.sched_locked);
#endif
}

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(_current->base.sched_locked > 0, "");
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();

	atomic_dec(&_current->base.sched_locked);

	K_DEBUG("scheduler unlocked (%p:%d)\n",
		_current, _current->base.sched_locked);

	_reschedule_threads(key);
#endif
}
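
/*
 * Typical application-level use of the scheduler lock: a preemptible thread
 * can make a short sequence of operations atomic with respect to other
 * threads (but not ISRs). The work inside the critical section is
 * illustrative:
 *
 *	k_sched_lock();
 *	shared_counter++;
 *	update_shared_list();
 *	k_sched_unlock();
 *
 * The lock count nests: rescheduling resumes only once every k_sched_lock()
 * has been matched by a k_sched_unlock().
 */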

/*
 * Callback for sys_dlist_insert_at() to find the correct insert point in a
 * wait queue (priority-based).
 */
#ifdef CONFIG_MULTITHREADING
static int _is_wait_q_insert_point(sys_dnode_t *node, void *insert_prio)
{
	struct k_thread *waitq_node =
		CONTAINER_OF(
			CONTAINER_OF(node, struct _thread_base, k_q_node),
			struct k_thread,
			base);

	return _is_prio_higher((int)insert_prio, waitq_node->base.prio);
}
#endif

/* convert milliseconds to ticks */

#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
int32_t _ms_to_ticks(int32_t ms)
{
	int64_t ms_ticks_per_sec = (int64_t)ms * sys_clock_ticks_per_sec;

	return (int32_t)ceiling_fraction(ms_ticks_per_sec, MSEC_PER_SEC);
}
#endif
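
/*
 * Worked example of the conversion above, assuming
 * sys_clock_ticks_per_sec == 100 (a 10 ms tick):
 *
 *	_ms_to_ticks(15) = ceil(15 * 100 / 1000) = ceil(1.5) = 2 ticks
 *
 * Rounding up keeps the requested duration a lower bound on the actual wait;
 * the _TICK_ALIGN that callers add on top accounts for the partially elapsed
 * current tick.
 */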

/* pend the specified thread: it must *not* be in the ready queue */
/* must be called with interrupts locked */
void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
{
#ifdef CONFIG_MULTITHREADING
	sys_dlist_t *dlist = (sys_dlist_t *)wait_q;

	sys_dlist_insert_at(dlist, &thread->base.k_q_node,
			    _is_wait_q_insert_point,
			    (void *)thread->base.prio);

	_mark_thread_as_pending(thread);

	if (timeout != K_FOREVER) {
		int32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);

		_add_thread_timeout(thread, wait_q, ticks);
	}
#endif
}

/* pend the current thread */
/* must be called with interrupts locked */
void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout)
{
	_remove_thread_from_ready_q(_current);
	_pend_thread(_current, wait_q, timeout);
}
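
/*
 * A hedged sketch of how a blocking kernel object is expected to use
 * _pend_current_thread(); the semaphore-like object and its fields are
 * illustrative, not definitions from this file:
 *
 *	unsigned int key = irq_lock();
 *
 *	if (obj->count == 0) {
 *		_pend_current_thread(&obj->wait_q, timeout);
 *		return _Swap(key);
 *	}
 *	obj->count--;
 *	irq_unlock(key);
 *	return 0;
 *
 * _Swap() performs the context switch; the pended thread resumes at that
 * point once it is made ready again or its timeout expires.
 */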

/*
 * Check if there is a thread of higher prio than the current one. Should only
 * be called if we already know that the current thread is preemptible.
 */
int __must_switch_threads(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	K_DEBUG("current prio: %d, highest prio: %d\n",
		_current->base.prio, _get_highest_ready_prio());

	extern void _dump_ready_q(void);
	_dump_ready_q();

	return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
#else
	return 0;
#endif
}

int k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

void k_thread_priority_set(k_tid_t tid, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	_ASSERT_VALID_PRIO(prio, NULL);
	__ASSERT(!_is_in_isr(), "");

	struct k_thread *thread = (struct k_thread *)tid;
	int key = irq_lock();

	_thread_priority_set(thread, prio);
	_reschedule_threads(key);
}
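
/*
 * Example use of the two public priority calls above (the thread ID and
 * priority value are illustrative): move a worker thread to cooperative
 * priority -2 and read the value back.
 *
 *	k_thread_priority_set(worker_tid, -2);
 *	int prio = k_thread_priority_get(worker_tid);
 *
 * Because the setter ends with _reschedule_threads(), the change can take
 * effect immediately if the re-prioritized thread now outranks the caller.
 */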

/*
 * Interrupts must be locked when calling this function.
 *
 * This function, _add_thread_to_ready_q() and _remove_thread_from_ready_q()
 * are the _only_ places where a thread is taken off or put on the ready
 * queue.
 */
void _move_thread_to_end_of_prio_q(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
	int q_index = _get_ready_q_q_index(thread->base.prio);
	sys_dlist_t *q = &_ready_q.q[q_index];

	if (sys_dlist_is_tail(q, &thread->base.k_q_node)) {
		return;
	}

	sys_dlist_remove(&thread->base.k_q_node);
	sys_dlist_append(q, &thread->base.k_q_node);

	struct k_thread **cache = &_ready_q.cache;

	*cache = *cache == thread ? _get_ready_q_head() : *cache;
#endif
}

void k_yield(void)
{
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();

	_move_thread_to_end_of_prio_q(_current);

	if (_current == _get_next_ready_thread()) {
		irq_unlock(key);
	} else {
		_Swap(key);
	}
}
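
/*
 * k_yield() only gives way to ready threads of equal or higher priority.
 * Illustrative use in a cooperative worker loop (the entry point and work
 * function are hypothetical):
 *
 *	void worker(void *a, void *b, void *c)
 *	{
 *		for (;;) {
 *			do_one_unit_of_work();
 *			k_yield();
 *		}
 *	}
 *
 * If no other thread of the same or higher priority is ready, the call
 * returns immediately.
 */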

void k_sleep(int32_t duration)
{
#ifdef CONFIG_MULTITHREADING
	/* volatile to guarantee that irq_lock() is executed after ticks is
	 * populated
	 */
	volatile int32_t ticks;
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");
	__ASSERT(duration != K_FOREVER, "");

	K_DEBUG("thread %p for %d ns\n", _current, duration);

	/* wait of 0 ms is treated as a 'yield' */
	if (duration == 0) {
		k_yield();
		return;
	}

	ticks = _TICK_ALIGN + _ms_to_ticks(duration);
	key = irq_lock();

	_remove_thread_from_ready_q(_current);
	_add_thread_timeout(_current, NULL, ticks);

	_Swap(key);
#endif
}

void k_wakeup(k_tid_t thread)
{
	int key = irq_lock();

	/* a thread pending on a kernel object cannot be woken up this way */
	if (_is_thread_pending(thread)) {
		irq_unlock(key);
		return;
	}

	if (_abort_thread_timeout(thread) == _INACTIVE) {
		irq_unlock(key);
		return;
	}

	_ready_thread(thread);

	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
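
/*
 * k_wakeup() only affects a thread that is sleeping, i.e. waiting on a
 * timeout without pending on a kernel object. Illustrative pairing with
 * k_sleep(); 'sleeper_tid' is hypothetical:
 *
 *	In the sleeping thread:
 *		k_sleep(1000);
 *
 *	In another thread, to cut the sleep short:
 *		k_wakeup(sleeper_tid);
 *
 * If the target is pending on an object, or has no active timeout, the call
 * is a no-op.
 */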

k_tid_t k_current_get(void)
{
	return _current;
}

/* debug aid */
void _dump_ready_q(void)
{
	K_DEBUG("bitmaps: ");
	for (int bitmap = 0; bitmap < K_NUM_PRIO_BITMAPS; bitmap++) {
		K_DEBUG("%x", _ready_q.prio_bmap[bitmap]);
	}
	K_DEBUG("\n");
	for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) {
		K_DEBUG("prio: %d, head: %p\n",
			prio - CONFIG_NUM_COOP_PRIORITIES,
			sys_dlist_peek_head(&_ready_q.q[prio]));
	}
}

#ifdef CONFIG_TIMESLICING
extern int32_t _time_slice_duration;    /* Measured in ms */
extern int32_t _time_slice_elapsed;     /* Measured in ms */
extern int _time_slice_prio_ceiling;

void k_sched_time_slice_set(int32_t duration_in_ms, int prio)
{
	__ASSERT(duration_in_ms >= 0, "");
	__ASSERT((prio >= 0) && (prio < CONFIG_NUM_PREEMPT_PRIORITIES), "");

	_time_slice_duration = duration_in_ms;
	_time_slice_elapsed = 0;
	_time_slice_prio_ceiling = prio;
}
#endif /* CONFIG_TIMESLICING */
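
/*
 * Example configuration of round-robin time slicing (values are
 * illustrative): give preemptible threads at the ceiling priority and below
 * a 20 ms slice, leaving more important threads unsliced.
 *
 *	k_sched_time_slice_set(20, 0);
 *
 * Setting the duration back to 0 disables time slicing.
 */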

int k_is_preempt_thread(void)
{
	return !_is_in_isr() && _is_preempt(_current);
}