author     Peter Mitsis <peter.mitsis@windriver.com>    2016-10-11 12:06:25 -0400
committer  Benjamin Walsh <benjamin.walsh@windriver.com>    2016-10-17 17:52:33 +0000
commit     b2fd5be4dcad7e811c82182cb85a4a3883a50387 (patch)
tree       fa7920dcbc593664b3328f8299b4e2ff6f26db29 /kernel
parent     41a4caadfc756773c77df484501ecb68b3ba68ea (diff)
unified: Rework K_THREAD_DEFINE()
K_THREAD_DEFINE() can no longer specify a thread group. However, it now
accepts a 'delay' parameter, just as k_thread_spawn() does. To create a
statically defined thread that belongs to one or more thread groups, the
new internal _MDEF_THREAD_DEFINE() macro is used; it exists only for
legacy purposes. Threads cannot both have a delayed start AND belong to
a thread group.

Jira: ZEP-916

Change-Id: Ia6e59ddcb4fc68f1f60f9c6b0f4f227f161ad1bb
Signed-off-by: Peter Mitsis <peter.mitsis@windriver.com>
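For illustration, a minimal usage sketch of the reworked macro, assuming the
unified-kernel signature K_THREAD_DEFINE(name, stack_size, entry, p1, p2, p3,
prio, options, delay) with the delay given in milliseconds; my_entry and the
stack size, priority, and delay values below are hypothetical, not part of
this commit:

    #include <kernel.h>

    /* Hypothetical entry point, following the unified-kernel thread
     * entry convention of three void * parameters.
     */
    static void my_entry(void *p1, void *p2, void *p3)
    {
            /* thread body */
    }

    /* The final argument is now a start delay rather than a thread
     * group: this thread becomes runnable 100 ms after
     * _init_static_threads() runs. K_NO_WAIT would start it
     * immediately, while K_FOREVER leaves it unscheduled (the path the
     * legacy _MDEF_THREAD_DEFINE() threads take, since they are
     * started via thread group operations instead).
     */
    K_THREAD_DEFINE(my_thread, 512, my_entry, NULL, NULL, NULL,
                    5 /* priority */, 0 /* options */, 100 /* delay */);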
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/unified/thread.c  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index 28f30eece..06e247aa1 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -424,6 +424,8 @@ void _k_thread_single_abort(struct k_thread *thread)
 
 void _init_static_threads(void)
 {
+	unsigned int key;
+
 	_FOREACH_STATIC_THREAD(thread_data) {
 		_new_thread(
 			thread_data->init_stack,
@@ -438,7 +440,29 @@ void _init_static_threads(void)
 
 		thread_data->thread->init_data = thread_data;
 	}
+
+	k_sched_lock();
+	/* Start all (legacy) threads that are part of the EXE group */
 	_k_thread_group_op(K_THREAD_GROUP_EXE, _k_thread_single_start);
+
+	/*
+	 * Non-legacy static threads may be started immediately or after a
+	 * previously specified delay. Even though the scheduler is locked,
+	 * ticks can still be delivered and processed. Lock interrupts so
+	 * that the countdown until execution begins from the same tick.
+	 *
+	 * Note that static threads defined using the legacy API have a
+	 * delay of K_FOREVER.
+	 */
+	key = irq_lock();
+	_FOREACH_STATIC_THREAD(thread_data) {
+		if (thread_data->init_delay != K_FOREVER) {
+			schedule_new_thread(thread_data->thread,
+					    thread_data->init_delay);
+		}
+	}
+	irq_unlock(key);
+	k_sched_unlock();
 }
 
 uint32_t _k_thread_group_mask_get(struct k_thread *thread)
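For comparison, the runtime counterpart the commit message refers to takes the
same trailing 'delay' argument. A sketch, assuming the unified-kernel
prototype k_thread_spawn(stack, stack_size, entry, p1, p2, p3, prio, options,
delay) and the __stack stack-declaration idiom of this era; worker, the stack
size, and the parameter values are hypothetical:

    #include <kernel.h>

    /* __stack is assumed here to be the kernel's stack alignment
     * attribute for statically declared thread stacks.
     */
    static char __stack worker_stack[512];

    static void worker(void *p1, void *p2, void *p3)
    {
            /* thread body */
    }

    void start_worker(void)
    {
            /* The thread becomes runnable 100 ms from now, mirroring
             * the delay semantics K_THREAD_DEFINE() gains in this
             * commit.
             */
            k_thread_spawn(worker_stack, sizeof(worker_stack), worker,
                           NULL, NULL, NULL,
                           5 /* priority */, 0 /* options */,
                           100 /* delay */);
    }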