Diffstat (limited to 'kernel/include')
-rw-r--r--  kernel/include/gen_offset.h       92
-rw-r--r--  kernel/include/kernel_offsets.h   74
-rw-r--r--  kernel/include/kernel_structs.h  225
-rw-r--r--  kernel/include/ksched.h          466
-rw-r--r--  kernel/include/nano_internal.h    89
-rw-r--r--  kernel/include/offsets_short.h    71
-rw-r--r--  kernel/include/timeout_q.h       252
-rw-r--r--  kernel/include/wait_q.h           59
8 files changed, 1328 insertions, 0 deletions
diff --git a/kernel/include/gen_offset.h b/kernel/include/gen_offset.h
new file mode 100644
index 000000000..9b8286da6
--- /dev/null
+++ b/kernel/include/gen_offset.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2010, 2012, 2014 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Macros to generate structure member offset definitions
+ *
+ * This header contains macros to allow a nanokernel implementation to
+ * generate absolute symbols whose values represent the member offsets for
+ * various nanokernel structures. These absolute symbols are typically
+ * utilized by assembly source files rather than hardcoding the values in
+ * some local header file.
+ *
+ * WARNING: Absolute symbols can potentially be utilized by external tools --
+ * for example, to locate a specific field within a data structure.
+ * Consequently, changes made to such symbols may require modifications to the
+ * associated tool(s). Typically, relocating a member of a structure merely
+ * requires that a tool be rebuilt; however, moving a member to another
+ * structure (or to a new sub-structure within an existing structure) may
+ * require that the tool itself be modified. Likewise, deleting, renaming, or
+ * changing the meaning of an absolute symbol may require modifications to a
+ * tool.
+ *
+ * The macro "GEN_OFFSET_SYM(structure, member)" is used to generate a single
+ * absolute symbol. The absolute symbol will appear in the object module
+ * generated from the source file that utilizes the GEN_OFFSET_SYM() macro.
+ * Absolute symbols representing a structure member offset have the following
+ * form:
+ *
+ * __<structure>_<member>_OFFSET
+ *
+ * This header also defines the GEN_ABSOLUTE_SYM macro to simply define an
+ * absolute symbol, irrespective of whether the value represents a structure
+ * or offset.
+ *
+ * The following sample file illustrates the usage of the macros available
+ * in this file:
+ *
+ * <START of sample source file: offsets.c>
+ *
+ * #include <gen_offset.h>
+ * /@ include struct definitions for which offset symbols are to be
+ * generated @/
+ *
+ * #include <kernel_structs.h>
+ * GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/
+ * /@ _kernel_t structure member offsets @/
+ *
+ * GEN_OFFSET_SYM (_kernel_t, nested);
+ * GEN_OFFSET_SYM (_kernel_t, irq_stack);
+ * GEN_OFFSET_SYM (_kernel_t, current);
+ * GEN_OFFSET_SYM (_kernel_t, idle);
+ *
+ * GEN_ABSOLUTE_SYM (___kernel_t_SIZEOF, sizeof(_kernel_t));
+ *
+ * GEN_ABS_SYM_END
+ * <END of sample source file: offsets.c>
+ *
+ * Compiling the sample offsets.c results in the following symbols in offsets.o:
+ *
+ * $ nm offsets.o
+ * 00000000 A ___kernel_t_nested_OFFSET
+ * 00000004 A ___kernel_t_irq_stack_OFFSET
+ * 00000008 A ___kernel_t_current_OFFSET
+ * 0000000c A ___kernel_t_idle_OFFSET
+ */
+
+#ifndef _GEN_OFFSET_H
+#define _GEN_OFFSET_H
+
+#include <toolchain.h>
+#include <stddef.h>
+
+/* definition of the GEN_OFFSET_SYM() macro is toolchain-independent */
+
+#define GEN_OFFSET_SYM(S, M) \
+ GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M))
+
+#endif /* _GEN_OFFSET_H */
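
For reference, GEN_ABSOLUTE_SYM() and the GEN_ABS_SYM_BEGIN()/GEN_ABS_SYM_END()
bracketing macros used in the sample above are not defined here; they come from
the toolchain headers pulled in via toolchain.h. On a GCC-based toolchain they
look roughly like the following simplified sketch (illustrative only, not part
of this change; the real definitions vary per architecture and toolchain):

#define GEN_ABS_SYM_BEGIN(name)	\
	extern void name(void);		\
	void name(void)			\
	{

#define GEN_ABS_SYM_END }

/* emit an absolute symbol whose value is a compile-time constant */
#define GEN_ABSOLUTE_SYM(name, value)		\
	__asm__(".globl\t" #name		\
		"\n\t.equ\t" #name ",%c0\n"	\
		: : "n"(value))

The resulting object file carries the offsets as absolute symbols, which
assembly sources can then reference by name.
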
diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h
new file mode 100644
index 000000000..73553c007
--- /dev/null
+++ b/kernel/include/kernel_offsets.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <device.h>
+
+#ifndef _kernel_offsets__h_
+#define _kernel_offsets__h_
+
+/*
+ * The final link step uses the symbol _OffsetAbsSyms to force the linkage of
+ * offsets.o into the ELF image.
+ */
+
+GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
+
+GEN_OFFSET_SYM(_kernel_t, current);
+
+#if defined(CONFIG_THREAD_MONITOR)
+GEN_OFFSET_SYM(_kernel_t, threads);
+#endif
+
+GEN_OFFSET_SYM(_kernel_t, nested);
+GEN_OFFSET_SYM(_kernel_t, irq_stack);
+#ifdef CONFIG_SYS_POWER_MANAGEMENT
+GEN_OFFSET_SYM(_kernel_t, idle);
+#endif
+
+GEN_OFFSET_SYM(_kernel_t, ready_q);
+GEN_OFFSET_SYM(_kernel_t, arch);
+
+GEN_OFFSET_SYM(_ready_q_t, cache);
+
+#ifdef CONFIG_FP_SHARING
+GEN_OFFSET_SYM(_kernel_t, current_fp);
+#endif
+
+GEN_ABSOLUTE_SYM(_STRUCT_KERNEL_SIZE, sizeof(struct _kernel));
+
+GEN_OFFSET_SYM(_thread_base_t, flags);
+GEN_OFFSET_SYM(_thread_base_t, prio);
+GEN_OFFSET_SYM(_thread_base_t, sched_locked);
+GEN_OFFSET_SYM(_thread_base_t, swap_data);
+
+GEN_OFFSET_SYM(_thread_t, base);
+GEN_OFFSET_SYM(_thread_t, caller_saved);
+GEN_OFFSET_SYM(_thread_t, callee_saved);
+GEN_OFFSET_SYM(_thread_t, arch);
+
+#if defined(CONFIG_THREAD_MONITOR)
+GEN_OFFSET_SYM(_thread_t, next_thread);
+#endif
+
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+GEN_OFFSET_SYM(_thread_t, custom_data);
+#endif
+
+GEN_ABSOLUTE_SYM(K_THREAD_SIZEOF, sizeof(struct k_thread));
+
+/* size of the device structure. Used by linker scripts */
+GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_SIZE, sizeof(struct device));
+
+#endif /* _kernel_offsets__h_ */
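
As an illustration of what one of these lines produces (expansion shown for
reference only), a single GEN_OFFSET_SYM() entry goes through the macro defined
in gen_offset.h:

GEN_OFFSET_SYM(_thread_base_t, prio);
	/* expands to:
	 * GEN_ABSOLUTE_SYM(___thread_base_t_prio_OFFSET,
	 *                  offsetof(_thread_base_t, prio));
	 */

offsets_short.h, added below, then folds such generated symbols into shorthands
like _thread_offset_to_prio for use by architecture assembly code.
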
diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
new file mode 100644
index 000000000..c82fc61a5
--- /dev/null
+++ b/kernel/include/kernel_structs.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _kernel_structs__h_
+#define _kernel_structs__h_
+
+#if !defined(_ASMLANGUAGE)
+#include <kernel.h>
+#include <atomic.h>
+#include <misc/dlist.h>
+#endif
+
+/*
+ * Common bitmask definitions for the struct tcs->flags bit field.
+ *
+ * Must come before kernel_arch_data.h because it might need them to be already
+ * defined.
+ */
+
+/* thread is defined statically */
+#define K_STATIC (1 << 8)
+
+/* Thread is waiting on an object */
+#define K_PENDING (1 << 13)
+
+/* Thread has not yet started */
+#define K_PRESTART (1 << 14)
+
+/* Thread has terminated */
+#define K_DEAD (1 << 15)
+
+/* Thread is suspended */
+#define K_SUSPENDED (1 << 16)
+
+/* Not a real thread */
+#define K_DUMMY (1 << 17)
+
+#if defined(CONFIG_FP_SHARING)
+/* thread uses floating point registers */
+#define K_FP_REGS (1 << 4)
+#endif
+
+/* system thread that must not abort */
+#define K_ESSENTIAL (1 << 9)
+
+#include <kernel_arch_data.h>
+
+#if !defined(_ASMLANGUAGE)
+
+#ifdef CONFIG_THREAD_MONITOR
+struct __thread_entry {
+ _thread_entry_t pEntry;
+ void *parameter1;
+ void *parameter2;
+ void *parameter3;
+};
+#endif
+
+/* can be used for creating 'dummy' threads, e.g. for pending on objects */
+struct _thread_base {
+
+ /* this thread's entry in a ready/wait queue */
+ sys_dnode_t k_q_node;
+
+ /* execution flags */
+ int flags;
+
+ /* thread priority used to sort linked list */
+ int prio;
+
+ /* scheduler lock count */
+ atomic_t sched_locked;
+
+ /* data returned by APIs */
+ void *swap_data;
+
+#ifdef CONFIG_NANO_TIMEOUTS
+ /* this thread's entry in a timeout queue */
+ struct _timeout timeout;
+#endif
+
+};
+
+typedef struct _thread_base _thread_base_t;
+
+struct k_thread {
+
+ struct _thread_base base;
+
+ /* defined by the architecture, but all archs need these */
+ struct _caller_saved caller_saved;
+ struct _callee_saved callee_saved;
+
+ /* static thread init data */
+ void *init_data;
+
+ /* abort function */
+ void (*fn_abort)(void);
+
+#if defined(CONFIG_THREAD_MONITOR)
+ /* thread entry and parameters description */
+ struct __thread_entry *entry;
+
+ /* next item in list of all threads */
+ struct k_thread *next_thread;
+#endif
+
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+ /* crude thread-local storage */
+ void *custom_data;
+#endif
+
+#ifdef CONFIG_ERRNO
+ /* per-thread errno variable */
+ int errno_var;
+#endif
+
+ /* arch-specifics: must always be at the end */
+ struct _thread_arch arch;
+};
+
+typedef struct k_thread _thread_t;
+
+struct _ready_q {
+
+ /* next thread to run if known, NULL otherwise */
+ struct k_thread *cache;
+
+ /* bitmap of priorities that contain at least one ready thread */
+ uint32_t prio_bmap[K_NUM_PRIO_BITMAPS];
+
+ /* ready queues, one per priority */
+ sys_dlist_t q[K_NUM_PRIORITIES];
+};
+
+typedef struct _ready_q _ready_q_t;
+
+struct _kernel {
+
+ /* nested interrupt count */
+ uint32_t nested;
+
+ /* interrupt stack pointer base */
+ char *irq_stack;
+
+ /* currently scheduled thread */
+ struct k_thread *current;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+ /* queue of timeouts */
+ sys_dlist_t timeout_q;
+#endif
+
+#ifdef CONFIG_SYS_POWER_MANAGEMENT
+ int32_t idle; /* Number of ticks for kernel idling */
+#endif
+
+ /*
+ * ready queue: can be big, keep after small fields, since some
+	 * assembly (e.g. ARC) is limited in the encoding of the offset
+ */
+ struct _ready_q ready_q;
+
+#ifdef CONFIG_FP_SHARING
+ /*
+ * A 'current_sse' field does not exist in addition to the 'current_fp'
+ * field since it's not possible to divide the IA-32 non-integer
+ * registers into 2 distinct blocks owned by differing threads. In
+	 * other words, given that the 'fxsave/fxrstor' instructions
+ * save/restore both the X87 FPU and XMM registers, it's not possible
+ * for a thread to only "own" the XMM registers.
+ */
+
+ /* thread (fiber or task) that owns the FP regs */
+ struct k_thread *current_fp;
+#endif
+
+#if defined(CONFIG_THREAD_MONITOR)
+ struct k_thread *threads; /* singly linked list of ALL fiber+tasks */
+#endif
+
+ /* arch-specific part of _kernel */
+ struct _kernel_arch arch;
+};
+
+typedef struct _kernel _kernel_t;
+
+extern struct _kernel _kernel;
+
+#define _current _kernel.current
+#define _ready_q _kernel.ready_q
+#define _timeout_q _kernel.timeout_q
+#define _threads _kernel.threads
+
+#include <kernel_arch_func.h>
+
+static ALWAYS_INLINE void
+_set_thread_return_value_with_data(struct k_thread *thread,
+ unsigned int value,
+ void *data)
+{
+ _set_thread_return_value(thread, value);
+ thread->base.swap_data = data;
+}
+
+extern void _init_thread_base(struct _thread_base *thread_base,
+ int priority, uint32_t initial_state,
+ unsigned int options);
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* _kernel_structs__h_ */
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
new file mode 100644
index 000000000..6816c1e66
--- /dev/null
+++ b/kernel/include/ksched.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ksched__h_
+#define _ksched__h_
+
+#include <kernel_structs.h>
+
+extern k_tid_t const _main_thread;
+extern k_tid_t const _idle_thread;
+
+extern void _add_thread_to_ready_q(struct k_thread *thread);
+extern void _remove_thread_from_ready_q(struct k_thread *thread);
+extern void _reschedule_threads(int key);
+extern void k_sched_unlock(void);
+extern void _pend_thread(struct k_thread *thread,
+ _wait_q_t *wait_q, int32_t timeout);
+extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout);
+extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
+extern int __must_switch_threads(void);
+extern int32_t _ms_to_ticks(int32_t ms);
+extern void idle(void *, void *, void *);
+
+/* find which one is the next thread to run */
+/* must be called with interrupts locked */
+static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
+{
+ return _ready_q.cache;
+}
+
+static inline int _is_idle_thread(void *entry_point)
+{
+ return entry_point == idle;
+}
+
+#ifdef CONFIG_MULTITHREADING
+#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
+ __ASSERT(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
+ (_is_prio_higher_or_equal((prio), \
+ K_LOWEST_APPLICATION_THREAD_PRIO) && \
+ _is_prio_lower_or_equal((prio), \
+ K_HIGHEST_APPLICATION_THREAD_PRIO)), \
+ "invalid priority (%d); allowed range: %d to %d", \
+ (prio), \
+ K_LOWEST_APPLICATION_THREAD_PRIO, \
+ K_HIGHEST_APPLICATION_THREAD_PRIO); \
+ } while ((0))
+#else
+#define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
+#endif
+
+/*
+ * The _is_prio_higher family: I created this because higher priorities are
+ * lower numerically and I always found it somewhat confusing to see, e.g.:
+ *
+ * if (t1.prio < t2.prio) /# is t1's priority higher than t2's priority ? #/
+ *
+ * in code. And the fact that most of the time that kind of code has this
+ * exact comment warrants a function where it is embedded in the name.
+ *
+ * IMHO, feel free to remove them and do the comparison directly if this feels
+ * like overkill.
+ */
+
+static inline int _is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
+{
+ return prio1 <= prio2;
+}
+
+static inline int _is_prio_higher_or_equal(int prio1, int prio2)
+{
+ return _is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
+}
+
+static inline int _is_prio1_higher_than_prio2(int prio1, int prio2)
+{
+ return prio1 < prio2;
+}
+
+static inline int _is_prio_higher(int prio, int test_prio)
+{
+ return _is_prio1_higher_than_prio2(prio, test_prio);
+}
+
+static inline int _is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
+{
+ return prio1 >= prio2;
+}
+
+static inline int _is_prio_lower_or_equal(int prio1, int prio2)
+{
+ return _is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
+}
+
+static inline int _is_prio1_lower_than_prio2(int prio1, int prio2)
+{
+ return prio1 > prio2;
+}
+
+static inline int _is_prio_lower(int prio1, int prio2)
+{
+ return _is_prio1_lower_than_prio2(prio1, prio2);
+}
+
+static inline int _is_t1_higher_prio_than_t2(struct k_thread *t1,
+ struct k_thread *t2)
+{
+ return _is_prio1_higher_than_prio2(t1->base.prio, t2->base.prio);
+}
+
+static inline int _is_higher_prio_than_current(struct k_thread *thread)
+{
+ return _is_t1_higher_prio_than_t2(thread, _current);
+}
+
+/* is thread currently cooperative ? */
+static inline int _is_coop(struct k_thread *thread)
+{
+#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_COOP_ENABLED)
+ return thread->base.prio < 0;
+#elif defined(CONFIG_COOP_ENABLED)
+ return 1;
+#elif defined(CONFIG_PREEMPT_ENABLED)
+ return 0;
+#else
+#error "Impossible configuration"
+#endif
+}
+
+/* is thread currently preemptible ? */
+static inline int _is_preempt(struct k_thread *thread)
+{
+#ifdef CONFIG_PREEMPT_ENABLED
+ return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked);
+#else
+ return 0;
+#endif
+}
+
+/* is the current thread preemptible and are we not in ISR context ? */
+static inline int _is_current_execution_context_preemptible(void)
+{
+#ifdef CONFIG_PREEMPT_ENABLED
+ return !_is_in_isr() && _is_preempt(_current);
+#else
+ return 0;
+#endif
+}
+
+/* find out if priority is under priority inheritance ceiling */
+static inline int _is_under_prio_ceiling(int prio)
+{
+ return prio >= CONFIG_PRIORITY_CEILING;
+}
+
+/*
+ * Find out what priority to set a thread to taking the prio ceiling into
+ * consideration.
+ */
+static inline int _get_new_prio_with_ceiling(int prio)
+{
+ return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
+}
+
+/* find out the prio bitmap index for a given prio */
+static inline int _get_ready_q_prio_bmap_index(int prio)
+{
+ return (prio + CONFIG_NUM_COOP_PRIORITIES) >> 5;
+}
+
+/* find out the prio bit for a given prio */
+static inline int _get_ready_q_prio_bit(int prio)
+{
+ return (1 << ((prio + CONFIG_NUM_COOP_PRIORITIES) & 0x1f));
+}
+
+/* find out the ready queue array index for a given prio */
+static inline int _get_ready_q_q_index(int prio)
+{
+ return prio + CONFIG_NUM_COOP_PRIORITIES;
+}
+
+/* find out the currently highest priority where a thread is ready to run */
+/* interrupts must be locked */
+static inline int _get_highest_ready_prio(void)
+{
+ int bitmap = 0;
+ uint32_t ready_range;
+
+#if (K_NUM_PRIORITIES <= 32)
+ ready_range = _ready_q.prio_bmap[0];
+#else
+ for (;; bitmap++) {
+
+ __ASSERT(bitmap < K_NUM_PRIO_BITMAPS, "prio out-of-range\n");
+
+ if (_ready_q.prio_bmap[bitmap]) {
+ ready_range = _ready_q.prio_bmap[bitmap];
+ break;
+ }
+ }
+#endif
+
+ int abs_prio = (find_lsb_set(ready_range) - 1) + (bitmap << 5);
+
+ __ASSERT(abs_prio < K_NUM_PRIORITIES, "prio out-of-range\n");
+
+ return abs_prio - CONFIG_NUM_COOP_PRIORITIES;
+}
+
+/*
+ * Checks if current thread must be context-switched out. The caller must
+ * already know that the execution context is a thread.
+ */
+static inline int _must_switch_threads(void)
+{
+ return _is_preempt(_current) && __must_switch_threads();
+}
+
+/*
+ * Internal equivalent to k_sched_lock so that it does not incur a function
+ * call penalty in the kernel guts.
+ *
+ * Must be kept in sync until the header files are cleaned up and the
+ * applications have access to the kernel internal data structures (through
+ * APIs of course).
+ */
+static inline void _sched_lock(void)
+{
+#ifdef CONFIG_PREEMPT_ENABLED
+ __ASSERT(!_is_in_isr(), "");
+
+ atomic_inc(&_current->base.sched_locked);
+
+ K_DEBUG("scheduler locked (%p:%d)\n",
+ _current, _current->base.sched_locked);
+#endif
+}
+
+/**
+ * @brief Unlock the scheduler but do NOT reschedule
+ *
+ * It is incumbent upon the caller to ensure that the reschedule occurs
+ * sometime after the scheduler is unlocked.
+ */
+static inline void _sched_unlock_no_reschedule(void)
+{
+#ifdef CONFIG_PREEMPT_ENABLED
+ __ASSERT(!_is_in_isr(), "");
+
+ atomic_dec(&_current->base.sched_locked);
+#endif
+}
+
+static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
+{
+ thread->base.flags |= states;
+}
+
+static inline void _reset_thread_states(struct k_thread *thread,
+ uint32_t states)
+{
+ thread->base.flags &= ~states;
+}
+
+/* mark a thread as being suspended */
+static inline void _mark_thread_as_suspended(struct k_thread *thread)
+{
+ thread->base.flags |= K_SUSPENDED;
+}
+
+/* mark a thread as not being suspended */
+static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
+{
+ thread->base.flags &= ~K_SUSPENDED;
+}
+
+static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+ return thread->base.timeout.delta_ticks_from_prev == _EXPIRED;
+#else
+ return 0;
+#endif
+}
+
+/* check if a thread is on the timeout queue */
+static inline int _is_thread_timeout_active(struct k_thread *thread)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+ return thread->base.timeout.delta_ticks_from_prev != _INACTIVE;
+#else
+ return 0;
+#endif
+}
+
+static inline int _has_thread_started(struct k_thread *thread)
+{
+ return !(thread->base.flags & K_PRESTART);
+}
+
+static inline int _is_thread_prevented_from_running(struct k_thread *thread)
+{
+ return thread->base.flags & (K_PENDING | K_PRESTART |
+ K_DEAD | K_DUMMY |
+ K_SUSPENDED);
+
+}
+
+/* check if a thread is ready */
+static inline int _is_thread_ready(struct k_thread *thread)
+{
+ return !(_is_thread_prevented_from_running(thread) ||
+ _is_thread_timeout_active(thread));
+}
+
+/* mark a thread as pending in its TCS */
+static inline void _mark_thread_as_pending(struct k_thread *thread)
+{
+ thread->base.flags |= K_PENDING;
+}
+
+/* mark a thread as not pending in its TCS */
+static inline void _mark_thread_as_not_pending(struct k_thread *thread)
+{
+ thread->base.flags &= ~K_PENDING;
+}
+
+/* check if a thread is pending */
+static inline int _is_thread_pending(struct k_thread *thread)
+{
+ return !!(thread->base.flags & K_PENDING);
+}
+
+/**
+ * @brief Mark a thread as started
+ *
+ * This routine must be called with interrupts locked.
+ */
+static inline void _mark_thread_as_started(struct k_thread *thread)
+{
+ thread->base.flags &= ~K_PRESTART;
+}
+
+/*
+ * Put the thread in the ready queue according to its priority if it is not
+ * blocked for another reason (eg. suspended).
+ *
+ * Must be called with interrupts locked.
+ */
+static inline void _ready_thread(struct k_thread *thread)
+{
+ __ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
+ ((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
+ (thread == _idle_thread)),
+ "thread %p prio too low (is %d, cannot be lower than %d)",
+ thread, thread->base.prio,
+ thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
+ K_LOWEST_APPLICATION_THREAD_PRIO);
+
+ __ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
+	"thread %p prio too high (is %d, cannot be higher than %d)",
+ thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
+
+ /* needed to handle the start-with-delay case */
+ _mark_thread_as_started(thread);
+
+ if (_is_thread_ready(thread)) {
+ _add_thread_to_ready_q(thread);
+ }
+}
+
+/**
+ * @brief Mark thread as dead
+ *
+ * This routine must be called with interrupts locked.
+ */
+static inline void _mark_thread_as_dead(struct k_thread *thread)
+{
+ thread->base.flags |= K_DEAD;
+}
+
+/*
+ * Set a thread's priority. If the thread is ready, place it in the correct
+ * queue.
+ */
+/* must be called with interrupts locked */
+static inline void _thread_priority_set(struct k_thread *thread, int prio)
+{
+ if (_is_thread_ready(thread)) {
+ _remove_thread_from_ready_q(thread);
+ thread->base.prio = prio;
+ _add_thread_to_ready_q(thread);
+ } else {
+ thread->base.prio = prio;
+ }
+}
+
+/* peek at the first thread pending on a particular wait queue, if any */
+static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q)
+{
+ return (struct k_thread *)sys_dlist_peek_head(wait_q);
+}
+
+static inline struct k_thread *_get_thread_to_unpend(_wait_q_t *wait_q)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+ if (_is_in_isr()) {
+ /* skip threads that have an expired timeout */
+ sys_dlist_t *q = (sys_dlist_t *)wait_q;
+ sys_dnode_t *cur, *next;
+
+ SYS_DLIST_FOR_EACH_NODE_SAFE(q, cur, next) {
+ struct k_thread *thread = (struct k_thread *)cur;
+
+ if (_is_thread_timeout_expired(thread)) {
+ continue;
+ }
+
+ sys_dlist_remove(cur);
+ return thread;
+ }
+ return NULL;
+ }
+#endif
+
+ return (struct k_thread *)sys_dlist_get(wait_q);
+}
+
+/* unpend the first thread from a wait queue */
+/* must be called with interrupts locked */
+static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
+{
+ struct k_thread *thread = _get_thread_to_unpend(wait_q);
+
+ if (thread) {
+ _mark_thread_as_not_pending(thread);
+ }
+
+ return thread;
+}
+
+/* Unpend a thread from the wait queue it is on. Thread must be pending. */
+/* must be called with interrupts locked */
+static inline void _unpend_thread(struct k_thread *thread)
+{
+ __ASSERT(thread->base.flags & K_PENDING, "");
+
+ sys_dlist_remove(&thread->base.k_q_node);
+ _mark_thread_as_not_pending(thread);
+}
+
+#endif /* _ksched__h_ */
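
To show how the pend/unpend helpers above are meant to compose, here is a
hypothetical wait-queue-based object written against them (a sketch only: the
object type, field names and the elided error handling are invented for
illustration and are not part of this change):

#include <wait_q.h>

struct example_obj {
	_wait_q_t wait_q;
	int available;
};

static int example_obj_take(struct example_obj *obj, int32_t timeout)
{
	unsigned int key = irq_lock();

	if (obj->available) {
		obj->available = 0;
		irq_unlock(key);
		return 0;
	}

	/* block the caller until example_obj_give() readies it (or timeout) */
	_pend_current_thread(&obj->wait_q, timeout);

	/* returns the value set by the giver, or -EAGAIN on timeout */
	return _Swap(key);
}

static void example_obj_give(struct example_obj *obj)
{
	unsigned int key = irq_lock();
	struct k_thread *thread = _unpend_first_thread(&obj->wait_q);

	if (thread) {
		_abort_thread_timeout(thread);
		_set_thread_return_value(thread, 0);
		_ready_thread(thread);
		_reschedule_threads(key);	/* may switch to the waiter */
	} else {
		obj->available = 1;
		irq_unlock(key);
	}
}
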
diff --git a/kernel/include/nano_internal.h b/kernel/include/nano_internal.h
new file mode 100644
index 000000000..5396133a3
--- /dev/null
+++ b/kernel/include/nano_internal.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Architecture-independent private nanokernel APIs
+ *
+ * This file contains private nanokernel APIs that are not
+ * architecture-specific.
+ */
+
+#ifndef _NANO_INTERNAL__H_
+#define _NANO_INTERNAL__H_
+
+#define K_NUM_PRIORITIES \
+ (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)
+
+#define K_NUM_PRIO_BITMAPS ((K_NUM_PRIORITIES + 31) >> 5)
+
+#ifndef _ASMLANGUAGE
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Early boot functions */
+
+void _bss_zero(void);
+#ifdef CONFIG_XIP
+void _data_copy(void);
+#else
+static inline void _data_copy(void)
+{
+ /* Do nothing */
+}
+#endif
+FUNC_NORETURN void _Cstart(void);
+
+/* helper type alias for thread control structure */
+
+typedef void (*_thread_entry_t)(void *, void *, void *);
+
+extern void _thread_entry(void (*)(void *, void *, void *),
+ void *, void *, void *);
+
+extern void _new_thread(char *pStack, size_t stackSize,
+ void (*pEntry)(void *, void *, void *),
+ void *p1, void *p2, void *p3,
+ int prio, unsigned options);
+
+/* context switching and scheduling-related routines */
+
+extern unsigned int _Swap(unsigned int);
+
+/* set and clear essential fiber/task flag */
+
+extern void _thread_essential_set(void);
+extern void _thread_essential_clear(void);
+
+/* clean up when a thread is aborted */
+
+#if defined(CONFIG_THREAD_MONITOR)
+extern void _thread_monitor_exit(struct k_thread *thread);
+#else
+#define _thread_monitor_exit(thread) \
+ do {/* nothing */ \
+ } while (0)
+#endif /* CONFIG_THREAD_MONITOR */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* _NANO_INTERNAL__H_ */
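
As a concrete check of the priority-count arithmetic above (configuration
values chosen only for illustration): with CONFIG_NUM_COOP_PRIORITIES = 16 and
CONFIG_NUM_PREEMPT_PRIORITIES = 15, K_NUM_PRIORITIES is 16 + 15 + 1 = 32 (the
extra entry covers the lowest, idle-level priority), and K_NUM_PRIO_BITMAPS is
(32 + 31) >> 5 = 1, i.e. a single 32-bit word is enough for the ready queue's
prio_bmap[].
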
diff --git a/kernel/include/offsets_short.h b/kernel/include/offsets_short.h
new file mode 100644
index 000000000..32918648f
--- /dev/null
+++ b/kernel/include/offsets_short.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _offsets_short__h_
+#define _offsets_short__h_
+
+#include <offsets.h>
+#include <offsets_short_arch.h>
+
+/* kernel */
+
+/* main */
+
+#define _kernel_offset_to_nested \
+ (___kernel_t_nested_OFFSET)
+
+#define _kernel_offset_to_irq_stack \
+ (___kernel_t_irq_stack_OFFSET)
+
+#define _kernel_offset_to_current \
+ (___kernel_t_current_OFFSET)
+
+#define _kernel_offset_to_idle \
+ (___kernel_t_idle_OFFSET)
+
+#define _kernel_offset_to_current_fp \
+ (___kernel_t_current_fp_OFFSET)
+
+#define _kernel_offset_to_ready_q_cache \
+ (___kernel_t_ready_q_OFFSET + ___ready_q_t_cache_OFFSET)
+
+/* end - kernel */
+
+/* threads */
+
+/* main */
+
+#define _thread_offset_to_callee_saved \
+ (___thread_t_callee_saved_OFFSET)
+
+/* base */
+
+#define _thread_offset_to_flags \
+ (___thread_t_base_OFFSET + ___thread_base_t_flags_OFFSET)
+
+#define _thread_offset_to_prio \
+ (___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)
+
+#define _thread_offset_to_sched_locked \
+ (___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)
+
+#define _thread_offset_to_esf \
+ (___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)
+
+
+/* end - threads */
+
+#endif /* _offsets_short__h_ */
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
new file mode 100644
index 000000000..731effc10
--- /dev/null
+++ b/kernel/include/timeout_q.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _kernel_include_timeout_q__h_
+#define _kernel_include_timeout_q__h_
+
+/**
+ * @file
+ * @brief timeout queue for threads on kernel objects
+ *
+ * This file is meant to be included by kernel/include/wait_q.h only
+ */
+
+#include <misc/dlist.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* initialize the timeouts part of k_thread when enabled in the kernel */
+
+static inline void _init_timeout(struct _timeout *t, _timeout_func_t func)
+{
+ /*
+ * Must be initialized here and when dequeueing a timeout so that code
+ * not dealing with timeouts does not have to handle this, such as when
+ * waiting forever on a semaphore.
+ */
+ t->delta_ticks_from_prev = _INACTIVE;
+
+ /*
+ * Must be initialized here so that the _fiber_wakeup family of APIs can
+ * verify the fiber is not on a wait queue before aborting a timeout.
+ */
+ t->wait_q = NULL;
+
+ /*
+ * Must be initialized here, so the _handle_one_timeout()
+ * routine can check if there is a fiber waiting on this timeout
+ */
+ t->thread = NULL;
+
+ /*
+ * Function must be initialized before being potentially called.
+ */
+ t->func = func;
+
+ /*
+	 * These are initialized when enqueuing on the timeout queue:
+ *
+ * thread->timeout.node.next
+ * thread->timeout.node.prev
+ */
+}
+
+static ALWAYS_INLINE void
+_init_thread_timeout(struct _thread_base *thread_base)
+{
+ _init_timeout(&thread_base->timeout, NULL);
+}
+
+/* remove a thread timing out from a kernel object's wait queue */
+
+static inline void _unpend_thread_timing_out(struct k_thread *thread,
+ struct _timeout *timeout_obj)
+{
+ if (timeout_obj->wait_q) {
+ _unpend_thread(thread);
+ thread->base.timeout.wait_q = NULL;
+ }
+}
+
+/*
+ * Handle one timeout from the expired timeout queue. Removes it from the wait
+ * queue it is on if waiting for an object; in this case, the return value is
+ * kept as -EAGAIN, set previously in _Swap().
+ */
+
+static inline void _handle_one_expired_timeout(struct _timeout *timeout)
+{
+ struct k_thread *thread = timeout->thread;
+ unsigned int key = irq_lock();
+
+ timeout->delta_ticks_from_prev = _INACTIVE;
+
+ K_DEBUG("timeout %p\n", timeout);
+ if (thread) {
+ _unpend_thread_timing_out(thread, timeout);
+ _ready_thread(thread);
+ irq_unlock(key);
+ } else {
+ irq_unlock(key);
+ if (timeout->func) {
+ timeout->func(timeout);
+ }
+ }
+}
+
+/*
+ * Loop over all expired timeouts and handle them one by one. Should be called
+ * with interrupts unlocked: interrupts will be locked on each iteration only
+ * for the amount of time necessary.
+ */
+
+static inline void _handle_expired_timeouts(sys_dlist_t *expired)
+{
+ sys_dnode_t *timeout, *next;
+
+ SYS_DLIST_FOR_EACH_NODE_SAFE(expired, timeout, next) {
+ sys_dlist_remove(timeout);
+ _handle_one_expired_timeout((struct _timeout *)timeout);
+ }
+}
+
+/* returns _INACTIVE if the timer is not active */
+static inline int _abort_timeout(struct _timeout *timeout)
+{
+ if (timeout->delta_ticks_from_prev == _INACTIVE) {
+ return _INACTIVE;
+ }
+
+ if (!sys_dlist_is_tail(&_timeout_q, &timeout->node)) {
+ sys_dnode_t *next_node =
+ sys_dlist_peek_next(&_timeout_q, &timeout->node);
+ struct _timeout *next = (struct _timeout *)next_node;
+
+ next->delta_ticks_from_prev += timeout->delta_ticks_from_prev;
+ }
+ sys_dlist_remove(&timeout->node);
+ timeout->delta_ticks_from_prev = _INACTIVE;
+
+ return 0;
+}
+
+/* returns _INACTIVE if the timer has already expired */
+static inline int _abort_thread_timeout(struct k_thread *thread)
+{
+ return _abort_timeout(&thread->base.timeout);
+}
+
+/*
+ * callback for sys_dlist_insert_at():
+ *
+ * Returns 1 if the timeout to insert is less than or equal to the next timeout
+ * in the queue, signifying that it should be inserted before the next.
+ * Returns 0 if it is greater.
+ *
+ * If it is greater, the timeout to insert is decremented by the next timeout,
+ * since the timeout queue is a delta queue. If it is less than or equal, the
+ * timeout of the insert point is decremented instead, to update its delta
+ * value, since the current timeout will be inserted before it.
+ */
+
+static int _is_timeout_insert_point(sys_dnode_t *test, void *timeout)
+{
+ struct _timeout *t = (void *)test;
+ int32_t *timeout_to_insert = timeout;
+
+ if (*timeout_to_insert > t->delta_ticks_from_prev) {
+ *timeout_to_insert -= t->delta_ticks_from_prev;
+ return 0;
+ }
+
+ t->delta_ticks_from_prev -= *timeout_to_insert;
+ return 1;
+}
+
+/*
+ * Add timeout to timeout queue. Record waiting thread and wait queue if any.
+ *
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
+ *
+ * Must be called with interrupts locked.
+ */
+
+static inline void _add_timeout(struct k_thread *thread,
+ struct _timeout *timeout_obj,
+ _wait_q_t *wait_q, int32_t timeout)
+{
+ __ASSERT(timeout > 0, "");
+
+ K_DEBUG("thread %p on wait_q %p, for timeout: %d\n",
+ thread, wait_q, timeout);
+
+ sys_dlist_t *timeout_q = &_timeout_q;
+
+ K_DEBUG("timeout_q %p before: head: %p, tail: %p\n",
+ &_timeout_q,
+ sys_dlist_peek_head(&_timeout_q),
+ _timeout_q.tail);
+
+ K_DEBUG("timeout %p before: next: %p, prev: %p\n",
+ timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
+
+ timeout_obj->thread = thread;
+ timeout_obj->delta_ticks_from_prev = timeout;
+ timeout_obj->wait_q = (sys_dlist_t *)wait_q;
+ sys_dlist_insert_at(timeout_q, (void *)timeout_obj,
+ _is_timeout_insert_point,
+ &timeout_obj->delta_ticks_from_prev);
+
+ K_DEBUG("timeout_q %p after: head: %p, tail: %p\n",
+ &_timeout_q,
+ sys_dlist_peek_head(&_timeout_q),
+ _timeout_q.tail);
+
+ K_DEBUG("timeout %p after: next: %p, prev: %p\n",
+ timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
+}
+
+/*
+ * Put thread on timeout queue. Record wait queue if any.
+ *
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
+ *
+ * Must be called with interrupts locked.
+ */
+
+static inline void _add_thread_timeout(struct k_thread *thread,
+ _wait_q_t *wait_q, int32_t timeout)
+{
+ _add_timeout(thread, &thread->base.timeout, wait_q, timeout);
+}
+
+/* find the closest deadline in the timeout queue */
+
+static inline int32_t _get_next_timeout_expiry(void)
+{
+ struct _timeout *t = (struct _timeout *)
+ sys_dlist_peek_head(&_timeout_q);
+
+ return t ? t->delta_ticks_from_prev : K_FOREVER;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _kernel_include_timeout_q__h_ */
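
A worked example of the delta bookkeeping maintained by
_is_timeout_insert_point() and _abort_timeout() (tick values invented for
illustration): pending timeouts of 3, 7 and 12 ticks are stored as the deltas
3, 4, 5:

	_timeout_q: [3] -> [4] -> [5]		(absolute: 3, 7, 12)

Adding a 9-tick timeout walks the queue: 9 > 3 (remaining 6), 6 > 4
(remaining 2), 2 <= 5, so it is inserted there and the 5-tick node's delta is
reduced to 5 - 2 = 3:

	_timeout_q: [3] -> [4] -> [2] -> [3]	(absolute: 3, 7, 9, 12)

Aborting that 9-tick timeout adds its remaining delta (2) back onto its
successor, restoring the original queue.
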
diff --git a/kernel/include/wait_q.h b/kernel/include/wait_q.h
new file mode 100644
index 000000000..df6538694
--- /dev/null
+++ b/kernel/include/wait_q.h
@@ -0,0 +1,59 @@
+/* wait queue for multiple threads on kernel objects */
+
+/*
+ * Copyright (c) 2015 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _kernel_nanokernel_include_wait_q__h_
+#define _kernel_nanokernel_include_wait_q__h_
+
+#include <kernel_structs.h>
+#include <misc/dlist.h>
+#include <ksched.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+#include <timeout_q.h>
+#else
+static ALWAYS_INLINE void _init_thread_timeout(struct _thread_base *thread_base)
+{
+ ARG_UNUSED(thread_base);
+}
+
+static ALWAYS_INLINE void
+_add_thread_timeout(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
+{
+ ARG_UNUSED(thread);
+ ARG_UNUSED(wait_q);
+ ARG_UNUSED(timeout);
+}
+
+static ALWAYS_INLINE int _abort_thread_timeout(struct k_thread *thread)
+{
+ ARG_UNUSED(thread);
+
+ return 0;
+}
+#define _get_next_timeout_expiry() (K_FOREVER)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _kernel_nanokernel_include_wait_q__h_ */