summary refs log tree commit diff
path: root/kernel/include
diff options
context:
space:
mode:
authorBenjamin Walsh <benjamin.walsh@windriver.com>2016-12-21 15:38:54 -0500
committerAnas Nashif <nashif@linux.intel.com>2017-01-09 20:52:24 +0000
commitf9554765596b7f24ea856d44baebbe1399d590d4 (patch)
treecec00b83b15c13f141786007b53f121aff22092b /kernel/include
parent7e18ab70f9f4ad44d3c303ea4ebbaf6b28cb048c (diff)
kernel/arch: optimize memory use of some thread fields
Some thread fields were 32-bit wide, when they are not even close to using that full range of values. They are instead changed to 8-bit fields.

- prio can fit in one byte, limiting the priorities range to -128 to 127

- recursive scheduler locking can be limited to 255; a rollover results most probably from a logic error

- flags are split into execution flags and thread states; 8 bits is enough for each of them currently, with at worst two states and four flags to spare (on x86, on other archs, there are six flags to spare)

Doing this saves 8 bytes per stack. It also sets up an incoming enhancement when checking if the current thread is preemptible on interrupt exit.

Change-Id: Ieb5321a5b99f99173b0605dd4a193c3bc7ddabf4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Diffstat (limited to 'kernel/include')
-rw-r--r--kernel/include/kernel_offsets.h3
-rw-r--r--kernel/include/kernel_structs.h33
-rw-r--r--kernel/include/ksched.h28
-rw-r--r--kernel/include/offsets_short.h7
4 files changed, 44 insertions, 27 deletions
diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h
index 73553c007..63ac010ac 100644
--- a/kernel/include/kernel_offsets.h
+++ b/kernel/include/kernel_offsets.h
@@ -48,7 +48,8 @@ GEN_OFFSET_SYM(_kernel_t, current_fp);
GEN_ABSOLUTE_SYM(_STRUCT_KERNEL_SIZE, sizeof(struct _kernel));
-GEN_OFFSET_SYM(_thread_base_t, flags);
+GEN_OFFSET_SYM(_thread_base_t, execution_flags);
+GEN_OFFSET_SYM(_thread_base_t, thread_state);
GEN_OFFSET_SYM(_thread_base_t, prio);
GEN_OFFSET_SYM(_thread_base_t, sched_locked);
GEN_OFFSET_SYM(_thread_base_t, swap_data);
diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
index 234ea7770..570a6398d 100644
--- a/kernel/include/kernel_structs.h
+++ b/kernel/include/kernel_structs.h
@@ -24,14 +24,17 @@
#endif
/*
- * Common bitmask definitions for the struct tcs->flags bit field.
+ * bitmask definitions for the execution_flags and state
*
* Must be before kerneL_arch_data.h because it might need them to be already
* defined.
*/
-/* thread is defined statically */
-#define K_STATIC (1 << 0)
+
+/* states: common uses low bits, arch-specific use high bits */
+
+/* system thread that must not abort */
+#define K_ESSENTIAL (1 << 0)
/* Thread is waiting on an object */
#define K_PENDING (1 << 1)
@@ -48,13 +51,20 @@
/* Not a real thread */
#define K_DUMMY (1 << 5)
-/* system thread that must not abort */
-#define K_ESSENTIAL (1 << 6)
+/* end - states */
+
+
+/* execution flags: common uses low bits, arch-specific use high bits */
+
+/* thread is defined statically */
+#define K_STATIC (1 << 0)
#if defined(CONFIG_FP_SHARING)
/* thread uses floating point registers */
-#define K_FP_REGS (1 << 7)
+#define K_FP_REGS (1 << 1)
#endif
+/* end - execution flags */
+
#include <kernel_arch_data.h>
@@ -76,13 +86,16 @@ struct _thread_base {
sys_dnode_t k_q_node;
/* execution flags */
- uint32_t flags;
+ uint8_t execution_flags;
- /* thread priority used to sort linked list */
- int prio;
+ /* thread state */
+ uint8_t thread_state;
/* scheduler lock count */
- volatile uint32_t sched_locked;
+ volatile uint8_t sched_locked;
+
+ /* thread priority used to sort linked list */
+ int8_t prio;
/* data returned by APIs */
void *swap_data;
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index b9c919c45..f83613ba6 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -271,25 +271,25 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
{
- thread->base.flags |= states;
+ thread->base.thread_state |= states;
}
static inline void _reset_thread_states(struct k_thread *thread,
uint32_t states)
{
- thread->base.flags &= ~states;
+ thread->base.thread_state &= ~states;
}
/* mark a thread as being suspended */
static inline void _mark_thread_as_suspended(struct k_thread *thread)
{
- thread->base.flags |= K_SUSPENDED;
+ thread->base.thread_state |= K_SUSPENDED;
}
/* mark a thread as not being suspended */
static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
{
- thread->base.flags &= ~K_SUSPENDED;
+ thread->base.thread_state &= ~K_SUSPENDED;
}
static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
@@ -313,14 +313,14 @@ static inline int _is_thread_timeout_active(struct k_thread *thread)
static inline int _has_thread_started(struct k_thread *thread)
{
- return !(thread->base.flags & K_PRESTART);
+ return !(thread->base.thread_state & K_PRESTART);
}
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
{
- return thread->base.flags & (K_PENDING | K_PRESTART |
- K_DEAD | K_DUMMY |
- K_SUSPENDED);
+ return thread->base.thread_state & (K_PENDING | K_PRESTART |
+ K_DEAD | K_DUMMY |
+ K_SUSPENDED);
}
@@ -334,19 +334,19 @@ static inline int _is_thread_ready(struct k_thread *thread)
/* mark a thread as pending in its TCS */
static inline void _mark_thread_as_pending(struct k_thread *thread)
{
- thread->base.flags |= K_PENDING;
+ thread->base.thread_state |= K_PENDING;
}
/* mark a thread as not pending in its TCS */
static inline void _mark_thread_as_not_pending(struct k_thread *thread)
{
- thread->base.flags &= ~K_PENDING;
+ thread->base.thread_state &= ~K_PENDING;
}
/* check if a thread is pending */
static inline int _is_thread_pending(struct k_thread *thread)
{
- return !!(thread->base.flags & K_PENDING);
+ return !!(thread->base.thread_state & K_PENDING);
}
/**
@@ -356,7 +356,7 @@ static inline int _is_thread_pending(struct k_thread *thread)
*/
static inline void _mark_thread_as_started(struct k_thread *thread)
{
- thread->base.flags &= ~K_PRESTART;
+ thread->base.thread_state &= ~K_PRESTART;
}
/*
@@ -394,7 +394,7 @@ static inline void _ready_thread(struct k_thread *thread)
*/
static inline void _mark_thread_as_dead(struct k_thread *thread)
{
- thread->base.flags |= K_DEAD;
+ thread->base.thread_state |= K_DEAD;
}
/*
@@ -463,7 +463,7 @@ static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
/* must be called with interrupts locked */
static inline void _unpend_thread(struct k_thread *thread)
{
- __ASSERT(thread->base.flags & K_PENDING, "");
+ __ASSERT(thread->base.thread_state & K_PENDING, "");
sys_dlist_remove(&thread->base.k_q_node);
_mark_thread_as_not_pending(thread);
diff --git a/kernel/include/offsets_short.h b/kernel/include/offsets_short.h
index 32918648f..1b7c83159 100644
--- a/kernel/include/offsets_short.h
+++ b/kernel/include/offsets_short.h
@@ -53,8 +53,11 @@
/* base */
-#define _thread_offset_to_flags \
- (___thread_t_base_OFFSET + ___thread_base_t_flags_OFFSET)
+#define _thread_offset_to_thread_state \
+ (___thread_t_base_OFFSET + ___thread_base_t_thread_state_OFFSET)
+
+#define _thread_offset_to_execution_flags \
+ (___thread_t_base_OFFSET + ___thread_base_t_execution_flags_OFFSET)
#define _thread_offset_to_prio \
(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)