path: root/kernel/sys_clock.c
author    Anas Nashif <anas.nashif@intel.com>    2016-12-17 17:48:47 -0500
committer Anas Nashif <anas.nashif@intel.com>    2016-12-19 14:59:35 -0500
commit    d687a95611bbd737fa56ba0a99a3e65255715cac (patch)
tree      7f472cc8d694f95b3dfdd5fcdcd03e824f5a43ef /kernel/sys_clock.c
parent    9463dc0b8f0ae9916b410cb9e5f9ce8e758cc5ad (diff)
kernel: move kernel code to kernel/ directly
Also remove mentions of unified kernel in various places in the kernel,
samples and documentation.

Change-Id: Ice43bc73badbe7e14bae40fd6f2a302f6528a77d
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Diffstat (limited to 'kernel/sys_clock.c')
-rw-r--r-- kernel/sys_clock.c | 311
1 file changed, 311 insertions(+), 0 deletions(-)
diff --git a/kernel/sys_clock.c b/kernel/sys_clock.c
new file mode 100644
index 000000000..9108c0191
--- /dev/null
+++ b/kernel/sys_clock.c
@@ -0,0 +1,311 @@
+/* system clock support */
+
+/*
+ * Copyright (c) 1997-2015 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <kernel_structs.h>
+#include <toolchain.h>
+#include <sections.h>
+#include <wait_q.h>
+#include <drivers/system_timer.h>
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
+int sys_clock_hw_cycles_per_tick =
+ CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
+#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
+int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
+#endif
+#else
+/* don't initialize: the initializers above would divide by zero here */
+int sys_clock_us_per_tick;
+int sys_clock_hw_cycles_per_tick;
+#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
+int sys_clock_hw_cycles_per_sec;
+#endif
+#endif
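+
+/*
+ * Worked example (illustrative values, not from any particular board):
+ * with sys_clock_ticks_per_sec == 100 and
+ * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 25000000, each tick lasts
+ * 1000000 / 100 = 10000 us and spans 25000000 / 100 = 250000 hardware
+ * cycles.
+ */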
+
+/* updated by timer driver for tickless, stays at 1 for non-tickless */
+int32_t _sys_idle_elapsed_ticks = 1;
+
+int64_t _sys_clock_tick_count;
+
+/**
+ *
+ * @brief Return the lower part of the current system tick count
+ *
+ * @return the lower 32 bits of the current system tick count
+ *
+ */
+uint32_t _tick_get_32(void)
+{
+ return (uint32_t)_sys_clock_tick_count;
+}
+FUNC_ALIAS(_tick_get_32, sys_tick_get_32, uint32_t);
+
+uint32_t k_uptime_get_32(void)
+{
+ return __ticks_to_ms(_tick_get_32());
+}
+
+/**
+ *
+ * @brief Return the current system tick count
+ *
+ * @return the current system tick count
+ *
+ */
+int64_t _tick_get(void)
+{
+ int64_t tmp_sys_clock_tick_count;
+ /*
+ * Lock the interrupts when reading _sys_clock_tick_count 64-bit
+ * variable. Some architectures (x86) do not handle 64-bit atomically,
+ * so we have to lock the timer interrupt that causes change of
+ * _sys_clock_tick_count
+ */
+ unsigned int imask = irq_lock();
+
+ tmp_sys_clock_tick_count = _sys_clock_tick_count;
+ irq_unlock(imask);
+ return tmp_sys_clock_tick_count;
+}
+FUNC_ALIAS(_tick_get, sys_tick_get, int64_t);
+
+int64_t k_uptime_get(void)
+{
+ return __ticks_to_ms(_tick_get());
+}
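+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): measure
+ * elapsed uptime in milliseconds across an operation:
+ *
+ *     int64_t start = k_uptime_get();
+ *     do_work();                      /# any application code #/
+ *     int64_t elapsed_ms = k_uptime_get() - start;
+ */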
+
+/**
+ *
+ * @brief Return number of ticks since a reference time
+ *
+ * This function is meant to be used in contained fragments of code. The first
+ * call to it in a particular code fragment fills in a reference time variable
+ * which then gets passed and updated every time the function is called. From
+ * the second call on, the delta between the value passed to it and the current
+ * tick count is the return value. Since the first call is meant to only fill in
+ * the reference time, its return value should be discarded.
+ *
+ * Since a code fragment that wants to use sys_tick_delta() passes in its
+ * own reference time variable, multiple code fragments can make use of this
+ * function concurrently.
+ *
+ * e.g.
+ * int64_t reftime;
+ * (void) sys_tick_delta(&reftime); /# prime it #/
+ * [do stuff]
+ * x = sys_tick_delta(&reftime); /# how long since priming #/
+ * [do more stuff]
+ * y = sys_tick_delta(&reftime); /# how long since [do stuff] #/
+ *
+ * @return tick count since reference time; undefined for first invocation
+ *
+ * NOTE: We use an inline function for both the 64-bit and 32-bit variants;
+ * the compiler optimizes out the 64-bit result handling in the 32-bit
+ * version.
+ */
+static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
+{
+ int64_t delta;
+ int64_t saved;
+
+ /*
+ * Lock the interrupts when reading _sys_clock_tick_count 64-bit
+ * variable. Some architectures (x86) do not handle 64-bit atomically,
+ * so we have to lock the timer interrupt that causes change of
+ * _sys_clock_tick_count
+ */
+ unsigned int imask = irq_lock();
+
+ saved = _sys_clock_tick_count;
+ irq_unlock(imask);
+ delta = saved - (*reftime);
+ *reftime = saved;
+
+ return delta;
+}
+
+/**
+ *
+ * @brief Return number of ticks since a reference time
+ *
+ * @return tick count since reference time; undefined for first invocation
+ */
+int64_t sys_tick_delta(int64_t *reftime)
+{
+ return _nano_tick_delta(reftime);
+}
+
+
+uint32_t sys_tick_delta_32(int64_t *reftime)
+{
+ return (uint32_t)_nano_tick_delta(reftime);
+}
+
+int64_t k_uptime_delta(int64_t *reftime)
+{
+ int64_t uptime, delta;
+
+ uptime = k_uptime_get();
+ delta = uptime - *reftime;
+ *reftime = uptime;
+
+ return delta;
+}
+
+uint32_t k_uptime_delta_32(int64_t *reftime)
+{
+ return (uint32_t)k_uptime_delta(reftime);
+}
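+
+/*
+ * Usage sketch (hypothetical caller): k_uptime_delta() follows the same
+ * priming pattern as sys_tick_delta() above, but returns milliseconds:
+ *
+ *     int64_t reftime = 0;
+ *     (void) k_uptime_delta(&reftime);        /# prime it #/
+ *     do_work();
+ *     int64_t ms = k_uptime_delta(&reftime);  /# ms since priming #/
+ */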
+
+/* handle the expired timeouts in the nano timeout queue */
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+#include <wait_q.h>
+
+/*
+ * Handle timeouts by dequeuing the expired ones from _timeout_q and queue
+ * them on a local one, then doing the real handling from that queue. This
+ * allows going through the second queue without needing to have the
+ * interrupts locked since it is a local queue. Each expired timeout is marked
+ * as _EXPIRED so that an ISR preempting us and releasing an object on which
+ * a thread was timing out and expired will not give the object to that thread.
+ *
+ * Always called from interrupt level, and always only from the system clock
+ * interrupt.
+ */
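+/*
+ * Illustrative example of the delta encoding (values invented for
+ * clarity): timeouts due at absolute ticks 5, 8, 8 and 12 sit in
+ * _timeout_q with delta_ticks_from_prev values 5, 3, 0 and 4, so
+ * subtracting the elapsed ticks from the head alone ages the whole
+ * queue.
+ */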
+static inline void handle_timeouts(int32_t ticks)
+{
+ sys_dlist_t expired;
+ unsigned int key;
+
+ /* init before locking interrupts */
+ sys_dlist_init(&expired);
+
+ key = irq_lock();
+
+ struct _timeout *head =
+ (struct _timeout *)sys_dlist_peek_head(&_timeout_q);
+
+ K_DEBUG("head: %p, delta: %d\n",
+ head, head ? head->delta_ticks_from_prev : -2112);
+
+ if (!head) {
+ irq_unlock(key);
+ return;
+ }
+
+ head->delta_ticks_from_prev -= ticks;
+
+ /*
+ * Dequeue all expired timeouts from _timeout_q, relieving irq lock
+ * pressure between each of them, allowing handling of higher priority
+ * interrupts. We know that no new timeout will be prepended in front
+ * of a timeout whose delta is 0, since timeouts of 0 ticks are
+ * prohibited.
+ */
+ sys_dnode_t *next = &head->node;
+ struct _timeout *timeout = (struct _timeout *)next;
+
+ while (timeout && timeout->delta_ticks_from_prev == 0) {
+
+ sys_dlist_remove(next);
+ sys_dlist_append(&expired, next);
+ timeout->delta_ticks_from_prev = _EXPIRED;
+
+ irq_unlock(key);
+ key = irq_lock();
+
+ next = sys_dlist_peek_head(&_timeout_q);
+ timeout = (struct _timeout *)next;
+ }
+
+ irq_unlock(key);
+
+ _handle_expired_timeouts(&expired);
+}
+#else
+ #define handle_timeouts(ticks) do { } while (0)
+#endif
+
+#ifdef CONFIG_TIMESLICING
+int32_t _time_slice_elapsed;
+int32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
+int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;
+
+/*
+ * Always called from interrupt level, and always only from the system clock
+ * interrupt, thus:
+ * - _current does not have to be protected, since it only changes at thread
+ * level or when exiting a non-nested interrupt
+ * - _time_slice_elapsed does not have to be protected, since it can only change
+ * in this function and at thread level
+ * - _time_slice_duration does not have to be protected, since it can only
+ * change at thread level
+ */
+static void handle_time_slicing(int32_t ticks)
+{
+ if (_time_slice_duration == 0) {
+ return;
+ }
+
+ if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
+ return;
+ }
+
+ _time_slice_elapsed += _ticks_to_ms(ticks);
+ if (_time_slice_elapsed >= _time_slice_duration) {
+
+ unsigned int key;
+
+ _time_slice_elapsed = 0;
+
+ key = irq_lock();
+ _move_thread_to_end_of_prio_q(_current);
+ irq_unlock(key);
+ }
+}
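+
+/*
+ * Illustrative arithmetic (invented values): with a 20 ms slice
+ * (_time_slice_duration == 20) and 10 ms ticks, the slice expires on
+ * the second tick announcement: 10 + 10 >= 20, so _current moves to
+ * the end of its priority queue and _time_slice_elapsed resets to 0.
+ */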
+#else
+#define handle_time_slicing(ticks) do { } while (0)
+#endif
+
+/**
+ *
+ * @brief Announce a tick to the nanokernel
+ *
+ * This function is only to be called by the system clock timer driver when a
+ * tick is to be announced to the nanokernel. It takes care of dequeuing the
+ * timers that have expired and waking up the fibers pending on them.
+ *
+ * @return N/A
+ */
+void _nano_sys_clock_tick_announce(int32_t ticks)
+{
+ unsigned int key;
+
+ K_DEBUG("ticks: %d\n", ticks);
+
+ /* 64-bit value, ensure atomic access with irq lock */
+ key = irq_lock();
+ _sys_clock_tick_count += ticks;
+ irq_unlock(key);
+
+ handle_timeouts(ticks);
+
+ /* time slicing is basically handled like just yet another timeout */
+ handle_time_slicing(ticks);
+}
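+
+/*
+ * Usage sketch (hypothetical driver code): a system clock timer driver
+ * ISR announces the ticks that elapsed since the last announcement:
+ *
+ *     _nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks);
+ *
+ * where _sys_idle_elapsed_ticks stays at 1 for non-tickless operation
+ * (see its definition above).
+ */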