/* system clock support for nanokernel-only systems */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include <kernel_structs.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/*
 * Leave uninitialized to avoid a division-by-zero error at build time:
 * sys_clock_ticks_per_sec is 0 when there is no system clock.
 */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
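
/*
 * Worked example (illustrative values only): with a 100 Hz tick rate and a
 * 25 MHz cycle counter, i.e. sys_clock_ticks_per_sec = 100 and
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC = 25000000:
 *
 *   sys_clock_us_per_tick        = 1000000 / 100  = 10000 us/tick
 *   sys_clock_hw_cycles_per_tick = 25000000 / 100 = 250000 cycles/tick
 */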

/* updated by timer driver for tickless, stays at 1 for non-tickless */
int32_t _sys_idle_elapsed_ticks = 1;

int64_t _sys_clock_tick_count;

/**
 *
 * @brief Return the lower 32 bits of the current system tick count
 *
 * @return the low 32 bits of the current system tick count
 *
 */
uint32_t _tick_get_32(void)
{
	return (uint32_t)_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, uint32_t);

uint32_t k_uptime_get_32(void)
{
	return __ticks_to_ms(_tick_get_32());
}

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
int64_t _tick_get(void)
{
	int64_t tmp_sys_clock_tick_count;
	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (e.g. x86) cannot read a 64-bit value
	 * atomically, so we must lock out the timer interrupt, which is what
	 * modifies _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

	tmp_sys_clock_tick_count = _sys_clock_tick_count;
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, int64_t);

int64_t k_uptime_get(void)
{
	return __ticks_to_ms(_tick_get());
}
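
/*
 * Minimal usage sketch (illustrative, not compiled): timing a code fragment
 * with the millisecond-based uptime API. do_work() is a hypothetical
 * placeholder for the workload being measured. k_uptime_get_32() is cheaper,
 * but its 32-bit result wraps after about 49.7 days at 1 ms resolution;
 * use k_uptime_get() when the full 64-bit range is needed.
 */
#if 0
	int64_t start_ms = k_uptime_get();

	do_work();		/* hypothetical workload */

	int64_t elapsed_ms = k_uptime_get() - start_ms;
#endif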

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * This function is meant to be used in contained fragments of code. The first
 * call to it in a particular code fragment fills in a reference time variable
 * which then gets passed and updated every time the function is called. From
 * the second call on, the delta between the value passed to it and the current
 * tick count is the return value. Since the first call is meant to only fill in
 * the reference time, its return value should be discarded.
 *
 * Since a code fragment that wants to use sys_tick_delta() passes in its
 * own reference time variable, multiple code fragments can make use of this
 * function concurrently.
 *
 * e.g.
 * uint64_t  reftime;
 * (void) sys_tick_delta(&reftime);  /# prime it #/
 * [do stuff]
 * x = sys_tick_delta(&reftime);     /# how long since priming #/
 * [do more stuff]
 * y = sys_tick_delta(&reftime);     /# how long since [do stuff] #/
 *
 * @return tick count since reference time; undefined for first invocation
 *
 * NOTE: The same inline function implements both the 64-bit and 32-bit
 * variants; the compiler optimizes away the 64-bit result handling in the
 * 32-bit version.
 */
static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
{
	int64_t  delta;
	int64_t  saved;

	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (e.g. x86) cannot read a 64-bit value
	 * atomically, so we must lock out the timer interrupt, which is what
	 * modifies _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

	saved = _sys_clock_tick_count;
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

	return delta;
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * @return tick count since reference time; undefined for first invocation
 */
int64_t sys_tick_delta(int64_t *reftime)
{
	return _nano_tick_delta(reftime);
}


uint32_t sys_tick_delta_32(int64_t *reftime)
{
	return (uint32_t)_nano_tick_delta(reftime);
}

int64_t k_uptime_delta(int64_t *reftime)
{
	int64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

uint32_t k_uptime_delta_32(int64_t *reftime)
{
	return (uint32_t)k_uptime_delta(reftime);
}
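
/*
 * Minimal usage sketch (illustrative, not compiled): the delta APIs follow
 * the same prime-then-measure pattern as sys_tick_delta() above, but report
 * milliseconds. do_work() is a hypothetical placeholder; the return value of
 * the priming call is undefined and must be discarded.
 */
#if 0
	int64_t reftime;

	(void) k_uptime_delta(&reftime);	/* prime the reference time */
	do_work();				/* hypothetical workload */
	int64_t ms = k_uptime_delta(&reftime);	/* ms elapsed since priming */
#endif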

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
#include <wait_q.h>

/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queue
 * them on a local one, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked, since it is a local queue. Each expired timeout is marked
 * as _EXPIRED so that an ISR preempting us and releasing an object on which
 * a thread was timing out (and has now expired) will not hand the object to
 * that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */
static inline void handle_timeouts(int32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	struct _timeout *head =
		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

	K_DEBUG("head: %p, delta: %d\n",
		head, head ? head->delta_ticks_from_prev : -2112);

	if (!head) {
		irq_unlock(key);
		return;
	}

	head->delta_ticks_from_prev -= ticks;

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */
	sys_dnode_t *next = &head->node;
	struct _timeout *timeout = (struct _timeout *)next;

	while (timeout && timeout->delta_ticks_from_prev == 0) {

		sys_dlist_remove(next);
		sys_dlist_append(&expired, next);
		timeout->delta_ticks_from_prev = _EXPIRED;

		irq_unlock(key);
		key = irq_lock();

		next = sys_dlist_peek_head(&_timeout_q);
		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);
}
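
/*
 * Illustration of the delta encoding handled above (made-up values): three
 * timeouts expiring at absolute ticks 3, 5 and 5 are kept in _timeout_q as
 * deltas relative to the previous entry:
 *
 *   head -> { delta: 3 } -> { delta: 2 } -> { delta: 0 } -> (end)
 *
 * Announcing 3 ticks decrements only the head (3 - 3 = 0); the loop above
 * then dequeues each leading entry whose delta has reached 0 (here just the
 * first one), leaving { delta: 2 } -> { delta: 0 } in the queue.
 */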
#else
#define handle_timeouts(ticks) do { } while (0)
#endif

#ifdef CONFIG_TIMESLICING
int32_t _time_slice_elapsed;
int32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int  _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;

/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only change
 *   in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
static void handle_time_slicing(int32_t ticks)
{
	if (_time_slice_duration == 0) {
		return;
	}

	if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
		return;
	}

	_time_slice_elapsed += _ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif
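
/*
 * Worked example (illustrative values only): with CONFIG_TIMESLICE_SIZE set
 * to 20 ms and a 100 Hz tick rate, each tick adds _ticks_to_ms(1) = 10 ms to
 * _time_slice_elapsed, so a sliceable thread is moved to the end of its
 * priority queue every second tick.
 */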
/**
 *
 * @brief Announce a tick to the nanokernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the nanokernel. It takes care of dequeuing the
 * timers that have expired and waking up the fibers pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(int32_t ticks)
{
	unsigned int  key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);

	handle_timeouts(ticks);

	/* time slicing is handled much like just another timeout */
	handle_time_slicing(ticks);
}
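
/*
 * Sketch of the expected caller (illustrative, not compiled): a system clock
 * timer driver announces elapsed ticks from its interrupt handler.
 * timer_int_handler() is a placeholder name for the driver's actual ISR.
 */
#if 0
void timer_int_handler(void *unused)
{
	ARG_UNUSED(unused);

	/* acknowledge/clear the hardware timer interrupt here (driver-specific) */

	/* non-tickless drivers announce one tick at a time */
	_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks);
}
#endif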