1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
|
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2019, Linaro Limited
*/
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
.arch_extension sec
/*
* If ASLR is configured the identity mapped code may be mapped at two
* locations, the identity location where virtual and physical address is
* the same and at the runtime selected location to which OP-TEE has been
* relocated. Code executing at a location different compared to the
* runtime selected location works OK as long as it doesn't do relative
* addressing outside the identity mapped range. To allow relative
* addressing this macro jumps to the runtime selected location.
*
* Note that the identity mapped range and the runtime selected range can
* only differ if ASLR is configured.
*/
/*
 * readjust_pc - continue execution at the runtime selected mapping.
 * The literal pool entry for the 1111 label holds the runtime
 * (relocated) absolute address, so the indirect branch moves
 * execution from the identity mapped alias to the runtime location.
 * Expands to nothing unless CFG_CORE_ASLR is enabled.
 * Clobbers r12.
 */
.macro readjust_pc
#ifdef CFG_CORE_ASLR
ldr r12, =1111f /* Absolute address of 1111 at the runtime location */
bx r12
1111:
#endif
.endm
/*
 * Entry from the Secure Monitor for a standard (yielding) SMC call.
 * SMC arguments a0-a3 arrive in r0-r3 and a4-a7 in r4-r7; r4-r7 are
 * pushed so the extra arguments reach thread_handle_std_smc() on the
 * stack, as AAPCS requires for arguments beyond the first four.
 */
FUNC vector_std_smc_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
push {r4-r7} /* a4-a7 passed as stack arguments */
bl thread_handle_std_smc
add sp, sp, #(4 * 4) /* Drop the four stacked arguments */
/*
 * Normally thread_handle_std_smc() should return via
 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
 * hasn't switched stack (error detected) it will do a normal "C"
 * return.
 */
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_std_smc_entry
/*
 * Entry from the Secure Monitor for a fast SMC call. All eight SMC
 * argument registers (r0-r7) are saved on the stack and a pointer to
 * them is passed to thread_handle_fast_smc(); the (possibly updated)
 * values are then popped into r1-r8 as results for the return SMC.
 */
FUNC vector_fast_smc_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
push {r0-r7} /* Saved SMC arguments a0-a7 */
mov r0, sp /* Pointer to the saved r0-r7 */
bl thread_handle_fast_smc
pop {r1-r8} /* Results of the call, returned in a1-a8 */
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_fast_smc_entry
/*
 * Entry from the Secure Monitor when a FIQ (secure/native interrupt)
 * is to be handled by OP-TEE. Dispatches the interrupt and returns
 * to the monitor with FIQ_DONE.
 */
FUNC vector_fiq_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
/* Secure Monitor received a FIQ and passed control to us. */
bl thread_check_canaries /* Verify stack canaries */
bl itr_core_handler /* Dispatch the interrupt */
ldr r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_fiq_entry
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * Power-on entry for a CPU. Note: deliberately no readjust_pc here;
 * the MMU is not yet enabled on this CPU at entry (cpu_on_handler()
 * enables it — see comment below), so only the identity mapping is
 * in effect.
 */
LOCAL_FUNC vector_cpu_on_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
bl cpu_on_handler
/* When cpu_on_handler() returns mmu is enabled */
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_ON_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_on_entry
/*
 * Power-off entry for a CPU: run the handler and report OFF_DONE with
 * its return value to the monitor.
 */
LOCAL_FUNC vector_cpu_off_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
bl thread_cpu_off_handler
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_OFF_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_off_entry
/*
 * Suspend entry for a CPU: run the handler and report SUSPEND_DONE
 * with its return value to the monitor.
 */
LOCAL_FUNC vector_cpu_suspend_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
bl thread_cpu_suspend_handler
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_suspend_entry
/*
 * Resume entry for a CPU: run the handler and report RESUME_DONE
 * with its return value to the monitor.
 */
LOCAL_FUNC vector_cpu_resume_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
bl thread_cpu_resume_handler
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_resume_entry
/*
 * System-off entry: run the handler and report SYSTEM_OFF_DONE with
 * its return value to the monitor.
 */
LOCAL_FUNC vector_system_off_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
bl thread_system_off_handler
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_system_off_entry
/*
 * System-reset entry: run the handler and report SYSTEM_RESET_DONE
 * with its return value to the monitor.
 */
LOCAL_FUNC vector_system_reset_entry , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
readjust_pc
bl thread_system_reset_handler
mov r1, r0 /* Return value becomes a1 of the done SMC */
ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_system_reset_entry
/*
* Vector table supplied to ARM Trusted Firmware (ARM-TF) at
* initialization. Also used when compiled with the internal monitor, but
* the cpu_*_entry and system_*_entry are not used then.
*
* Note that ARM-TF depends on the layout of this vector table, any change
* in layout has to be synced with ARM-TF.
*/
FUNC thread_vector_table , : , .identity_map
UNWIND( .fnstart)
UNWIND( .cantunwind)
/* Entry order is ABI with ARM-TF (see comment above) — do not reorder */
b vector_std_smc_entry /* Yielding (standard) SMC */
b vector_fast_smc_entry /* Fast SMC */
b vector_cpu_on_entry /* CPU on */
b vector_cpu_off_entry /* CPU off */
b vector_cpu_resume_entry /* CPU resume */
b vector_cpu_suspend_entry /* CPU suspend */
b vector_fiq_entry /* FIQ, secure interrupt */
b vector_system_off_entry /* System off */
b vector_system_reset_entry /* System reset */
UNWIND( .fnend)
END_FUNC thread_vector_table
DECLARE_KEEP_PAGER thread_vector_table
#endif /*if defined(CFG_WITH_ARM_TRUSTED_FW)*/
/*
 * Runs __thread_std_smc_entry(), then switches to the per-core
 * temporary stack, frees the thread state and issues the done SMC
 * with the call's return value in a1.
 */
FUNC thread_std_smc_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
bl __thread_std_smc_entry
mov r4, r0 /* Save return value for later */
/* Disable interrupts before switching to temporary stack */
cpsid aif
bl thread_get_tmp_sp
mov sp, r0 /* Switch to this core's temporary stack */
bl thread_state_free
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
mov r1, r4 /* Result of the call */
mov r2, #0 /* Zero remaining result registers */
mov r3, #0
mov r4, #0
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC thread_std_smc_entry
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
/*
 * Suspends the current thread and issues an RPC to normal world with
 * the arguments in rv[]. Execution resumes at .thread_rpc_return when
 * the thread is resumed; r0-r3 at that point are copied back into
 * rv[] before returning to the caller.
 */
FUNC thread_rpc , :
UNWIND( .fnstart)
push {r0, lr} /* Save pointer to rv[] and return address */
UNWIND( .save {r0, lr})
bl thread_save_state
mov r4, r0 /* Save original CPSR */
/*
 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
 */
bl thread_get_tmp_sp
ldr r5, [sp] /* Get pointer to rv[] */
cps #CPSR_MODE_SVC /* Change to SVC mode */
mov sp, r0 /* Switch to tmp stack */
mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
mov r1, r4 /* CPSR to restore */
ldr r2, =.thread_rpc_return /* PC to resume at */
bl thread_state_suspend
mov r4, r0 /* Supply thread index */
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
ldm r5, {r1-r3} /* Load rv[0..2] into r1-r3 */
smc #0
b . /* SMC should not return */
.thread_rpc_return:
/*
 * At this point the stack pointer has been restored to the value
 * it had when thread_save_state() was called above.
 *
 * Jumps here from thread_resume above when RPC has returned. The
 * IRQ and FIQ bits are restored to what they were when this
 * function was originally entered.
 */
pop {r12, lr} /* Get pointer to rv[] */
stm r12, {r0-r3} /* Store r0-r3 into rv[] */
bx lr
UNWIND( .fnend)
END_FUNC thread_rpc
DECLARE_KEEP_PAGER thread_rpc
/*
* void thread_foreign_intr_exit(uint32_t thread_index)
*
* This function is jumped to at the end of macro foreign_intr_handler().
* The current thread as indicated by @thread_index has just been
* suspended. The job here is just to inform normal world the thread id to
* resume when returning.
*/
FUNC thread_foreign_intr_exit , :
mov r4, r0 /* Thread index, forwarded so normal world can resume it */
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
ldr r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR /* RPC reason: foreign interrupt */
mov r2, #0
mov r3, #0
smc #0
b . /* SMC should not return */
END_FUNC thread_foreign_intr_exit
|