1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
|
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <psci.h>
#include "../suspend_private.h"
.global __tftf_suspend
.global __tftf_save_arch_context
.global __tftf_cpu_resume_ep
.section .text, "ax"
/*
 * Saves CPU state for entering suspend. This saves the callee-saved registers
 * on the stack, and allocates space on the stack to save the CPU specific
 * registers for coming out of suspend.
 *
 * r0 contains a pointer to tftf_suspend_context structure.
 */
func __tftf_suspend
/* Preserve the callee-saved GP registers and the return address. */
push {r4 - r12, lr}
/* Snapshot the current SP so the resume path can restore it. */
mov r2, sp
/* Carve out room for struct tftf_suspend_ctx on the stack. */
sub sp, sp, #SUSPEND_CTX_SZ
mov r1, sp
/*
 * r1 now points to struct tftf_suspend_ctx allocated on the stack
 */
/*
 * Record the pre-allocation SP inside the context; __tftf_cpu_resume_ep()
 * reads it back via the same SUSPEND_CTX_SP_OFFSET.
 */
str r2, [r1, #SUSPEND_CTX_SP_OFFSET]
/* r0 (caller's context pointer) and r1 (stack context) are the args. */
bl tftf_enter_suspend
/*
 * If execution reaches this point, the suspend call was either
 * a suspend to standby call or an invalid suspend call.
 * In case of suspend to powerdown, execution will instead resume in
 * __tftf_cpu_resume_ep().
 */
/* Release the stack context and restore the saved registers. */
add sp, sp, #SUSPEND_CTX_SZ
pop {r4 - r12, lr}
bx lr
endfunc __tftf_suspend
/*
 * Save the Hyp-mode system register context needed to survive a powerdown
 * suspend: HMAIR0, HCR, HTTBR (64-bit), HTCR, HVBAR and HSCTLR, stored in
 * that order. The layout must match what __tftf_cpu_resume_ep() reads back.
 *
 * r0 -- pointer to the register save area.
 * Clobbers: r1 - r3. r0 is advanced past the first four words by the
 * writeback stores.
 */
func __tftf_save_arch_context
ldcopr r1, HMAIR0
ldcopr r2, HCR
stm r0!, {r1, r2}
/* HTTBR is a 64-bit register, read as a pair into r1 (low) / r2 (high). */
ldcopr16 r1, r2, HTTBR_64
stm r0!, {r1, r2}
ldcopr r1, HTCR
ldcopr r2, HVBAR
ldcopr r3, HSCTLR
/* Final store has no writeback; r0 is left pointing at the HTCR slot. */
stm r0, {r1, r2, r3}
bx lr
endfunc __tftf_save_arch_context
/*
* Restore CPU register context
* r0 -- Should contain the context pointer
*/
func __tftf_cpu_resume_ep
/* Invalidate local tlb entries before turning on MMU */
/* TLBIALLH ignores the written value; r0 is just a convenient register. */
stcopr r0, TLBIALLH
/* Keep the context pointer in r4; r0 is advanced by the writeback loads. */
mov r4, r0
/* Restore registers in the order __tftf_save_arch_context() saved them. */
ldm r0!, {r1, r2}
stcopr r1, HMAIR0
stcopr r2, HCR
ldm r0!, {r1, r2}
stcopr16 r1, r2, HTTBR_64
ldm r0, {r1, r2, r3}
stcopr r1, HTCR
stcopr r2, HVBAR
/*
 * TLB invalidations need to be completed before enabling MMU
 */
dsb nsh
/* Restoring HSCTLR re-enables the MMU with the translation state above. */
stcopr r3, HSCTLR
/* Ensure the MMU enable takes effect immediately */
isb
mov r0, r4
/* Restore the SP that __tftf_suspend() recorded in the context. */
ldr r2, [r0, #SUSPEND_CTX_SP_OFFSET]
mov sp, r2
/* Restore system (e.g. GIC) context only if the suspend saved it. */
ldr r1, [r0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
cmp r1, #0
beq skip_sys_restore
bl tftf_restore_system_ctx
skip_sys_restore:
/* Undo the push done in __tftf_suspend(); lr is the original caller's. */
pop {r4 - r12, lr}
/* Report a successful suspend/resume cycle to the caller. */
mov r0, #PSCI_E_SUCCESS
bx lr
endfunc __tftf_cpu_resume_ep
|