path: root/linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S
Diffstat (limited to 'linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S')
-rwxr-xr-x  linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S | 396
1 file changed, 396 insertions(+), 0 deletions(-)
diff --git a/linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S b/linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S
new file mode 100755
index 0000000..0c5204c
--- /dev/null
+++ b/linaro/arm-virt-bl/big-little/secure_world/monmode_vectors.S
@@ -0,0 +1,396 @@
+ ;
+ ; Copyright (c) 2012, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+
+ AREA |.text.monmode_vectors|, CODE, ALIGN=5
+ PRESERVE8
+
+SMC_SEC_INIT EQU 0x0
+SMC_SEC_SAVE EQU 0x1
+SMC_SEC_SHUTDOWN EQU 0x2
+L1 EQU 0x0
+L2 EQU 0x1
+INV EQU 0x0
+CLN EQU 0x1
+CLN_INV EQU 0x2
+CR_M EQU (1<<0)
+CR_C EQU (1<<2)
+CR_I EQU (1<<12)
+CR_Z EQU (1<<11)
+CR_U EQU (1<<22)
+CR_TRE EQU (1<<28)
+SCR_NS EQU 0x01
+PT_IRGN EQU (1<<0)
+PT_RGN EQU (1<<3)
+PT_SH EQU (1<<1)
+PT_NOS EQU (1<<5)
+TTBR0_PROP EQU (PT_NOS :OR: PT_SH :OR: PT_RGN :OR: PT_IRGN)
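+; TTBR0_PROP sets the shareability/cacheability attributes used for
+; translation-table walks (the NOS, S, RGN and IRGN fields of TTBR0)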
+SO_MEM EQU 0x0
+DV_MEM EQU 0x1
+NM_MEM EQU 0x2
+I_SH EQU 0x1
+SH EQU 0x1
+PRRR_TR0 EQU (SO_MEM<<0)
+PRRR_TR1 EQU (DV_MEM<<2)
+PRRR_TR4 EQU (NM_MEM<<8)
+PRRR_TR7 EQU (NM_MEM<<14)
+PRRR_DS1 EQU (SH<<17)
+PRRR_NS1 EQU (SH<<19)
+PRRR_NOS1 EQU (I_SH<<25)
+PRRR_NOS4 EQU (I_SH<<28)
+PRRR_NOS7 EQU (I_SH<<31)
+NC EQU 0x0
+WBWA EQU 0x1
+NMRR_OR4 EQU (NC<<24)
+NMRR_OR7 EQU (WBWA<<30)
+NMRR_IR4 EQU (NC<<8)
+NMRR_IR7 EQU (WBWA<<14)
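+; With TEX remap (SCTLR.TRE) enabled, the PRRR/NMRR values built from the
+; fields above map memory type 0 to Strongly-Ordered, type 1 to Device and
+; types 4/7 to Normal memory (Non-cacheable and Write-Back Write-Allocate
+; respectively), with shareable Device/Normal mappings treated as
+; inner-shareable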
+
+; ==============================================================================
+; These should be the same as the defines in misc.h
+; ==============================================================================
+MAX_CLUSTERS EQU 2
+MAX_CPUS EQU 8
+STACK_SIZE EQU (96 << 2)
+
+; ==============================================================================
+; Simple vector table
+; ==============================================================================
+ IMPORT ns_entry_ptr
+ IMPORT secure_context_save
+ IMPORT enable_caches
+ IMPORT inv_icache_all
+ IMPORT flat_pagetables
+ IMPORT read_sctlr
+ IMPORT write_sctlr
+ IMPORT read_ttbr0
+ IMPORT write_ttbr0
+ IMPORT inv_tlb_all
+ IMPORT inv_bpred_all
+ IMPORT write_dacr
+ IMPORT write_prrr
+ IMPORT write_nmrr
+ IMPORT get_sp
+ IMPORT secure_context_restore
+ IMPORT powerdown_cluster
+ IMPORT get_powerdown_stack
+ IMPORT wfi
+ IMPORT read_cpuid
+ IMPORT add_dv_page
+ EXPORT monmode_vector_table
+ EXPORT warm_reset
+
+ ; ----------------------------------------------------
+ ; Macro to initialise the MMU. Corrupts 'r0' and the two registers passed as parameters
+ ; ----------------------------------------------------
+ MACRO
+ setup_mmu $r1, $r2
+ MOVW $r1, #0x5555
+ MOVT $r1, #0x5555
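+ ; 0x55555555 sets every domain in the DACR to 'Client', so the access
+ ; permissions in the translation tables are enforced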
+ ; Install our flat page tables
+ LDR r0, =flat_pagetables
+ ORR r0, #TTBR0_PROP
+ ; Write TTBR0
+ MCR p15, 0, r0, c2, c0, 0
+ ; Write DACR
+ MCR p15, 0, $r1, c3, c0, 0
+
+ ; Enable the remap registers to treat OSH memory as ISH memory
+ MOV $r1, #PRRR_TR0
+ ORR $r1, #PRRR_TR1
+ ORR $r1, #PRRR_TR4
+ ORR $r1, #PRRR_TR7
+ ORR $r1, #PRRR_NOS1
+ ORR $r1, #PRRR_NOS4
+ ORR $r1, #PRRR_NOS7
+ ORR $r1, #PRRR_NS1
+ ORR $r1, #PRRR_DS1
+
+ MOV $r2, #NMRR_IR4
+ ORR $r2, #NMRR_IR7
+ ORR $r2, #NMRR_OR4
+ ORR $r2, #NMRR_OR7
+
+ MCR p15, 0, $r1, c10, c2, 0
+ MCR p15, 0, $r2, c10, c2, 1
+
+ ; Enable Dcache, TEX Remap & MMU
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, #CR_M
+ ORR r0, #CR_C
+ ORR r0, #CR_TRE
+ MCR p15, 0, r0, c1, c0, 0
+ DSB
+ ISB
+ MEND
+
+ ; ----------------------------------------------------
+ ; Macro to set up the secure stack. Corrupts 'r0'-'r3' and 'lr'
+ ; ----------------------------------------------------
+ MACRO
+ setup_stack
+ LDR r0, =secure_stacks
+ MOV r1, #STACK_SIZE
+ BL get_sp
+ MOV sp, r0
+ MEND
+
+ ALIGN 32
+monmode_vector_table
+monmode_reset_vec
+ B monmode_reset_vec
+monmode_undef_vec
+ B monmode_undef_vec
+monmode_smc_vec
+ B do_smc
+monmode_pabort_vec
+ B monmode_pabort_vec
+monmode_dabort_vec
+ B monmode_dabort_vec
+monmode_unused_vec
+ B monmode_unused_vec
+monmode_irq_vec
+ B monmode_irq_vec
+monmode_fiq_vec
+ B monmode_fiq_vec
+
+
+ ; SMC handler. Currently accepts three types of calls:
+ ; 1. Init: Sets up the stack, MMU, caches & coherency
+ ; 2. Context Save: Saves the secure world context
+ ; 3. Powerdown: Cleans the caches and powers down the cluster
+ ; Assumes r4-r7 are available as scratch registers
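+ ; The caller passes the operation code in r0; for SMC_SEC_SAVE the value
+ ; in r1 is forwarded to secure_context_save. A hypothetical call site
+ ; (illustrative only, not part of this file) might look like:
+ ;     MOV r0, #SMC_SEC_SAVE      ; select the context-save operation
+ ;     LDR r1, =saved_context     ; hypothetical argument for the save
+ ;     SMC #0                     ; trap into this monitor vector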
+do_smc FUNCTION
+ ; Switch to the secure banked CP15 registers (clear SCR.NS)
+ MRC p15, 0, r2, c1, c1, 0
+ BIC r2, #SCR_NS
+ MCR p15, 0, r2, c1, c1, 0
+ ISB
+
+ ; Check if we are being called to setup the world
+ CMP r0, #SMC_SEC_INIT
+ BEQ setup_secure
+
+ CMP r0, #SMC_SEC_SAVE
+ BEQ save_secure
+
+ CMP r0, #SMC_SEC_SHUTDOWN
+ BEQ shutdown_cluster
+
+smc_done
+ ; Return to non-secure banked registers
+ MRC p15, 0, r0, c1, c1, 0
+ ORR r0, #SCR_NS
+ MCR p15, 0, r0, c1, c1, 0
+ ISB
+ ERET
+ ENDFUNC
+
+shutdown_cluster
+ BL read_cpuid
+ BL get_powerdown_stack
+ MOV sp, r0
+ BL powerdown_cluster
+enter_wfi
+ BL wfi
+ B enter_wfi
+
+save_secure
+ PUSH {lr}
+ MOV r0, r1
+ BL secure_context_save
+ POP {lr}
+ B smc_done
+
+setup_secure
+ ; Save the LR
+ MOV r4, lr
+
+ ; Turn on the I-cache, branch predictor and alignment (U bit)
+ BL read_sctlr
+ ORR r0, #CR_I
+ ORR r0, #CR_U
+ ORR r0, #CR_Z
+ BL write_sctlr
+ DSB
+ ISB
+
+ setup_stack
+
+ ; ----------------------------------------------------
+ ; Safely turn on caches
+ ; TODO: Expensive usage of stacks as we are executing
+ ; out of SO memory. Done only once so can live with it
+ ; ----------------------------------------------------
+ BL enable_caches
+
+ ; ----------------------------------------------------
+ ; Add a page backed by device memory for locks & stacks
+ ; ----------------------------------------------------
+ LDR r0, =flat_pagetables
+ BL add_dv_page
+ setup_mmu r1, r2
+
+ ; Restore LR
+ MOV lr, r4
+ B smc_done
+
+warm_reset FUNCTION
+ ; ----------------------------------------------------
+ ; Start the SO load of the pagetables asap
+ ; ----------------------------------------------------
+ LDR r4, =flat_pagetables
+
+ ; ----------------------------------------------------
+ ; Enable I, C, Z, U bits in the SCTLR and SMP bit in
+ ; the ACTLR right after reset
+ ; ----------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, r0, #CR_I
+ ORR r0, r0, #CR_U
+ ORR r0, r0, #CR_Z
+ ORR r0, r0, #CR_C
+ MCR p15, 0, r0, c1, c0, 0
+ MRC p15, 0, r1, c1, c0, 1
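+ ; bit 6 of the ACTLR is the SMP bit on Cortex-A15/A7 class cores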
+ ORR r1, r1, #0x40
+ MCR p15, 0, r1, c1, c0, 1
+ ISB
+
+ ; ----------------------------------------------------
+ ; Enable the MMU even though CCI snoops have not been
+ ; enabled. Should not be a problem as we will not
+ ; access any inter-cluster data till we do so
+ ; ----------------------------------------------------
+ MOVW r2, #0x5555
+ MOVT r2, #0x5555
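+ ; as in setup_mmu, 0x55555555 sets every DACR domain to 'Client'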
+ ; Install our flat page tables
+ ORR r4, #TTBR0_PROP
+ ; Write TTBR0
+ MCR p15, 0, r4, c2, c0, 0
+ ; Write DACR
+ MCR p15, 0, r2, c3, c0, 0
+
+ ; Enable the remap registers to treat OSH memory as ISH memory
+ MOV r2, #PRRR_TR0
+ ORR r2, #PRRR_TR1
+ ORR r2, #PRRR_TR4
+ ORR r2, #PRRR_TR7
+ ORR r2, #PRRR_NOS1
+ ORR r2, #PRRR_NOS4
+ ORR r2, #PRRR_NOS7
+ ORR r2, #PRRR_NS1
+ ORR r2, #PRRR_DS1
+ MOV r3, #NMRR_IR4
+ ORR r3, #NMRR_IR7
+ ORR r3, #NMRR_OR4
+ ORR r3, #NMRR_OR7
+ MCR p15, 0, r2, c10, c2, 0
+ MCR p15, 0, r3, c10, c2, 1
+
+ ; Enable Dcache, TEX Remap & MMU
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, #CR_M
+ ORR r0, #CR_C
+ ORR r0, #CR_TRE
+ MCR p15, 0, r0, c1, c0, 0
+ ISB
+
+ ; ----------------------------------------------------
+ ; Try preloading the literal pools before they are
+ ; accessed.
+ ; ----------------------------------------------------
+ ADR r4, warm_reset_ltrls
+ PLD [r4]
+ PLD warm_reset_ltrls
+ LDR r6, =secure_stacks
+
+ ; ----------------------------------------------------
+ ; Safely turn on CCI snoops
+ ; ----------------------------------------------------
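+ ; The MPIDR is read to find this CPU's position: Aff1 selects the
+ ; cluster (cluster 0 is treated as the A15 cluster here) and only CPU 0
+ ; of each cluster performs the write. Writing 3 to offset 0x4000 or
+ ; 0x5000 from the CCI base (0x2c090000) enables snoop and DVM requests
+ ; on that cluster's slave interface; the loop below then waits for the
+ ; enable bits to read back as set and for the CCI change-pending bit
+ ; (offset 0xc, bit 0) to clear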
+ MOV r4, #0x0
+ MOVT r4, #0x2c09
+ MRC p15, 0, r0, c0, c0, 5
+ UBFX r1, r0, #0, #8
+ UBFX r2, r0, #8, #8
+ MOV r3, #3
+ CMP r2, #0
+ BEQ a15_snoops
+ MOV r5, #0x5000
+ CMP r1, #0
+ BNE cci_snoop_status
+ STR r3, [r4, r5]
+ B cci_snoop_status
+a15_snoops
+ MOV r5, #0x4000
+ CMP r1, #0
+ BNE cci_snoop_status
+ STR r3, [r4, r5]
+cci_snoop_status
+ LDR r0, [r4, r5]
+ TST r0, #3
+ BEQ cci_snoop_status
+ LDR r0, [r4, #0xc]
+ TST r0, #1
+ BNE cci_snoop_status
+
+ ; ----------------------------------------------------
+ ; Switch to Monitor mode straight away as we do not want to worry
+ ; about setting up Secure SVC stacks. All Secure world save/restore
+ ; takes place in the monitor mode.
+ ; ----------------------------------------------------
+ MRS r5, cpsr ; Get current mode (SVC) in r5
+ BIC r1, r5, #0x1f ; Clear all mode bits
+ ORR r1, r1, #0x16 ; Set bits for Monitor mode
+ MSR cpsr_cxsf, r1 ; We are now in Monitor Mode
+ BIC r1, r5, #0x1f ; Clear all mode bits
+ ORR r1, r1, #0x1a ; Set bits for a return to the HYP mode
+ MSR spsr_cxsf, r1
+
+ MOV r0, r6
+ MOV r1, #STACK_SIZE
+ BL get_sp
+ MOV sp, r0
+
+ ; Restore secure world context & enable MMU
+ BL secure_context_restore
+
+ ; Switch to non-secure registers for HYP &
+ ; later non-secure world restore.
+ MRC p15, 0, r1, c1, c1, 0
+ ORR r1, #SCR_NS
+ MCR p15, 0, r1, c1, c1, 0
+ ISB
+
+ ; Setup the NS link register
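+ ; ns_entry_ptr is an array of per-CPU non-secure entry points,
+ ; indexed here by the CPU id (MPIDR Aff0)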
+ MRC p15, 0, r0, c0, c0, 5
+ ANDS r0, r0, #0xf
+ LDR r1, =ns_entry_ptr
+ ADD r1, r1, r0, lsl #2
+ LDR lr, [r1]
+ ; Switch to Non-secure world
+ ERET
+warm_reset_ltrls
+ ENDFUNC
+
+ AREA |.bss.stacks|, DATA, NOINIT, ALIGN=6
+secure_stacks SPACE MAX_CLUSTERS*MAX_CPUS*STACK_SIZE
+ END