author     Nicolas Royer <nroyer@baylibre.com>   2020-09-27 16:44:13 +0200
committer  nicola-mazzucato-arm <42373140+nicola-mazzucato-arm@users.noreply.github.com>   2020-10-15 17:45:38 +0100
commit     db2d011984a280afbfdcde930ba1965db0d2b8d5 (patch)
tree       b3ce0004618a9369109791b2c5c3eccdf7df9786
parent     3ed98096b99953d02bf1a27006c7e4c49935a007 (diff)
arch: add armv8-a support
Change-Id: I4ba1332eeea2fd19d2ae60281e0894a1c262ea58
Signed-off-by: Tsutomu Muroya <tsutomu.muroya.jy@bp.renesas.com>
Signed-off-by: Nicolas Royer <nroyer@baylibre.com>
-rw-r--r--  arch/arm/armv8-a/arch.mk                                        |   17
-rw-r--r--  arch/arm/armv8-a/include/arch.h                                 |  850
-rw-r--r--  arch/arm/armv8-a/include/arch_gic.h                             |  134
-rw-r--r--  arch/arm/armv8-a/include/arch_helpers.h                         |  552
-rw-r--r--  arch/arm/armv8-a/include/arch_system.h                          |   25
-rw-r--r--  arch/arm/armv8-a/include/asm_macros.S                           |  194
-rw-r--r--  arch/arm/armv8-a/include/assert_macros.S                        |   30
-rw-r--r--  arch/arm/armv8-a/include/common/asm_macros_common.S             |  108
-rw-r--r--  arch/arm/armv8-a/include/common/debug.h                         |   31
-rw-r--r--  arch/arm/armv8-a/include/lib/mmio.h                             |   79
-rw-r--r--  arch/arm/armv8-a/include/lib/utils_def.h                        |  170
-rw-r--r--  arch/arm/armv8-a/src/arch.ld.S                                  |  218
-rw-r--r--  arch/arm/armv8-a/src/arch_cache_helpers.S                       |  204
-rw-r--r--  arch/arm/armv8-a/src/arch_crt0.S                                |  186
-rw-r--r--  arch/arm/armv8-a/src/arch_exceptions.S                          |  127
-rw-r--r--  arch/arm/armv8-a/src/arch_gic.c                                 |  586
-rw-r--r--  arch/arm/armv8-a/src/arch_libc.c                                |  260
-rw-r--r--  arch/arm/armv8-a/src/arch_main.c                                |   46
-rw-r--r--  arch/arm/armv8-a/src/arch_misc_helpers.S                        |  511
-rw-r--r--  arch/arm/armv8-a/src/arch_mm.c                                  |   66
-rw-r--r--  product/rcar/module/rcar_mstp_clock/src/mod_rcar_mstp_clock.c   |    1
-rw-r--r--  tools/build_system/cpu.mk                                       |   10
-rw-r--r--  tools/build_system/rules.mk                                     |   26
23 files changed, 4426 insertions(+), 5 deletions(-)
diff --git a/arch/arm/armv8-a/arch.mk b/arch/arm/armv8-a/arch.mk
new file mode 100644
index 00000000..156883df
--- /dev/null
+++ b/arch/arm/armv8-a/arch.mk
@@ -0,0 +1,17 @@
+#
+# Arm SCP/MCP Software
+# Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_cache_helpers.S
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_crt0.S
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_exceptions.S
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_gic.c
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_libc.c
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_main.c
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_misc_helpers.S
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) += arch_mm.c
+
+BS_LIB_SOURCES_$(BS_ARCH_ARCH) := $(addprefix $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/src/,$(BS_LIB_SOURCES_$(BS_ARCH_ARCH)))
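+
+# For example (illustrative), with BS_ARCH_VENDOR=arm and BS_ARCH_ARCH=armv8-a,
+# 'arch_crt0.S' above expands to '$(ARCH_DIR)/arm/armv8-a/src/arch_crt0.S'.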
diff --git a/arch/arm/armv8-a/include/arch.h b/arch/arm/armv8-a/include/arch.h
new file mode 100644
index 00000000..40221fae
--- /dev/null
+++ b/arch/arm/armv8-a/include/arch.h
@@ -0,0 +1,850 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(0x18)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_VAR_MASK U(0xf)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_REV_MASK U(0xf)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (ULL(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK ULL(0xff)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF3_SHIFT U(32)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFFLVL0 ULL(0x0)
+#define MPIDR_AFFLVL1 ULL(0x1)
+#define MPIDR_AFFLVL2 ULL(0x2)
+#define MPIDR_AFFLVL3 ULL(0x3)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * MPIDR_MAX_AFFLVL is a zero-based count, so take care to add one when
+ * using it to define array sizes.
+ * TODO: only the first three affinity levels are supported for now.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+#define MPID_MASK \
+ (MPIDR_MT_MASK | (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
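+
+/*
+ * Worked example (illustrative): for mpidr == 0x80000102, Aff0 == 2 and
+ * Aff1 == 1, so MPIDR_AFFLVL0_VAL(mpidr) yields 2, MPIDR_AFFLVL1_VAL(mpidr)
+ * yields 1 and MPIDR_AFF_ID(mpidr, 2) yields 0.
+ */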
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
+
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_SGI1R S3_0_C12_C11_5
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_c12_c12_7
+#define ICC_IGRPEN0_EL1 S3_0_c12_c12_6
+#define ICC_HPPIR0_EL1 S3_0_c12_c8_2
+#define ICC_HPPIR1_EL1 S3_0_c12_c12_2
+#define ICC_IAR0_EL1 S3_0_c12_c8_0
+#define ICC_IAR1_EL1 S3_0_c12_c12_0
+#define ICC_EOIR0_EL1 S3_0_c12_c8_1
+#define ICC_EOIR1_EL1 S3_0_c12_c12_1
+#define ICC_SGI0R_EL1 S3_0_c12_c11_7
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW U(0x0)
+#define DCCISW U(0x1)
+#if ERRATA_A53_827319
+# define DCCSW DCCISW
+#else
+# define DCCSW U(0x2)
+#endif
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+
+#define EL_IMPL_NONE ULL(0)
+#define EL_IMPL_A64ONLY ULL(1)
+#define EL_IMPL_A64_A32 ULL(2)
+
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+
+/* ID_AA64ISAR1_EL1 definitions */
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_WIDTH U(4)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_WIDTH U(4)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_WIDTH U(4)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_WIDTH U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK ULL(0xf)
+
+#define PARANGE_0000 U(32)
+#define PARANGE_0001 U(36)
+#define PARANGE_0010 U(40)
+#define PARANGE_0011 U(42)
+#define PARANGE_0100 U(44)
+#define PARANGE_0101 U(48)
+#define PARANGE_0110 U(52)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64MMFR2_EL1 definitions */
+#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+
+#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
+#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
+
+#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
+#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
+
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
+
+#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+
+/* ID_PFR1_EL1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) \
+ (((id) >> ID_PFR1_VIRTEXT_SHIFT) & ID_PFR1_VIRTEXT_MASK)
+
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 \
+ ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | (U(1) << 22) | \
+ (U(1) << 18) | (U(1) << 16) | (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 \
+ ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | (U(1) << 22) | \
+ (U(1) << 20) | (U(1) << 11))
+#define SCTLR_AARCH32_EL1_RES1 \
+ ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | (U(1) << 4) | (U(1) << 3))
+
+#define SCTLR_EL3_RES1 \
+ ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | (U(1) << 22) | \
+ (U(1) << 18) | (U(1) << 16) | (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT (ULL(1) << 0)
+#define SCTLR_A_BIT (ULL(1) << 1)
+#define SCTLR_C_BIT (ULL(1) << 2)
+#define SCTLR_SA_BIT (ULL(1) << 3)
+#define SCTLR_SA0_BIT (ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT (ULL(1) << 5)
+#define SCTLR_ITD_BIT (ULL(1) << 7)
+#define SCTLR_SED_BIT (ULL(1) << 8)
+#define SCTLR_UMA_BIT (ULL(1) << 9)
+#define SCTLR_I_BIT (ULL(1) << 12)
+#define SCTLR_V_BIT (ULL(1) << 13)
+#define SCTLR_DZE_BIT (ULL(1) << 14)
+#define SCTLR_UCT_BIT (ULL(1) << 15)
+#define SCTLR_NTWI_BIT (ULL(1) << 16)
+#define SCTLR_NTWE_BIT (ULL(1) << 18)
+#define SCTLR_WXN_BIT (ULL(1) << 19)
+#define SCTLR_UWXN_BIT (ULL(1) << 20)
+#define SCTLR_IESB_BIT (ULL(1) << 21)
+#define SCTLR_E0E_BIT (ULL(1) << 24)
+#define SCTLR_EE_BIT (ULL(1) << 25)
+#define SCTLR_UCI_BIT (ULL(1) << 26)
+#define SCTLR_EnIA_BIT (ULL(1) << 31)
+#define SCTLR_DSSBS_BIT (ULL(1) << 44)
+#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+
+/* CPACR_EL1 definitions */
+#define CPACR_EL1_FPEN(x) ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0 U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT (U(1) << 21)
+#define SCR_API_BIT (U(1) << 17)
+#define SCR_APK_BIT (U(1) << 16)
+#define SCR_TWE_BIT (U(1) << 13)
+#define SCR_TWI_BIT (U(1) << 12)
+#define SCR_ST_BIT (U(1) << 11)
+#define SCR_RW_BIT (U(1) << 10)
+#define SCR_SIF_BIT (U(1) << 9)
+#define SCR_HCE_BIT (U(1) << 8)
+#define SCR_SMD_BIT (U(1) << 7)
+#define SCR_EA_BIT (U(1) << 3)
+#define SCR_FIQ_BIT (U(1) << 2)
+#define SCR_IRQ_BIT (U(1) << 1)
+#define SCR_NS_BIT (U(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_RESET_VAL SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_SPD32(x) ((x) << 14)
+#define MDCR_SPD32_LEGACY ULL(0x0)
+#define MDCR_SPD32_DISABLE ULL(0x2)
+#define MDCR_SPD32_ENABLE ULL(0x3)
+#define MDCR_SDD_BIT (ULL(1) << 16)
+#define MDCR_NSPB(x) ((x) << 12)
+#define MDCR_NSPB_EL1 ULL(0x3)
+#define MDCR_TDOSA_BIT (ULL(1) << 10)
+#define MDCR_TDA_BIT (ULL(1) << 9)
+#define MDCR_TPM_BIT (ULL(1) << 6)
+#define MDCR_SCCD_BIT (ULL(1) << 23)
+#define MDCR_EL3_RESET_VAL ULL(0x0)
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_TPMS (U(1) << 14)
+#define MDCR_EL2_E2PB(x) ((x) << 12)
+#define MDCR_EL2_E2PB_EL1 U(0x3)
+#define MDCR_EL2_TDRA_BIT (U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT (U(1) << 10)
+#define MDCR_EL2_TDA_BIT (U(1) << 9)
+#define MDCR_EL2_TDE_BIT (U(1) << 8)
+#define MDCR_EL2_HPME_BIT (U(1) << 7)
+#define MDCR_EL2_TPM_BIT (U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_RESET_VAL U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL U(0x0)
+#define HSTR_EL2_T_MASK U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT (U(1) << 0)
+#define CNTHP_CTL_RESET_VAL U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL ULL(0x0)
+#define VTTBR_VMID_MASK ULL(0xff)
+#define VTTBR_VMID_SHIFT U(48)
+#define VTTBR_BADDR_MASK ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT U(0)
+
+/* HCR definitions */
+#define HCR_API_BIT (ULL(1) << 41)
+#define HCR_APK_BIT (ULL(1) << 40)
+#define HCR_TGE_BIT (ULL(1) << 27)
+#define HCR_RW_SHIFT U(31)
+#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
+#define HCR_AMO_BIT (ULL(1) << 5)
+#define HCR_IMO_BIT (ULL(1) << 4)
+#define HCR_FMO_BIT (ULL(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT U(8)
+#define ISR_I_SHIFT U(7)
+#define ISR_F_SHIFT U(6)
+
+/* CNTHCTL_EL2 definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define EVNTEN_BIT (U(1) << 2)
+#define EL1PCEN_BIT (U(1) << 1)
+#define EL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT (U(1) << 9)
+#define EL0VTEN_BIT (U(1) << 8)
+#define EL0PCTEN_BIT (U(1) << 0)
+#define EL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* CPTR_EL3 definitions */
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_BIT (U(1) << 30)
+#define TTA_BIT (U(1) << 20)
+#define TFP_BIT (U(1) << 10)
+#define CPTR_EZ_BIT (U(1) << 8)
+#define CPTR_EL3_RESET_VAL U(0x0)
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
+#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
+#define CPTR_EL2_TAM_BIT (U(1) << 30)
+#define CPTR_EL2_TTA_BIT (U(1) << 20)
+#define CPTR_EL2_TFP_BIT (U(1) << 10)
+#define CPTR_EL2_TZ_BIT (U(1) << 8)
+#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT (U(1) << 0)
+#define DAIF_IRQ_BIT (U(1) << 1)
+#define DAIF_ABT_BIT (U(1) << 2)
+#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_DAIF_SHIFT U(6)
+#define SPSR_DAIF_MASK U(0xf)
+
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0x0)
+#define SPSR_E_BIG U(0x1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0x0)
+#define SPSR_T_THUMB U(0x1)
+
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT (U(1) << 1)
+#define RMR_EL3_AA64_BIT (U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE U(0xFFFF0000)
+
+/*
+ * TCR definitions
+ */
+#define TCR_EL3_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
+#define TCR_EL3_PS_SHIFT U(16)
+
+#define TCR_TxSZ_MIN ULL(16)
+#define TCR_TxSZ_MAX ULL(39)
+#define TCR_TxSZ_MAX_TTST ULL(48)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB ULL(0x0)
+#define TCR_PS_BITS_64GB ULL(0x1)
+#define TCR_PS_BITS_1TB ULL(0x2)
+#define TCR_PS_BITS_4TB ULL(0x3)
+#define TCR_PS_BITS_16TB ULL(0x4)
+#define TCR_PS_BITS_256TB ULL(0x5)
+
+#define ADDR_MASK_48_TO_63 ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47 ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43 ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41 ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39 ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35 ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_TG0_SHIFT U(14)
+#define TCR_TG0_MASK ULL(3)
+#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_EPD0_BIT (ULL(1) << 7)
+#define TCR_EPD1_BIT (ULL(1) << 23)
+
+#define MODE_SP_SHIFT U(0x0)
+#define MODE_SP_MASK U(0x1)
+#define MODE_SP_EL0 U(0x0)
+#define MODE_SP_ELX U(0x1)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_64 U(0x0)
+#define MODE_RW_32 U(0x1)
+
+#define MODE_EL_SHIFT U(0x2)
+#define MODE_EL_MASK U(0x3)
+#define MODE_EL3 U(0x3)
+#define MODE_EL2 U(0x2)
+#define MODE_EL1 U(0x1)
+#define MODE_EL0 U(0x0)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0xf)
+#define MODE32_usr U(0x0)
+#define MODE32_fiq U(0x1)
+#define MODE32_irq U(0x2)
+#define MODE32_svc U(0x3)
+#define MODE32_mon U(0x6)
+#define MODE32_abt U(0x7)
+#define MODE32_hyp U(0xa)
+#define MODE32_und U(0xb)
+#define MODE32_sys U(0xf)
+
+#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif) \
+ ((MODE_RW_64 << MODE_RW_SHIFT) | (((el)&MODE_EL_MASK) << MODE_EL_SHIFT) | \
+ (((sp)&MODE_SP_MASK) << MODE_SP_SHIFT) | \
+ (((daif)&SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ ((MODE_RW_32 << MODE_RW_SHIFT) | (((mode)&MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa)&SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian)&SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif)&SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
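+
+/*
+ * Worked example (illustrative): SPSR_64(MODE_EL3, MODE_SP_ELX,
+ * DISABLE_ALL_EXCEPTIONS) evaluates to 0x3cd, an AArch64 EL3h target state
+ * with all DAIF exceptions masked.
+ */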
+
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT U(24)
+#define CTR_CWG_MASK U(0xf)
+#define CTR_ERG_SHIFT U(20)
+#define CTR_ERG_MASK U(0xf)
+#define CTR_DMINLINE_SHIFT U(16)
+#define CTR_DMINLINE_MASK U(0xf)
+#define CTR_L1IP_SHIFT U(14)
+#define CTR_L1IP_MASK U(0x3)
+#define CTR_IMINLINE_SHIFT U(0)
+#define CTR_IMINLINE_MASK U(0xf)
+
+#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT U(0)
+#define CNTP_CTL_IMASK_SHIFT U(1)
+#define CNTP_CTL_ISTATUS_SHIFT U(2)
+
+#define CNTP_CTL_ENABLE_MASK U(1)
+#define CNTP_CTL_IMASK_MASK U(1)
+#define CNTP_CTL_ISTATUS_MASK U(1)
+
+/* Exception Syndrome register bits and bobs */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR U(0x5)
+#define EC_AARCH32_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_AARCH32_CP10_MRC U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_AARCH32_SVC U(0x11)
+#define EC_AARCH32_HVC U(0x12)
+#define EC_AARCH32_SMC U(0x13)
+#define EC_AARCH64_SVC U(0x15)
+#define EC_AARCH64_HVC U(0x16)
+#define EC_AARCH64_SMC U(0x17)
+#define EC_AARCH64_SYS U(0x18)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_AARCH32_FP U(0x28)
+#define EC_AARCH64_FP U(0x2c)
+#define EC_SERROR U(0x2f)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT U(9)
+
+#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT U(0x1)
+#define RMR_WARM_RESET_CPU (U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(12)
+#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ U(0x0)
+#define CNTNSAR U(0x4)
+#define CNTNSAR_NS_SHIFT(x) (x)
+
+#define CNTACR_BASE(x) (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT U(0x0)
+#define CNTACR_RVCT_SHIFT U(0x1)
+#define CNTACR_RFRQ_SHIFT U(0x2)
+#define CNTACR_RVOFF_SHIFT U(0x3)
+#define CNTACR_RWVT_SHIFT U(0x4)
+#define CNTACR_RWPT_SHIFT U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL U(0x2c)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL U(0x0)
+#define PMCR_EL0_N_SHIFT U(11)
+#define PMCR_EL0_N_MASK U(0x1f)
+#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT (U(1) << 6)
+#define PMCR_EL0_DP_BIT (U(1) << 5)
+#define PMCR_EL0_X_BIT (U(1) << 4)
+#define PMCR_EL0_D_BIT (U(1) << 3)
+
+/*******************************************************************************
+ * Definitions for system register interface to SVE
+ ******************************************************************************/
+#define ZCR_EL3 S3_6_C1_C2_0
+#define ZCR_EL2 S3_4_C1_C2_0
+
+/* ZCR_EL3 definitions */
+#define ZCR_EL3_LEN_MASK U(0xf)
+
+/* ZCR_EL2 definitions */
+#define ZCR_EL2_LEN_MASK U(0xf)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE ULL(0x0)
+#define MAIR_DEV_nGnRE ULL(0x4)
+#define MAIR_DEV_nGRE ULL(0x8)
+#define MAIR_DEV_GRE ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA ULL(0x1)
+#define MAIR_NORM_WT_TR_RA ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA ULL(0x3)
+#define MAIR_NORM_NC ULL(0x4)
+#define MAIR_NORM_WB_TR_WA ULL(0x5)
+#define MAIR_NORM_WB_TR_RA ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
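+
+/*
+ * Worked example (illustrative):
+ * MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+ * yields 0xff, the encoding for inner and outer write-back, non-transient,
+ * read/write-allocate normal memory.
+ */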
+
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMBLIMITR_EL1 S3_0_C9_C10_0
+
+/*******************************************************************************
+ * Definitions for system register interface to MPAM
+ ******************************************************************************/
+#define MPAMIDR_EL1 S3_0_C10_C4_4
+#define MPAM2_EL2 S3_4_C10_C5_0
+#define MPAMHCR_EL2 S3_4_C10_C4_0
+#define MPAM3_EL3 S3_6_C10_C5_0
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR_EL0 S3_3_C13_C2_0
+#define AMCFGR_EL0 S3_3_C13_C2_1
+#define AMCGCR_EL0 S3_3_C13_C2_2
+#define AMUSERENR_EL0 S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0 S3_3_C13_C2_4
+#define AMCNTENSET0_EL0 S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0 S3_3_C13_C3_0
+#define AMCNTENSET1_EL0 S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0 S3_3_C13_C4_0
+#define AMEVCNTR01_EL0 S3_3_C13_C4_1
+#define AMEVCNTR02_EL0 S3_3_C13_C4_2
+#define AMEVCNTR03_EL0 S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0 S3_3_C13_C6_0
+#define AMEVTYPER01_EL0 S3_3_C13_C6_1
+#define AMEVTYPER02_EL0 S3_3_C13_C6_2
+#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
+/* MPAM register definitions */
+#define MPAM3_EL3_MPAMEN_BIT (ULL(1) << 63)
+#define MPAMHCR_EL2_TRAP_MPAMIDR_EL1 (ULL(1) << 31)
+
+#define MPAM2_EL2_TRAPMPAM0EL1 (ULL(1) << 49)
+#define MPAM2_EL2_TRAPMPAM1EL1 (ULL(1) << 48)
+
+#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
+
+/*******************************************************************************
+ * RAS system registers
+ ******************************************************************************/
+#define DISR_EL1 S3_0_C12_C1_1
+#define DISR_A_BIT U(31)
+
+#define ERRIDR_EL1 S3_0_C5_C3_0
+#define ERRIDR_MASK U(0xffff)
+
+#define ERRSELR_EL1 S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1 S3_0_C5_C4_0
+#define ERXCTLR_EL1 S3_0_C5_C4_1
+#define ERXSTATUS_EL1 S3_0_C5_C4_2
+#define ERXADDR_EL1 S3_0_C5_C4_3
+#define ERXPFGF_EL1 S3_0_C5_C4_4
+#define ERXPFGCTL_EL1 S3_0_C5_C4_5
+#define ERXPFGCDN_EL1 S3_0_C5_C4_6
+#define ERXMISC0_EL1 S3_0_C5_C5_0
+#define ERXMISC1_EL1 S3_0_C5_C5_1
+
+#define ERXCTLR_ED_BIT (U(1) << 0)
+#define ERXCTLR_UE_BIT (U(1) << 4)
+
+#define ERXPFGCTL_UC_BIT (U(1) << 1)
+#define ERXPFGCTL_UEU_BIT (U(1) << 2)
+#define ERXPFGCTL_CDEN_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Armv8.3 Pointer Authentication Registers
+ ******************************************************************************/
+#define APIAKeyLo_EL1 S3_0_C2_C1_0
+#define APIAKeyHi_EL1 S3_0_C2_C1_1
+#define APIBKeyLo_EL1 S3_0_C2_C1_2
+#define APIBKeyHi_EL1 S3_0_C2_C1_3
+#define APDAKeyLo_EL1 S3_0_C2_C2_0
+#define APDAKeyHi_EL1 S3_0_C2_C2_1
+#define APDBKeyLo_EL1 S3_0_C2_C2_2
+#define APDBKeyHi_EL1 S3_0_C2_C2_3
+#define APGAKeyLo_EL1 S3_0_C2_C3_0
+#define APGAKeyHi_EL1 S3_0_C2_C3_1
+
+/*******************************************************************************
+ * Armv8.4 Data Independent Timing Registers
+ ******************************************************************************/
+#define DIT S3_3_C4_C2_5
+#define DIT_BIT BIT(24)
+
+/*******************************************************************************
+ * Armv8.5 - new MSR encoding to directly access PSTATE.SSBS field
+ ******************************************************************************/
+#define SSBS S3_3_C4_C2_6
+
+#endif /* ARCH_H */
diff --git a/arch/arm/armv8-a/include/arch_gic.h b/arch/arm/armv8-a/include/arch_gic.h
new file mode 100644
index 00000000..68effffd
--- /dev/null
+++ b/arch/arm/armv8-a/include/arch_gic.h
@@ -0,0 +1,134 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARMV8A_GIC_H
+#define ARMV8A_GIC_H
+
+#include <fwk_arch.h>
+
+#include <arch_helpers.h>
+
+/* Constants to categorise priorities */
+#define GIC_HIGHEST_SEC_PRIORITY 0x0
+#define GIC_LOWEST_SEC_PRIORITY 0x7f
+#define GIC_HIGHEST_NS_PRIORITY 0x80
+#define GIC_LOWEST_NS_PRIORITY 0xfe /* 0xff would disable all interrupts */
+
+/*******************************************************************************
+ * GIC Distributor interface general definitions
+ ******************************************************************************/
+/* Constants to categorise interrupts */
+#define MIN_SGI_ID U(0)
+#define MIN_SEC_SGI_ID U(8)
+#define MIN_PPI_ID U(16)
+#define MIN_SPI_ID U(32)
+#define MAX_SPI_ID U(1019)
+
+/* Mask for the priority field common to all GIC interfaces */
+#define GIC_PRI_MASK U(0xff)
+
+/* Mask for the configuration field common to all GIC interfaces */
+#define GIC_CFG_MASK U(0x3)
+
+/*******************************************************************************
+ * GIC Distributor interface register offsets that are common to GICv2
+ ******************************************************************************/
+#define GICD_CTLR U(0x0)
+#define GICD_TYPER U(0x4)
+#define GICD_IIDR U(0x8)
+#define GICD_IGROUPR U(0x80)
+#define GICD_ISENABLER U(0x100)
+#define GICD_ICENABLER U(0x180)
+#define GICD_ISPENDR U(0x200)
+#define GICD_ICPENDR U(0x280)
+#define GICD_ISACTIVER U(0x300)
+#define GICD_ICACTIVER U(0x380)
+#define GICD_IPRIORITYR U(0x400)
+#define GICD_ITARGETSR U(0x800)
+#define GICD_ICFGR U(0xc00)
+#define GICD_NSACR U(0xe00)
+
+/* GICD_CTLR bit definitions */
+#define CTLR_ENABLE_G0_SHIFT 0
+#define CTLR_ENABLE_G0_MASK U(0x1)
+#define CTLR_ENABLE_G0_BIT BIT_32(CTLR_ENABLE_G0_SHIFT)
+#define CTLR_ENABLE_G1_SHIFT 1
+#define CTLR_ENABLE_G1_MASK U(0x1)
+#define CTLR_ENABLE_G1_BIT BIT_32(CTLR_ENABLE_G1_SHIFT)
+
+/*******************************************************************************
+ * GICv2 specific CPU interface register offsets and constants.
+ ******************************************************************************/
+/* Physical CPU Interface registers */
+#define GICC_CTLR U(0x0)
+#define GICC_PMR U(0x4)
+#define GICC_BPR U(0x8)
+#define GICC_IAR U(0xC)
+#define GICC_EOIR U(0x10)
+#define GICC_RPR U(0x14)
+#define GICC_HPPIR U(0x18)
+#define GICC_AHPPIR U(0x28)
+#define GICC_IIDR U(0xFC)
+#define GICC_DIR U(0x1000)
+#define GICC_PRIODROP GICC_EOIR
+
+/* Common CPU Interface definitions */
+#define INT_ID_MASK U(0x3ff)
+#define INT_ID(n) ((n) & INT_ID_MASK)
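+
+/*
+ * Example (illustrative): INT_ID(0x12345) yields 0x345. The bits above the
+ * 10-bit interrupt ID (such as the source CPU ID that GICC_IAR reports for
+ * SGIs) are masked off.
+ */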
+
+/* GICC_CTLR bit definitions */
+#define EOI_MODE_NS (U(1) << 10)
+#define EOI_MODE_S (U(1) << 9)
+#define IRQ_BYP_DIS_GRP1 (U(1) << 8)
+#define FIQ_BYP_DIS_GRP1 (U(1) << 7)
+#define IRQ_BYP_DIS_GRP0 (U(1) << 6)
+#define FIQ_BYP_DIS_GRP0 (U(1) << 5)
+#define CBPR (U(1) << 4)
+#define FIQ_EN (U(1) << 3)
+#define ACK_CTL (U(1) << 2)
+#define ENABLE_G1 (U(1) << 1)
+#define ENABLE_G0 (U(1) << 0)
+#define FIQ_EN_SHIFT 3
+#define FIQ_EN_BIT BIT_32(FIQ_EN_SHIFT)
+
+/*******************************************************************************
+ * GIC Distributor interface register constants that are common to GICv3 & GICv2
+ ******************************************************************************/
+#define PIDR2_ARCH_REV_SHIFT 4
+#define PIDR2_ARCH_REV_MASK U(0xf)
+
+/* GICv3 revision as reported by the PIDR2 register */
+#define ARCH_REV_GICV3 U(0x3)
+/* GICv2 revision as reported by the PIDR2 register */
+#define ARCH_REV_GICV2 U(0x2)
+/* GICv1 revision as reported by the PIDR2 register */
+#define ARCH_REV_GICV1 U(0x1)
+
+#define IGROUPR_SHIFT 5
+#define ISENABLER_SHIFT 5
+#define ICENABLER_SHIFT ISENABLER_SHIFT
+#define ISPENDR_SHIFT 5
+#define ICPENDR_SHIFT ISPENDR_SHIFT
+#define ISACTIVER_SHIFT 5
+#define ICACTIVER_SHIFT ISACTIVER_SHIFT
+#define IPRIORITYR_SHIFT 2
+#define ITARGETSR_SHIFT 2
+#define ICFGR_SHIFT 4
+#define NSACR_SHIFT 4
+
+/* GIC */
+#define RCAR_GICD_BASE U(0xF1010000)
+#define RCAR_GICR_BASE U(0xF1010000)
+#define RCAR_GICC_BASE U(0xF1020000)
+#define RCAR_GICH_BASE U(0xF1040000)
+#define RCAR_GICV_BASE U(0xF1060000)
+
+void gic_init(void);
+int arm_gic_init(const struct fwk_arch_interrupt_driver **driver);
+void irq_global(uint32_t iid);
+
+#endif /* ARMV8A_GIC_H */
diff --git a/arch/arm/armv8-a/include/arch_helpers.h b/arch/arm/armv8-a/include/arch_helpers.h
new file mode 100644
index 00000000..bed0a0d2
--- /dev/null
+++ b/arch/arm/armv8-a/include/arch_helpers.h
@@ -0,0 +1,552 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <arch.h>
+
+#include <fwk_noreturn.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+ static inline unsigned long read_##_name(void) \
+ { \
+ unsigned long v; \
+ __asm__ volatile("mrs %0, " #_reg_name : "=r"(v)); \
+ return v; \
+ }
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+ static inline void write_##_name(unsigned long v) \
+ { \
+ __asm__ volatile("msr " #_reg_name ", %0" : : "r"(v)); \
+ }
+
+#define SYSREG_WRITE_CONST(reg_name, v) \
+ __asm__ volatile("msr " #reg_name ", %0" : : "i"(v))
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name) _DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
+
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
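+
+/*
+ * For example (illustrative), DEFINE_SYSREG_RW_FUNCS(daif) defines
+ * read_daif() and write_daif(), wrapping 'mrs %0, daif' and 'msr daif, %0'
+ * respectively.
+ */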
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+ static inline void _op(void) \
+ { \
+ __asm__(#_op); \
+ }
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+ static inline void _op##_type(void) \
+ { \
+ __asm__(#_op " " #_type); \
+ }
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+ static inline void _op##_type(uint64_t v) \
+ { \
+ __asm__(#_op " " #_type ", %0" : : "r"(v)); \
+ }
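+
+/*
+ * For example (illustrative), DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+ * defines tlbivaae1is(uint64_t v), which emits 'tlbi vaae1is, %0'.
+ */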
+
+/*******************************************************************************
+ * TLB maintenance accessor prototypes
+ ******************************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+# define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type) \
+ static inline void tlbi##_type(void) \
+ { \
+ __asm__("tlbi " #_type \
+ "\n" \
+ "dsb ish\n" \
+ "tlbi " #_type); \
+ }
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+# define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type) \
+ static inline void tlbi##_type(uint64_t v) \
+ { \
+ __asm__("tlbi " #_type \
+ ", %0\n" \
+ "dsb ish\n" \
+ "tlbi " #_type ", %0" \
+ : \
+ : "r"(v)); \
+ }
+#endif /* ERRATA_A57_813419 */
+
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+/*
+ * Define function for DC instruction with register parameter that enables
+ * the workaround for errata 819472, 824069 and 827319 of Cortex-A53.
+ */
+# define DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(_name, _type) \
+ static inline void dc##_name(uint64_t v) \
+ { \
+ __asm__("dc " #_type ", %0" : : "r"(v)); \
+ }
+#endif /* ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319 */
+
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
+#else
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+#endif
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
+
+/*******************************************************************************
+ * Cache maintenance accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+#if ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(csw, cisw)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+#endif
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvac, civac)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+#endif
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvau, civac)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+#endif
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+/*******************************************************************************
+ * Address translation accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e3r)
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(unsigned long op_type);
+void dcsw_op_all(unsigned long op_type);
+
+void disable_mmu_el1(void);
+void disable_mmu_el3(void);
+void disable_mmu_icache_el1(void);
+void disable_mmu_icache_el3(void);
+
+/*******************************************************************************
+ * Misc. accessor prototypes
+ ******************************************************************************/
+
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
+
+DEFINE_SYSREG_RW_FUNCS(par_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_afr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(spsr_el3)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el3)
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_FUNC(isb)
+
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+ * accesses to an area used by an interrupt handler, on the assumption
+ * that this is safe because interrupts are disabled at that point
+ * (according to program order). However, non-volatile accesses are not
+ * necessarily kept in program order relative to volatile inline assembly
+ * statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_DBG_BIT);
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_DBG_BIT);
+ isb();
+}
+
+#if !ERROR_DEPRECATED
+uint32_t get_afflvl_shift(uint32_t);
+uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
+
+noreturn void eret(
+ uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ uint64_t x5,
+ uint64_t x6,
+ uint64_t x7);
+#endif
+noreturn void smc(
+ uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ uint64_t x5,
+ uint64_t x6,
+ uint64_t x7);
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+
+DEFINE_SYSREG_RW_FUNCS(scr_el3)
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+DEFINE_SYSREG_RW_FUNCS(vbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+DEFINE_SYSREG_RW_FUNCS(actlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+DEFINE_SYSREG_RW_FUNCS(esr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
+
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+DEFINE_SYSREG_RW_FUNCS(far_el3)
+
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+DEFINE_SYSREG_RW_FUNCS(mair_el3)
+
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+DEFINE_SYSREG_RW_FUNCS(amair_el3)
+
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+DEFINE_SYSREG_READ_FUNC(rvbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+DEFINE_SYSREG_RW_FUNCS(rmr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+DEFINE_SYSREG_RW_FUNCS(tcr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+DEFINE_SYSREG_RW_FUNCS(cptr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+
+#define get_cntp_ctl_enable(x) \
+ (((x) >> CNTP_CTL_ENABLE_SHIFT) & CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) \
+ (((x) >> CNTP_CTL_IMASK_SHIFT) & CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) \
+ (((x) >> CNTP_CTL_ISTATUS_SHIFT) & CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
+
+DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
+DEFINE_SYSREG_READ_FUNC(isr_el1)
+
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
+DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+
+/* GICv3 System Registers */
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
+
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
+/* Armv8.2 Registers */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
+
+/* Armv8.3 Pointer Authentication Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeyhi_el1, APIAKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeylo_el1, APIAKeyLo_EL1)
+
+#define IS_IN_EL(x) (GET_EL(read_CurrentEl()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL2() IS_IN_EL(2)
+#define IS_IN_EL3() IS_IN_EL(3)
+
+static inline unsigned int get_current_el(void)
+{
+ return GET_EL(read_CurrentEl());
+}
+
+/*
+ * Check if an EL is implemented from AA64PFR0 register fields.
+ */
+static inline uint64_t el_implemented(unsigned int el)
+{
+ if (el > 3U) {
+ return EL_IMPL_NONE;
+ } else {
+ unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
+
+ return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
+ }
+}
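+
+/*
+ * For example (illustrative), el_implemented(2) returns EL_IMPL_NONE when
+ * EL2 is absent, EL_IMPL_A64ONLY when EL2 supports only AArch64, and
+ * EL_IMPL_A64_A32 when it supports both AArch64 and AArch32.
+ */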
+
+#if !ERROR_DEPRECATED
+# define EL_IMPLEMENTED(_el) el_implemented(_el)
+#endif
+
+/* Previously defined accessor functions with incomplete register names */
+
+#define read_current_el() read_CurrentEl()
+
+#define dsb() dsbsy()
+
+#define read_midr() read_midr_el1()
+
+#define read_mpidr() read_mpidr_el1()
+
+#define read_scr() read_scr_el3()
+#define write_scr(_v) write_scr_el3(_v)
+
+#define read_hcr() read_hcr_el2()
+#define write_hcr(_v) write_hcr_el2(_v)
+
+#define read_cpacr() read_cpacr_el1()
+#define write_cpacr(_v) write_cpacr_el1(_v)
+
+#endif /* ARCH_HELPERS_H */
diff --git a/arch/arm/armv8-a/include/arch_system.h b/arch/arm/armv8-a/include/arch_system.h
new file mode 100644
index 00000000..c2ab70d8
--- /dev/null
+++ b/arch/arm/armv8-a/include/arch_system.h
@@ -0,0 +1,25 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARMV8A_SYSTEM_H
+#define ARMV8A_SYSTEM_H
+
+#define R_WARMBOOT (0xAA55AA55)
+#define R_SUSPEND (0x55AA55AA)
+#define R_RESET (0x5555AAAA)
+#define R_OFF (0xAAAA5555)
+#define R_CLEAR (0)
+
+#ifdef __ASSEMBLY__
+.extern _boot_flag
+.extern _shutdown_request
+#else
+# include <stdint.h>
+
+extern volatile uint32_t _boot_flag;
+extern volatile uint32_t _shutdown_request;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ARMV8A_SYSTEM_H */
diff --git a/arch/arm/armv8-a/include/asm_macros.S b/arch/arm/armv8-a/include/asm_macros.S
new file mode 100644
index 00000000..bd961c44
--- /dev/null
+++ b/arch/arm/armv8-a/include/asm_macros.S
@@ -0,0 +1,194 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_S
+#define ASM_MACROS_S
+
+#include <common/asm_macros_common.S>
+
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * errata 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_type) \
+ tlbi _type; \
+ dsb ish; \
+ tlbi _type
+#else
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+#endif
+
+
+ .macro func_prologue
+ stp x29, x30, [sp, #-0x10]!
+ mov x29,sp
+ .endm
+
+ .macro func_epilogue
+ ldp x29, x30, [sp], #0x10
+ .endm
+
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro smc_check label
+ mrs x0, esr_el3
+ ubfx x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x0, #EC_AARCH64_SMC
+ b.ne \label
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing that it is aligned to a
+ * 2KB boundary, as required by the ARMv8 architecture. The padding is
+ * filled with zero bytes, which decode as illegal AArch64 instructions;
+ * this increases security and robustness, and can facilitate debugging.
+ */
+ .macro vector_base label, section_name=.vectors
+ .section \section_name, "ax"
+ .align 11, 0
+ \label:
+ .endm
+
+ /*
+ * Create an entry in the exception vector table, enforcing that it is
+ * aligned to a 128-byte boundary, as required by the ARMv8 architecture.
+ * The padding is filled with zero bytes, which decode as illegal AArch64
+ * instructions; this increases security and robustness, and can
+ * facilitate debugging.
+ */
+ .macro vector_entry label, section_name=.vectors
+ .cfi_sections .debug_frame
+ .section \section_name, "ax"
+ .align 7, 0
+ .type \label, %function
+ .cfi_startproc
+ \label:
+ .endm
+
+ /*
+ * Pad the exception vector entry out to its full size, which is always
+ * 32 instructions. If the entry contains more than 32 instructions, the
+ * '.fill' directive below emits an assembly error.
+ */
+ .macro end_vector_entry label
+ .cfi_endproc
+ .fill \label + (32 * 4) - .
+ .endm
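+
+ /*
+ * Illustrative usage of the three macros above (label names are
+ * hypothetical):
+ *
+ * vector_base my_vectors
+ * vector_entry my_sync_exception_sp0
+ * b .
+ * end_vector_entry my_sync_exception_sp0
+ */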
+
+ /*
+ * This macro calculates the base address of the current CPU's MP stack
+ * using the plat_my_core_pos() index, the name of the stack storage
+ * and the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_my_mp_stack _name, _size
+ bl plat_my_core_pos
+ adrp x2, (\_name + \_size)
+ add x2, x2, :lo12:(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
+ /*
+ * This macro calculates the base address of a UP stack using the
+ * name of the stack storage and the size of the stack
+ * Out: X0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ adrp x0, (\_name + \_size)
+ add x0, x0, :lo12:(\_name + \_size)
+ .endm
+
+ /*
+	 * Helper macro to generate the best mov/movk combinations according
+	 * to the value to be moved. The 16 bits from '_shift' are tested and,
+	 * if not zero, they are moved into '_reg' without affecting
+	 * other bits.
+ */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+ /*
+	 * Helper macro to load arbitrary values into 32-bit or 64-bit
+	 * registers, generating the best mov/movk combinations. Many base
+	 * addresses are 64KB aligned; in that case the macro skips updating
+	 * bits 15:0 entirely.
+ */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
+
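+	/*
+	 * Expansion sketch (illustrative): "mov_imm x0, 0x22001000" becomes
+	 * "mov x0, #0x1000" followed by "movk x0, #0x2200, LSL 16", while a
+	 * 64KB-aligned value such as 0x22000000 collapses to a single mov.
+	 */
+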
+ /*
+ * Macro to mark instances where we're jumping to a function and don't
+ * expect a return. To provide the function being jumped to with
+ * additional information, we use 'bl' instruction to jump rather than
+ * 'b'.
+ *
+	 * Debuggers infer the location of a call from where LR points to, which
+	 * is usually the instruction after 'bl'. If this macro expansion
+	 * happens to be the last location in a function, that will cause the LR
+	 * to point to a location beyond the function, thereby misleading the
+	 * debugger's back trace. We therefore insert a 'nop' after the function
+	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
+ */
+ .macro no_ret _func:req, skip_nop=0
+ bl \_func
+#if DEBUG
+ .ifeq \skip_nop
+ nop
+ .endif
+#endif
+ .endm
+
+ /*
+	 * Reserve space for a spin lock in an assembly file.
+ */
+ .macro define_asm_spinlock _name:req
+ .align SPINLOCK_ASM_ALIGN
+ \_name:
+ .space SPINLOCK_ASM_SIZE
+ .endm
+
+#if RAS_EXTENSION
+ .macro esb
+ .inst 0xd503221f
+ .endm
+#endif
+
+#endif /* ASM_MACROS_S */
diff --git a/arch/arm/armv8-a/include/assert_macros.S b/arch/arm/armv8-a/include/assert_macros.S
new file mode 100644
index 00000000..800f2674
--- /dev/null
+++ b/arch/arm/armv8-a/include/assert_macros.S
@@ -0,0 +1,30 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASSERT_MACROS_S
+#define ASSERT_MACROS_S
+
+ /*
+	 * Assembler macro to enable asm_assert. Use this macro wherever an
+	 * assert is required in assembly. Note that the macro uses the local
+	 * label '300' to provide its logic, so the caller must make sure that
+	 * this label is not used as a branch target before the point where
+	 * this macro is invoked.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b._cc 300f ;\
+ adr x0, .L_assert_filename ;\
+ mov x1, __LINE__ ;\
+ b asm_assert ;\
+300:
+
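+/*
+ * Usage sketch (illustrative): assert that x0 is non-zero. The condition
+ * passed to ASM_ASSERT is the one under which execution may continue.
+ *
+ *   cmp x0, #0
+ *   ASM_ASSERT(ne)
+ */
+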
+#endif /* ASSERT_MACROS_S */
diff --git a/arch/arm/armv8-a/include/common/asm_macros_common.S b/arch/arm/armv8-a/include/common/asm_macros_common.S
new file mode 100644
index 00000000..ed2b7158
--- /dev/null
+++ b/arch/arm/armv8-a/include/common/asm_macros_common.S
@@ -0,0 +1,108 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_COMMON_S
+#define ASM_MACROS_COMMON_S
+
+ /*
+ * This macro is used to create a function label and place the
+ * code into a separate text section based on the function name
+ * to enable elimination of unused code during linking. It also adds
+ * basic debug information to enable call stack printing most of the
+ * time. The optional _align parameter can be used to force a
+ * non-standard alignment (indicated in powers of 2). The default is
+	 * _align=2 because both AArch32 and AArch64 instructions must be
+	 * word aligned. Do *not* try to use a raw .align directive; since func
+ * switches to a new section, this would not have the desired effect.
+ */
+ .macro func _name, _align=2
+ /*
+ * Add Call Frame Information entry in the .debug_frame section for
+ * debugger consumption. This enables callstack printing in debuggers.
+ * This does not use any space in the final loaded binary, only in the
+ * ELF file.
+ * Note that a function manipulating the CFA pointer location (i.e. the
+ * x29 frame pointer on AArch64) should declare it using the
+ * appropriate .cfi* directives, or be prepared to have a degraded
+ * debugging experience.
+ */
+ .cfi_sections .debug_frame
+ .section .text.asm.\_name, "ax"
+ .type \_name, %function
+ /*
+ * .cfi_startproc and .cfi_endproc are needed to output entries in
+ * .debug_frame
+ */
+ .cfi_startproc
+ .align \_align
+ \_name:
+ .endm
+
+ /*
+ * This macro is used to mark the end of a function.
+ */
+ .macro endfunc _name
+ .cfi_endproc
+ .size \_name, . - \_name
+ .endm
+
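+	/*
+	 * Usage sketch (illustrative; the function name is hypothetical):
+	 *
+	 *   func my_increment
+	 *       add x0, x0, #1
+	 *       ret
+	 *   endfunc my_increment
+	 *
+	 * The body lands in its own .text.asm.my_increment section, so the
+	 * linker can discard it when nothing references it.
+	 */
+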
+ /*
+	 * These macros are used to create function labels for deprecated
+	 * APIs. If ERROR_DEPRECATED is non-zero, the callers of these APIs
+	 * will fail to link, causing a build failure.
+ */
+#if ERROR_DEPRECATED
+ .macro func_deprecated _name
+ func deprecated\_name
+ .endm
+
+ .macro endfunc_deprecated _name
+ endfunc deprecated\_name
+ .endm
+#else
+ .macro func_deprecated _name
+ func \_name
+ .endm
+
+ .macro endfunc_deprecated _name
+ endfunc \_name
+ .endm
+#endif
+
+ /*
+ * Helper assembler macro to count trailing zeros. The output is
+ * populated in the `TZ_COUNT` symbol.
+ */
+ .macro count_tz _value, _tz_count
+ .if \_value
+ count_tz "(\_value >> 1)", "(\_tz_count + 1)"
+ .else
+ .equ TZ_COUNT, (\_tz_count - 1)
+ .endif
+ .endm
+
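+	/*
+	 * Example (illustrative): "count_tz 64, 0" sets TZ_COUNT to 6,
+	 * since 64 == 1 << 6.
+	 */
+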
+ /*
+ * This macro declares an array of 1 or more stacks, properly
+ * aligned and in the requested section
+ */
+#define DEFAULT_STACK_ALIGN (1 << 6) /* In case the caller doesn't provide alignment */
+
+ .macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
+ count_tz \_align, 0
+ .if (\_align - (1 << TZ_COUNT))
+ .error "Incorrect stack alignment specified (Must be a power of 2)."
+ .endif
+ .if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
+ .error "Stack size not correctly aligned"
+ .endif
+ .section \_section, "aw", %nobits
+ .align TZ_COUNT
+ \_name:
+ .space ((\_count) * (\_size)), 0
+ .endm
+
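+	/*
+	 * Usage sketch (illustrative; names hypothetical): reserve four 4KB
+	 * stacks, 64-byte aligned by default, in a .stacks section:
+	 *
+	 *   declare_stack platform_stacks, .stacks, 0x1000, 4
+	 */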
+
+#endif /* ASM_MACROS_COMMON_S */
diff --git a/arch/arm/armv8-a/include/common/debug.h b/arch/arm/armv8-a/include/common/debug.h
new file mode 100644
index 00000000..50fb1713
--- /dev/null
+++ b/arch/arm/armv8-a/include/common/debug.h
@@ -0,0 +1,31 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include <lib/utils_def.h>
+
+/*
+ * The log output macros print output to the console. These macros produce
+ * compiled log output only if the LOG_LEVEL defined in the makefile (or the
+ * make command line) is greater than or equal to the level required for that
+ * type of log output.
+ *
+ * The format expected is the same as for printf(). For example:
+ * INFO("Info %s.\n", "message") -> INFO: Info message.
+ * WARN("Warning %s.\n", "message") -> WARNING: Warning message.
+ */
+
+#define LOG_LEVEL_NONE U(0)
+#define LOG_LEVEL_ERROR U(10)
+#define LOG_LEVEL_NOTICE U(20)
+#define LOG_LEVEL_WARNING U(30)
+#define LOG_LEVEL_INFO U(40)
+#define LOG_LEVEL_VERBOSE U(50)
+
+#endif /* DEBUG_H */
diff --git a/arch/arm/armv8-a/include/lib/mmio.h b/arch/arm/armv8-a/include/lib/mmio.h
new file mode 100644
index 00000000..331613e2
--- /dev/null
+++ b/arch/arm/armv8-a/include/lib/mmio.h
@@ -0,0 +1,79 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MMIO_H
+#define MMIO_H
+
+#include <stdint.h>
+
+static inline void mmio_write_8(uintptr_t addr, uint8_t value)
+{
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline uint8_t mmio_read_8(uintptr_t addr)
+{
+ return *(volatile uint8_t *)addr;
+}
+
+static inline void mmio_write_16(uintptr_t addr, uint16_t value)
+{
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline uint16_t mmio_read_16(uintptr_t addr)
+{
+ return *(volatile uint16_t *)addr;
+}
+
+static inline void mmio_clrsetbits_16(
+ uintptr_t addr,
+ uint16_t clear,
+ uint16_t set)
+{
+ mmio_write_16(addr, (mmio_read_16(addr) & ~clear) | set);
+}
+
+static inline void mmio_write_32(uintptr_t addr, uint32_t value)
+{
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline uint32_t mmio_read_32(uintptr_t addr)
+{
+ return *(volatile uint32_t *)addr;
+}
+
+static inline void mmio_write_64(uintptr_t addr, uint64_t value)
+{
+ *(volatile uint64_t *)addr = value;
+}
+
+static inline uint64_t mmio_read_64(uintptr_t addr)
+{
+ return *(volatile uint64_t *)addr;
+}
+
+static inline void mmio_clrbits_32(uintptr_t addr, uint32_t clear)
+{
+ mmio_write_32(addr, mmio_read_32(addr) & ~clear);
+}
+
+static inline void mmio_setbits_32(uintptr_t addr, uint32_t set)
+{
+ mmio_write_32(addr, mmio_read_32(addr) | set);
+}
+
+static inline void mmio_clrsetbits_32(
+ uintptr_t addr,
+ uint32_t clear,
+ uint32_t set)
+{
+ mmio_write_32(addr, (mmio_read_32(addr) & ~clear) | set);
+}
+
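+/*
+ * Usage sketch (illustrative; the register address CTRL_REG_ADDR is
+ * hypothetical): update only bits [7:4] of a 32-bit control register,
+ * leaving the other bits untouched:
+ *
+ *   mmio_clrsetbits_32(CTRL_REG_ADDR, UINT32_C(0xF0), UINT32_C(0x30));
+ */
+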
+#endif /* MMIO_H */
diff --git a/arch/arm/armv8-a/include/lib/utils_def.h b/arch/arm/armv8-a/include/lib/utils_def.h
new file mode 100644
index 00000000..7fe27952
--- /dev/null
+++ b/arch/arm/armv8-a/include/lib/utils_def.h
@@ -0,0 +1,170 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2016-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTILS_DEF_H
+#define UTILS_DEF_H
+
+/* Compute the number of elements in the given array */
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define IS_POWER_OF_TWO(x) (((x) & ((x)-1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n) (4 << (n))
+
+#define BIT_32(nr) (U(1) << (nr))
+#define BIT_64(nr) (ULL(1) << (nr))
+
+#ifdef AARCH32
+# define BIT BIT_32
+#else
+# define BIT BIT_64
+#endif
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+# define GENMASK_32(h, l) \
+ (((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))
+
+# define GENMASK_64(h, l) ((~0 << (l)) & (~0 >> (64 - 1 - (h))))
+#else
+# define GENMASK_32(h, l) \
+ (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
+
+# define GENMASK_64(h, l) \
+ (((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
+#endif
+
+#ifdef AARCH32
+# define GENMASK GENMASK_32
+#else
+# define GENMASK GENMASK_64
+#endif
+
+/*
+ * This variant of div_round_up can be used in macro definition but should not
+ * be used in C code as the `d` parameter is evaluated twice.
+ */
+#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d)-1) / (d))
+
+#define div_round_up(val, div) \
+ __extension__({ \
+ __typeof__(div) _div = (div); \
+ ((val) + _div - (__typeof__(div))1) / _div; \
+ })
+
+#define MIN(x, y) \
+ __extension__({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x < _y ? _x : _y; \
+ })
+
+#define MAX(x, y) \
+ __extension__({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x > _y ? _x : _y; \
+ })
+
+/*
+ * The round_up() macro rounds up a value to the given boundary in a
+ * type-agnostic yet type-safe manner. The boundary must be a power of two.
+ * In other words, it computes the smallest multiple of boundary which is
+ * greater than or equal to value.
+ *
+ * round_down() is similar but rounds the value down instead.
+ */
+#define round_boundary(value, boundary) ((__typeof__(value))((boundary)-1))
+
+#define round_up(value, boundary) \
+ ((((value)-1) | round_boundary(value, boundary)) + 1)
+
+#define round_down(value, boundary) ((value) & ~round_boundary(value, boundary))
+
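+/*
+ * Example (illustrative): round_up(0x1234, 0x1000) evaluates to 0x2000 and
+ * round_down(0x1234, 0x1000) to 0x1000.
+ */
+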
+/*
+ * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
+ * Both arguments must be unsigned pointer values (i.e. uintptr_t).
+ */
+#define check_uptr_overflow(_ptr, _inc) ((_ptr) > (UINTPTR_MAX - (_inc)))
+
+/*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(_u32, _inc) ((_u32) > (UINT32_MAX - (_inc)))
+
+/*
+ * For those constants to be shared between C and other sources, apply a 'U',
+ * 'UL', 'ULL', 'L' or 'LL' suffix to the argument only in C, to avoid
+ * undefined or unintended behaviour.
+ *
+ * The GNU assembler and linker do not support these suffixes (they cause the
+ * build process to fail); therefore the suffix is omitted when used in linker
+ * scripts and assembler files.
+ */
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+# define U(_x) (_x)
+# define UL(_x) (_x)
+# define ULL(_x) (_x)
+# define L(_x) (_x)
+# define LL(_x) (_x)
+#else
+# define U(_x) (_x##U)
+# define UL(_x) (_x##UL)
+# define ULL(_x) (_x##ULL)
+# define L(_x) (_x##L)
+# define LL(_x) (_x##LL)
+#endif
+
+/* Register size of the current architecture. */
+#ifdef AARCH32
+# define REGSZ U(4)
+#else
+# define REGSZ U(8)
+#endif
+
+/*
+ * Test for the current architecture version to be at least the version
+ * expected.
+ */
+#define ARM_ARCH_AT_LEAST(_maj, _min) \
+ ((ARM_ARCH_MAJOR > (_maj)) || \
+ ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
+
+/*
+ * Import an assembly or linker symbol as a C expression with the specified
+ * type
+ */
+#define IMPORT_SYM(type, sym, name) \
+ extern char sym[]; \
+ static const __attribute__((unused)) type name = (type)sym;
+
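+/*
+ * Usage sketch (illustrative): import the linker-provided __BSS_START__
+ * symbol as a C value:
+ *
+ *   IMPORT_SYM(uintptr_t, __BSS_START__, bss_start);
+ */
+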
+/*
+ * When the symbol is used to hold a pointer, its alignment can be asserted
+ * with this macro. For example, if there is a linker symbol that is going to
+ * be used as a 64-bit pointer, the value of the linker symbol must also be
+ * aligned to 64 bit. This macro makes sure this is the case.
+ */
+#define ASSERT_SYM_PTR_ALIGN(sym) \
+ assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
+
+#define COMPILER_BARRIER() __asm__ volatile("" ::: "memory")
+
+/* Compiler builtin available in GCC >= 9 and planned for LLVM */
+#ifdef __HAVE_SPECULATION_SAFE_VALUE
+# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
+#else
+# define SPECULATION_SAFE_VALUE(var) var
+#endif
+
+#endif /* UTILS_DEF_H */
diff --git a/arch/arm/armv8-a/src/arch.ld.S b/arch/arm/armv8-a/src/arch.ld.S
new file mode 100644
index 00000000..56d17239
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch.ld.S
@@ -0,0 +1,218 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2015-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * There are three supported memory layouts for the ARMv8-A architecture:
+ *
+ * Layout 1 - Single region:
+ * - All sections are placed in one contiguous region.
+ * - This layout uses only the mem0 memory region.
+ * - The memory is considered RXW by the linker, but the sections can be
+ * configured later on with different attributes using the MMU.
+ * - The main stack is placed at the end of mem0.
+ * - This layout is mainly used by second-stage firmware that is loaded directly
+ * into a single RAM.
+ *
+ * Layout 2 - Dual region with relocation:
+ * - One region is used for .text and .data (storage).
+ * - A second region is used for the remaining sections.
+ * - This layout uses memory regions mem0 and mem1 as the first and second
+ * regions, respectively.
+ * - The main stack is placed at the end of mem1.
+ * - This layout is mainly used by ROM firmware which uses part of the RAM for
+ * the data sections.
+ *
+ * Layout 3 - Dual region without relocation:
+ * - One region is used only for the .text section.
+ * - A second region is used for all data sections.
+ * - This layout uses memory regions mem0 and mem1 as the first and second
+ * regions, respectively.
+ * - The main stack is placed at the end of mem1.
+ * - The main difference from layout 2 is that there is no relocation of the
+ * .data section.
+ * - This layout is mainly used by second-stage firmware loaded directly into
+ * two RAM regions. One of the RAM regions is attached to the instruction bus,
+ * which improves the performance as data and instruction accesses are
+ * independent.
+ *
+ */
+
+#define ARCH_MEM_MODE_SINGLE_REGION 0
+#define ARCH_MEM_MODE_DUAL_REGION_RELOCATION 1
+#define ARCH_MEM_MODE_DUAL_REGION_NO_RELOCATION 2
+
+#include <fmw_memory.h>
+
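+/*
+ * Illustrative fmw_memory.h configuration (all values hypothetical):
+ *
+ *   #define FMW_MEM_MODE ARCH_MEM_MODE_SINGLE_REGION
+ *   #define FMW_MEM0_BASE 0xE6320000
+ *   #define FMW_MEM0_SIZE (128 * 1024)
+ *   #define FMW_STACK_SIZE 0x1000
+ */
+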
+#define STACK_ALIGNMENT 8
+
+/*
+ * Input validation
+ */
+
+#ifndef FMW_MEM_MODE
+ #error "FMW_MEM_MODE has not been configured"
+#endif
+
+#ifndef FMW_STACK_SIZE
+ #error "FMW_STACK_SIZE has not been configured"
+#endif
+
+#ifndef FMW_MEM0_BASE
+ #error "FMW_MEM0_BASE has not been configured"
+#endif
+
+#ifndef FMW_MEM0_SIZE
+ #error "FMW_MEM0_SIZE has not been configured"
+#endif
+
+#if ((FMW_MEM_MODE != ARCH_MEM_MODE_SINGLE_REGION) && \
+ (FMW_MEM_MODE != ARCH_MEM_MODE_DUAL_REGION_RELOCATION) && \
+ (FMW_MEM_MODE != ARCH_MEM_MODE_DUAL_REGION_NO_RELOCATION))
+ #error "FMW_MEM_MODE has been configured improperly"
+#endif
+
+#if FMW_MEM_MODE != ARCH_MEM_MODE_SINGLE_REGION
+ #ifndef FIRMWARE_MEM1_BASE
+ #error "FIRMWARE_MEM1_BASE has not been configured"
+ #endif
+
+ #ifndef FIRMWARE_MEM1_SIZE
+ #error "FIRMWARE_MEM1_SIZE has not been configured"
+ #endif
+#endif
+
+/*
+ * Calculate stack region in the data memory.
+ */
+
+#if FMW_MEM_MODE == ARCH_MEM_MODE_SINGLE_REGION
+ ASSERT(FMW_STACK_SIZE < FMW_MEM0_SIZE,
+ "FMW_STACK_SIZE does not fit in MEM0")
+ #define UNALIGNED_STACK_BASE \
+ (FMW_MEM0_BASE + FMW_MEM0_SIZE - FMW_STACK_SIZE)
+#else
+ ASSERT(FMW_STACK_SIZE < FIRMWARE_MEM1_SIZE,
+ "FMW_STACK_SIZE does not fit in MEM1")
+ #define UNALIGNED_STACK_BASE \
+ (FIRMWARE_MEM1_BASE + FIRMWARE_MEM1_SIZE - FMW_STACK_SIZE)
+#endif
+
+#define STACK_BASE \
+ ( \
+ ((UNALIGNED_STACK_BASE + STACK_ALIGNMENT - 1) / STACK_ALIGNMENT) \
+ * STACK_ALIGNMENT \
+ )
+
+#define STACK_SIZE \
+ (( \
+ ((STACK_BASE + FMW_STACK_SIZE) / STACK_ALIGNMENT) \
+ * STACK_ALIGNMENT \
+ ) - STACK_BASE)
+
+ASSERT(STACK_SIZE > 0, "FMW_STACK_SIZE is too small")
+
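+/*
+ * Worked example (illustrative): in the single-region mode, with
+ * FMW_MEM0_BASE = 0xE6320000, FMW_MEM0_SIZE = 0x20000 and
+ * FMW_STACK_SIZE = 0x1000, UNALIGNED_STACK_BASE is 0xE633F000. That value
+ * is already 8-byte aligned, so STACK_BASE stays 0xE633F000 and STACK_SIZE
+ * is the full 0x1000.
+ */
+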
+ENTRY(_entrypoint)
+
+MEMORY {
+#if FMW_MEM_MODE == ARCH_MEM_MODE_SINGLE_REGION
+ /* Only one memory region with read, execute and write attributes */
+ mem0 (rxw): ORIGIN = FMW_MEM0_BASE, LENGTH = FMW_MEM0_SIZE - \
+ FMW_STACK_SIZE
+#else
+ mem0 (rx): ORIGIN = FMW_MEM0_BASE, LENGTH = FMW_MEM0_SIZE
+ mem1 (rxw): ORIGIN = FIRMWARE_MEM1_BASE, LENGTH = FIRMWARE_MEM1_SIZE - \
+ FMW_STACK_SIZE
+#endif
+ stack (rw): ORIGIN = STACK_BASE, LENGTH = STACK_SIZE
+ sram (rxw): ORIGIN = 0xE6302000, LENGTH = 0x00001000
+}
+
+SECTIONS {
+ .text : {
+ *(.vectors)
+ *(.entrypoint)
+ *(.text*)
+ *(.rodata*)
+ *(.note.gnu.build-id)
+ } > mem0
+
+ __text__ = .;
+
+ __sram_copy_start__ = .;
+ .system_ram : {
+ __system_ram_start__ = .;
+ *(.system_ram*)
+ *iic_dvfs.o(.rodata)
+ __system_ram_end__ = .;
+ } > sram AT> mem0
+
+ . = __text__ + SIZEOF(.system_ram);
+
+ /*
+ * Define a linker symbol to mark the start of the RW memory area for this
+ * image.
+ */
+ __RW_START__ = . ;
+
+ .data : {
+ . = ALIGN(4);
+ *(.data*)
+ . = ALIGN(4);
+#if FMW_MEM_MODE == ARCH_MEM_MODE_SINGLE_REGION
+ } > mem0 /* .data follows .text in mem0 */
+#elif FMW_MEM_MODE == ARCH_MEM_MODE_DUAL_REGION_NO_RELOCATION
+ } > mem1 /* .data is the first section in mem1 */
+#elif FMW_MEM_MODE == ARCH_MEM_MODE_DUAL_REGION_RELOCATION
+ } > mem1 AT>mem0 /* Run-time image is at mem1, but loaded from mem0 */
+#else
+ ASSERT(0, "Unrecognized FMW_MEM_MODE")
+#endif
+
+ .bss : {
+ . = ALIGN(4);
+ *(.bss*)
+ . = ALIGN(4);
+#if FMW_MEM_MODE == ARCH_MEM_MODE_SINGLE_REGION
+ } > mem0 /* .bss follows .data in mem0 */
+#else
+ } > mem1 /* .bss follows .data in mem1 */
+#endif
+
+ .stack : {
+ . = . + STACK_SIZE;
+ } > stack
+
+ /*
+ * Define a linker symbol to mark the end of the RW memory area for this
+ * image.
+ */
+ __RW_END__ = .;
+
+ __TEXT_START__ = LOADADDR(.text);
+ __TEXT_SIZE__ = SIZEOF(.text);
+ __TEXT_END__ = __TEXT_START__ + __TEXT_SIZE__;
+
+ __STACK_START__ = LOADADDR(.stack);
+ __STACK_SIZE__ = SIZEOF(.stack);
+ __STACK_END__ = __STACK_START__ + __STACK_SIZE__;
+ __STACK_TOP__ = __STACK_END__;
+ __STACK_SP3_SIZE__ = 0x800;
+ __STACK_SP0_TOP__ = __STACK_END__ - __STACK_SP3_SIZE__;
+
+ __DATA_LMA_START__ = LOADADDR(.data);
+ __DATA_START__ = ADDR(.data);
+ __DATA_SIZE__ = SIZEOF(.data);
+
+ __BSS_START__ = ADDR(.bss);
+ __BSS_SIZE__ = SIZEOF(.bss);
+ __BSS_END__ = __BSS_START__ + __BSS_SIZE__;
+
+ __HEAP_START__ = __BSS_START__ + __BSS_SIZE__;
+ __HEAP_END__ = __STACK_START__;
+ __HEAP_SIZE__ = __HEAP_END__ - __HEAP_START__;
+
+
+}
diff --git a/arch/arm/armv8-a/src/arch_cache_helpers.S b/arch/arm/armv8-a/src/arch_cache_helpers.S
new file mode 100644
index 00000000..2c40cb23
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_cache_helpers.S
@@ -0,0 +1,204 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+loop_\op:
+ dc \op, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb sy
+exit_loop_\op:
+ ret
+.endm
+ /* ------------------------------------------
+ * Clean and invalidate from the base address
+ * for the given size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva civac
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from the base address for the given size.
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from the base address for the
+ * given size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva ivac
+endfunc inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * x10: The cache level to begin operation from
+ * It carries out the operation on each data cache, from the level in
+ * x10 up to the level in x3, in sequence.
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ mov x10, xzr
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lo level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.hs loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.hs loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.hi loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+endfunc do_dcsw_op
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ mrs x9, clidr_el1
+ mov x3, \level
+ sub x10, x3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/arch/arm/armv8-a/src/arch_crt0.S b/arch/arm/armv8-a/src/arch_crt0.S
new file mode 100644
index 00000000..8cc238b9
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_crt0.S
@@ -0,0 +1,186 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_system.h>
+#include <asm_macros.S>
+
+ .section .entrypoint
+
+ .globl _entrypoint
+ .globl _restore_system
+ .globl _save_system
+ .globl _boot_flag
+ .globl _shutdown_request
+
+ .extern _vector_table
+ .extern arm_main
+ .extern rcar_pwrc_set_suspend_to_ram
+#ifdef BUILD_HAS_NOTIFICATION
+ .extern __fwk_notification_reset
+#endif
+
+func _entrypoint
+ ldr w0, _boot_flag
+ ldr w1, =R_WARMBOOT
+ cmp w1, w0
+ beq _restore_system
+
+ ldr x0, =__STACK_TOP__
+ mov sp, x0 /* SP_EL3 */
+
+ adr x0, _vector_table
+ msr vbar_el3, x0
+ isb
+
+ msr spsel, #0
+ ldr x0, =__STACK_SP0_TOP__
+ mov sp, x0 /* SP_EL0 */
+ stp x29, x30, [sp, #-32]!
+
+ adr x0, __RW_START__
+ adr x1, __RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ ldr x0, =__COHERENT_RAM_START__
+ ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem
+#endif
+
+ mrs x0, scr_el3
+ /* RW[10]=1, HCE[8]=0, SMD[7]=0, EA[3]=1, FIQ[2]=1, IRQ[1]=1, NS[0]=0 */
+ mov x0, #(1<<10 | 0<<8 | 0<<7 | 1<<3 | 1<<2 | 1<<1 | 0<<0)
+ msr scr_el3, x0
+
+ /* --------------------------------------------------
+ * Initialize platform and jump to our c-entry point
+ * for this type of reset.
+ * --------------------------------------------------
+ */
+#ifdef BUILD_HAS_NOTIFICATION
+ bl __fwk_notification_reset
+#endif
+ bl arm_main
+
+ mov x0, 1
+ ldp x29, x30, [sp], #32
+
+ ret
+
+endfunc _entrypoint
+
+func _save_system /* EL3t */
+ stp x2, x3, [sp, #-0x10]!
+ ldr x2, =_save_area_top
+ stp x0, x1, [x2, #-0x10]!
+ mov x0, x2
+ ldp x2, x3, [sp], #0x10
+ stp x2, x3, [x0, #-0x10]!
+ stp x4, x5, [x0, #-0x10]!
+ stp x6, x7, [x0, #-0x10]!
+ stp x8, x9, [x0, #-0x10]!
+ stp x10, x11, [x0, #-0x10]!
+ stp x12, x13, [x0, #-0x10]!
+ stp x14, x15, [x0, #-0x10]!
+ stp x16, x17, [x0, #-0x10]!
+ stp x18, x19, [x0, #-0x10]!
+ stp x20, x21, [x0, #-0x10]!
+ stp x22, x23, [x0, #-0x10]!
+ stp x24, x25, [x0, #-0x10]!
+ stp x26, x27, [x0, #-0x10]!
+ stp x28, x29, [x0, #-0x10]!
+ stp x30, xzr, [x0, #-0x10]!
+
+ mov x2, sp
+ msr spsel, #1
+ mov x3, sp
+ msr spsel, #0
+ stp x2, x3, [x0, #-0x10]! /* Save SP_EL0, SP_EL3 */
+
+ mrs x3, scr_el3
+ mov x2, x30
+ stp x2, x3, [x0, #-0x10]! /* Save elr_el3(lr), scr_el3 */
+
+ mrs x2, nzcv
+ mrs x3, daif
+ orr x2, x2, x3
+ mrs x3, CurrentEL
+ orr x2, x2, x3
+ mrs x3, SPSel
+ orr x2, x2, x3
+ mrs x3, vbar_el3
+ stp x2, x3, [x0, #-0x10]! /* Save spsr_el3(psr), vbar_el3 */
+
+ bl rcar_pwrc_set_suspend_to_ram
+1:
+ wfi
+ b 1b
+endfunc _save_system
+
+func _restore_system /* EL3h */
+ ldr x0, =_save_area_bottom
+
+ ldp x2, x3, [x0], #0x10 /* Restore spsr_el3(psr), vbar_el3 */
+ msr spsr_el3, x2
+ msr vbar_el3, x3
+
+ ldp x2, x3, [x0], #0x10 /* Restore elr_el3(lr), scr_el3 */
+ msr elr_el3, x2
+ msr scr_el3, x3
+
+ ldp x2, x3, [x0], #0x10 /* Restore SP_EL0, SP_EL3 */
+ mov sp, x3
+ msr spsel, #0
+ mov sp, x2
+
+ ldp x30, xzr, [x0], #0x10
+ ldp x28, x29, [x0], #0x10
+ ldp x26, x27, [x0], #0x10
+ ldp x24, x25, [x0], #0x10
+ ldp x22, x23, [x0], #0x10
+ ldp x20, x21, [x0], #0x10
+ ldp x18, x19, [x0], #0x10
+ ldp x16, x17, [x0], #0x10
+ ldp x14, x15, [x0], #0x10
+ ldp x12, x13, [x0], #0x10
+ ldp x10, x11, [x0], #0x10
+ ldp x8, x9, [x0], #0x10
+ ldp x6, x7, [x0], #0x10
+ ldp x4, x5, [x0], #0x10
+ ldp x2, x3, [x0], #0x10
+ stp x2, x3, [sp, #-0x10]!
+ mov x2, x0
+ ldp x0, x1, [x2], #0x10
+ ldp x2, x3, [sp], #0x10
+
+ msr spsel, #1
+ eret
+endfunc _restore_system
+
+ .section .data.context
+ .align 4
+_boot_flag:
+ .long 0
+
+ .align 4
+_shutdown_request:
+ .long 0
+
+ .align 4
+_save_area_bottom:
+ .rept 38
+ .long 0, 0
+ .endr
+_save_area_top:
+
+ .end
diff --git a/arch/arm/armv8-a/src/arch_exceptions.S b/arch/arm/armv8-a/src/arch_exceptions.S
new file mode 100644
index 00000000..dfdfc37d
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_exceptions.S
@@ -0,0 +1,127 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ .globl _vector_table
+ .globl _freertos_vector_table
+ .extern _entrypoint
+
+.section .vectors
+.balign 2048
+_vector_table:
+
+.set VBAR, _vector_table
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200 (at EL3)
+ * -----------------------------------------------------
+ */
+.org VBAR
+ b _entrypoint
+.org (VBAR + 0x80)
+ b .
+.org (VBAR + 0x100)
+ b .
+.org (VBAR + 0x180)
+ b .
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (VBAR + 0x200)
+ b .
+.org (VBAR + 0x280)
+ b .
+.org (VBAR + 0x300)
+ b .
+.org (VBAR + 0x380)
+ b .
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (VBAR + 0x400)
+ b .
+.org (VBAR + 0x480)
+ b .
+.org (VBAR + 0x500)
+ b .
+.org (VBAR + 0x580)
+ b .
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (VBAR + 0x600)
+ b .
+.org (VBAR + 0x680)
+ b .
+.org (VBAR + 0x700)
+ b .
+.org (VBAR + 0x780)
+ b .
+
+/******************************************************************************
+ * Vector table to use when FreeRTOS is running.
+ *****************************************************************************/
+.set FREERTOS_VBAR, (VBAR+0x0800)
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (FREERTOS_VBAR)
+_freertos_vector_table:
+ b FreeRTOS_SWI_Handler
+.org (FREERTOS_VBAR + 0x80)
+ b FreeRTOS_IRQ_Handler
+.org (FREERTOS_VBAR + 0x100)
+ b FreeRTOS_IRQ_Handler
+.org (FREERTOS_VBAR + 0x180)
+ b .
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (FREERTOS_VBAR + 0x200)
+ b FreeRTOS_SWI_Handler
+.org (FREERTOS_VBAR + 0x280)
+ b FreeRTOS_IRQ_Handler
+.org (FREERTOS_VBAR + 0x300)
+ b FreeRTOS_IRQ_Handler
+.org (FREERTOS_VBAR + 0x380)
+ b .
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (FREERTOS_VBAR + 0x400)
+ b .
+.org (FREERTOS_VBAR + 0x480)
+ b .
+.org (FREERTOS_VBAR + 0x500)
+ b .
+.org (FREERTOS_VBAR + 0x580)
+ b .
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800 (at EL3)
+ * -----------------------------------------------------
+ */
+.org (FREERTOS_VBAR + 0x600)
+ b .
+.org (FREERTOS_VBAR + 0x680)
+ b .
+.org (FREERTOS_VBAR + 0x700)
+ b .
+.org (FREERTOS_VBAR + 0x780)
+ b .
+
+ .end
diff --git a/arch/arm/armv8-a/src/arch_gic.c b/arch/arm/armv8-a/src/arch_gic.c
new file mode 100644
index 00000000..30f93869
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_gic.c
@@ -0,0 +1,586 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <rcar_irq.h>
+
+#include <fwk_arch.h>
+#include <fwk_interrupt.h>
+#include <fwk_macros.h>
+#include <fwk_mm.h>
+#include <fwk_status.h>
+
+#include <arch_gic.h>
+
+#include <string.h>
+
+#define C_INT_ID (INT_ID(c_interrupt))
+#ifndef RCAR_SCMI_LIB
+# define RCAR_MFIS_MIN U(256)
+# define RCAR_MFIS_NO U(8)
+# define RCAR_MFIS_MAX (RCAR_MFIS_MIN + RCAR_MFIS_NO)
+# define IS_SUPPORT_INT(n) ((n >= RCAR_MFIS_MIN) && (n < RCAR_MFIS_MAX))
+# define EFECTIVE_NO(n) (n - RCAR_MFIS_MIN)
+#else
+# define IS_SUPPORT_INT(n) ((n >= SMCMH_IRQ_START) && (n < SMCMH_IRQ_END))
+# define EFECTIVE_NO(n) (n & 0xff)
+#endif /* RCAR_SCMI_LIB */
+#define CHECK_BIT(d, b) ((d >> b) & 1)
+#define IID_LEN (10)
+
+/*
+ * For interrupts with parameters, their entry in the vector table points to a
+ * global handler that calls a registered function in the callback table with a
+ * corresponding parameter. Entries in the vector table for interrupts without
+ * parameters point directly to the handler functions.
+ */
+struct callback {
+ union {
+ void (*func)(uintptr_t param);
+ void (*funcn)(void);
+ };
+ uintptr_t param;
+};
+
+struct r_node {
+ int valid;
+ struct r_node *left;
+ struct r_node *right;
+ struct callback *entry;
+};
+
+struct r_tree {
+ struct r_node *root;
+ int _allocated;
+};
+
+static unsigned int c_interrupt;
+static struct r_tree *radix;
+
+struct r_tree *init_entry(struct r_tree *rt)
+{
+ if (NULL == rt) {
+ rt = fwk_mm_calloc(1, sizeof(struct r_tree));
+ if (NULL == rt) {
+ return NULL;
+ }
+ rt->_allocated = 1;
+ } else {
+ rt->_allocated = 0;
+ }
+ rt->root = NULL;
+
+ return rt;
+}
+
+static void *_lookup(
+ struct r_node *cur,
+ struct r_node *cand,
+ uint32_t iid,
+ int bitno)
+{
+ if (NULL == cur) {
+ return NULL != cand ? cand->entry : NULL;
+ }
+
+ if (cur->valid) {
+ cand = cur;
+ }
+
+ if (CHECK_BIT(iid, bitno)) {
+ return _lookup(cur->right, cand, iid, bitno + 1);
+ } else {
+ return _lookup(cur->left, cand, iid, bitno + 1);
+ }
+}
+
+static void *lookup_entry(struct r_tree *rt, uint32_t iid)
+{
+ return _lookup(rt->root, NULL, iid, 0);
+}
+
+static int _add(
+ struct r_node **cur,
+ uint32_t iid,
+ void *entry,
+ int bitsize,
+ int bitno)
+{
+ struct r_node *new;
+
+ if (NULL == *cur) {
+ new = fwk_mm_calloc(1, sizeof(struct r_node));
+ if (NULL == new) {
+ return -1;
+ }
+ memset(new, 0, sizeof(struct r_node));
+ *cur = new;
+ }
+
+ if (bitsize == bitno) {
+ if ((*cur)->valid) {
+ return -1;
+ }
+ (*cur)->valid = 1;
+ (*cur)->entry = entry;
+ return 0;
+ } else {
+ if (CHECK_BIT(iid, bitno)) {
+ return _add(&(*cur)->right, iid, entry, bitsize, bitno + 1);
+ } else {
+ return _add(&(*cur)->left, iid, entry, bitsize, bitno + 1);
+ }
+ }
+}
+
+static int add_entry(struct r_tree *rt, uint32_t iid, void *entry, int len)
+{
+ return _add(&rt->root, iid, entry, len, 0);
+}
+
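+/*
+ * Illustrative flow: set_isr_irq() registers a callback for, say, interrupt
+ * ID 300 with add_entry(radix, 300, entry, IID_LEN), which walks IID_LEN
+ * bits of the ID to place the node; when the interrupt fires, irq_global(300)
+ * retrieves the same entry again via lookup_entry().
+ */
+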
+void irq_global(uint32_t iid)
+{
+ struct callback *entry;
+
+ c_interrupt = iid;
+
+ entry = (struct callback *)lookup_entry(radix, iid);
+ if (entry != NULL) {
+ if (entry->func) {
+ /* Available callback Function */
+ if (entry->param) {
+ entry->func(entry->param);
+ } else {
+ entry->funcn();
+ }
+ }
+ } else {
+ /* No interrupt entry */
+ }
+ c_interrupt = 0;
+}
+
+#ifndef RCAR_SCMI_LIB
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading entire registers
+ ******************************************************************************/
+/*
+ * Accessor to read the GIC Distributor ISENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
+{
+ unsigned int n = id >> ISENABLER_SHIFT;
+
+ return mmio_read_32(base + GICD_ISENABLER + (n << 2));
+}
+
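+/*
+ * Example (illustrative): with 32 interrupt IDs per register, interrupt ID 35
+ * gives n = 1, so the access targets GICD_ISENABLER + 4, where the interrupt
+ * is controlled by bit 35 % 32 = 3.
+ */
+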
+/*
+ * Accessor to read the GIC Distributor ISPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id)
+{
+ unsigned int n = id >> ISPENDR_SHIFT;
+
+ return mmio_read_32(base + GICD_ISPENDR + (n << 2));
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for writing entire registers
+ ******************************************************************************/
+/*
+ * Accessor to write the GIC Distributor ISENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static void gicd_write_isenabler(
+ uintptr_t base,
+ unsigned int id,
+ unsigned int val)
+{
+ unsigned int n = id >> ISENABLER_SHIFT;
+
+ mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static void gicd_write_icenabler(
+ uintptr_t base,
+ unsigned int id,
+ unsigned int val)
+{
+ unsigned int n = id >> ICENABLER_SHIFT;
+
+ mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ISPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static void gicd_write_ispendr(
+ uintptr_t base,
+ unsigned int id,
+ unsigned int val)
+{
+ unsigned int n = id >> ISPENDR_SHIFT;
+
+ mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+static void gicd_write_icpendr(
+ uintptr_t base,
+ unsigned int id,
+ unsigned int val)
+{
+ unsigned int n = id >> ICPENDR_SHIFT;
+
+ mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
+}
+
+/*******************************************************************************
+ * GIC Distributor functions for accessing the GIC registers
+ * corresponding to a single interrupt ID. These functions use bitwise
+ * operations or appropriate register accesses to modify or return
+ * the bit-field corresponding to the single interrupt ID.
+ ******************************************************************************/
+static void gicd_set_isenabler(uintptr_t base, unsigned int id)
+{
+ unsigned int bit_num = id & ((1U << ISENABLER_SHIFT) - 1U);
+
+ gicd_write_isenabler(base, id, (1U << bit_num));
+}
+
+static void gicd_set_icenabler(uintptr_t base, unsigned int id)
+{
+ unsigned int bit_num = id & ((1U << ICENABLER_SHIFT) - 1U);
+
+ gicd_write_icenabler(base, id, (1U << bit_num));
+}
+
+static void gicd_set_ipriorityr(
+ uintptr_t base,
+ unsigned int id,
+ unsigned int pri)
+{
+ uint8_t val = pri & GIC_PRI_MASK;
+
+ mmio_write_8(base + GICD_IPRIORITYR + id, val);
+}
+
+static unsigned int gicd_get_isenabler(uintptr_t base, unsigned int id)
+{
+ unsigned int bit_num = id & ((1U << ISENABLER_SHIFT) - 1U);
+
+ return ((gicd_read_isenabler(base, id) >> bit_num) & 1U);
+}
+
+/*******************************************************************************
+ * GIC CPU interface accessors for writing entire registers
+ ******************************************************************************/
+static inline unsigned int gicc_read_ctlr(uintptr_t base)
+{
+ return mmio_read_32(base + GICC_CTLR);
+}
+
+static void gicc_write_ctlr(uintptr_t base, unsigned int val)
+{
+ mmio_write_32(base + GICC_CTLR, val);
+}
+
+static void gicc_write_pmr(uintptr_t base, unsigned int val)
+{
+ mmio_write_32(base + GICC_PMR, val);
+}
+
+/*******************************************************************************
+ * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
+ * and set the priority mask register to allow all interrupts to trickle in.
+ ******************************************************************************/
+void gic_cpuif_enable(void)
+{
+ unsigned int val;
+
+ /*
+ * Enable the Group 0 interrupts, FIQEn and disable Group 0/1
+ * bypass.
+ */
+ val = CTLR_ENABLE_G0_BIT | FIQ_EN_BIT | FIQ_BYP_DIS_GRP0;
+ val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+
+ /* Program the idle priority in the PMR */
+ gicc_write_pmr(RCAR_GICC_BASE, GIC_PRI_MASK);
+ gicc_write_ctlr(RCAR_GICC_BASE, val);
+}
+
+/*******************************************************************************
+ * Place the cpu interface in a state where it can never make a cpu exit wfi
+ * as a result of an asserted interrupt. This is critical for powering down a
+ * cpu.
+ ******************************************************************************/
+void gic_cpuif_disable(void)
+{
+ unsigned int val;
+
+ /* Disable secure, non-secure interrupts and disable their bypass */
+ val = gicc_read_ctlr(RCAR_GICC_BASE);
+ val &= ~(CTLR_ENABLE_G0_BIT | CTLR_ENABLE_G1_BIT);
+ val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
+ val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
+ gicc_write_ctlr(RCAR_GICC_BASE, val);
+}
+
+void gic_init(void)
+{
+ gicd_set_ipriorityr(
+ RCAR_GICD_BASE, VIRTUAL_TIMER_IRQ, GIC_HIGHEST_SEC_PRIORITY);
+ gicd_set_isenabler(RCAR_GICD_BASE, VIRTUAL_TIMER_IRQ);
+ gic_cpuif_enable();
+}
+
+/* --------------------------------------------------- */
+
+static int global_enable(void)
+{
+ __asm__ volatile("msr DAIFClr, #1"); /* FIQ */
+ return FWK_SUCCESS;
+}
+
+static int global_disable(void)
+{
+ __asm__ volatile("msr DAIFSet, #1"); /* FIQ */
+ return FWK_SUCCESS;
+}
+
+static int is_enabled(unsigned int interrupt, bool *enabled)
+{
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ *enabled = (bool)gicd_get_isenabler(RCAR_GICD_BASE, interrupt);
+
+ return FWK_SUCCESS;
+}
+
+static int enable(unsigned int interrupt)
+{
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ gicd_set_isenabler(RCAR_GICD_BASE, interrupt);
+
+ return FWK_SUCCESS;
+}
+
+static int disable(unsigned int interrupt)
+{
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ gicd_set_icenabler(RCAR_GICD_BASE, interrupt);
+
+ return FWK_SUCCESS;
+}
+
+static int is_pending(unsigned int interrupt, bool *pending)
+{
+ unsigned int bit;
+
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ bit = interrupt % 32;
+ *pending =
+ (gicd_read_ispendr(RCAR_GICD_BASE, interrupt) & (1 << bit)) ? 1 : 0;
+
+ return FWK_SUCCESS;
+}
+
+static int set_pending(unsigned int interrupt)
+{
+ unsigned int bit;
+
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ bit = interrupt % 32;
+ gicd_write_ispendr(RCAR_GICD_BASE, interrupt, 1 << bit);
+
+ return FWK_SUCCESS;
+}
+
+static int clear_pending(unsigned int interrupt)
+{
+ unsigned int bit;
+
+ if (!IS_SUPPORT_INT(interrupt))
+ return FWK_E_PARAM;
+
+ bit = interrupt % 32;
+ gicd_write_icpendr(RCAR_GICD_BASE, interrupt, 1 << bit);
+
+ return FWK_SUCCESS;
+}
+
+#else
+
+static int global_enable(void)
+{
+ return FWK_SUCCESS;
+}
+
+static int global_disable(void)
+{
+ return FWK_SUCCESS;
+}
+
+static int is_enabled(unsigned int interrupt, bool *enabled)
+{
+ return FWK_SUCCESS;
+}
+
+static int enable(unsigned int interrupt)
+{
+ return FWK_SUCCESS;
+}
+
+static int disable(unsigned int interrupt)
+{
+ return FWK_SUCCESS;
+}
+
+static int is_pending(unsigned int interrupt, bool *pending)
+{
+ return FWK_SUCCESS;
+}
+
+static int set_pending(unsigned int interrupt)
+{
+ return FWK_SUCCESS;
+}
+
+static int clear_pending(unsigned int interrupt)
+{
+ return FWK_SUCCESS;
+}
+
+#endif /* RCAR_SCMI_LIB */
+
+static int set_isr_irq(unsigned int interrupt, void (*isr)(void))
+{
+ struct callback *entry;
+ int ret;
+
+ if ((MIN_IRQ > interrupt) || (MAX_IRQ <= interrupt))
+ return FWK_E_PARAM;
+
+ entry = fwk_mm_calloc(1, sizeof(struct callback));
+ if (NULL == entry)
+ return FWK_E_PANIC;
+
+ entry->funcn = isr;
+ entry->param = (uintptr_t)NULL;
+ ret = add_entry(radix, interrupt, (void *)entry, IID_LEN);
+ if (ret)
+ return FWK_E_PANIC;
+
+ return FWK_SUCCESS;
+}
+
+static int set_isr_irq_param(
+ unsigned int interrupt,
+ void (*isr)(uintptr_t param),
+ uintptr_t parameter)
+{
+ struct callback *entry;
+ int ret;
+
+ if ((MIN_IRQ > interrupt) || (MAX_IRQ <= interrupt))
+        return FWK_E_PARAM;
+
+ entry = fwk_mm_calloc(1, sizeof(struct callback));
+ if (NULL == entry)
+ return FWK_E_PANIC;
+
+ entry->func = isr;
+ entry->param = parameter;
+ ret = add_entry(radix, interrupt, (void *)entry, IID_LEN);
+ if (ret)
+        return FWK_E_PANIC;
+
+ return FWK_SUCCESS;
+}
+
+static int set_isr_dummy(void (*isr)(void))
+{
+ return FWK_SUCCESS;
+}
+
+static int set_isr_dummy_param(
+ void (*isr)(uintptr_t param),
+ uintptr_t parameter)
+{
+ return FWK_SUCCESS;
+}
+
+static int get_current(unsigned int *interrupt)
+{
+ *interrupt = c_interrupt;
+
+ /* Not an interrupt */
+ if (0 == *interrupt)
+ return FWK_E_STATE;
+
+ return FWK_SUCCESS;
+}
+
+static const struct fwk_arch_interrupt_driver arm_gic_driver = {
+ .global_enable = global_enable,
+ .global_disable = global_disable,
+ .is_enabled = is_enabled,
+ .enable = enable,
+ .disable = disable,
+ .is_pending = is_pending,
+ .set_pending = set_pending,
+ .clear_pending = clear_pending,
+ .set_isr_irq = set_isr_irq,
+ .set_isr_irq_param = set_isr_irq_param,
+ .set_isr_nmi = set_isr_dummy,
+ .set_isr_nmi_param = set_isr_dummy_param,
+ .set_isr_fault = set_isr_dummy,
+ .get_current = get_current,
+};
+
+int arm_gic_init(const struct fwk_arch_interrupt_driver **driver)
+{
+ /*
+ * Allocate and initialize a table for the callback functions and their
+ * corresponding parameters.
+ */
+ radix = init_entry(NULL);
+ if (radix == NULL)
+ return FWK_E_NOMEM;
+
+ gic_init();
+
+ *driver = &arm_gic_driver;
+
+ return FWK_SUCCESS;
+}
diff --git a/arch/arm/armv8-a/src/arch_libc.c b/arch/arm/armv8-a/src/arch_libc.c
new file mode 100644
index 00000000..6e0adb1f
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_libc.c
@@ -0,0 +1,260 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fwk_arch.h>
+#include <fwk_macros.h>
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+ while (count--)
+ *xs++ = c;
+ return s;
+}
+
+void *memcpy(void *dst, const void *src, size_t n)
+{
+ /* copy per 1 byte */
+ const char *p = src;
+ char *q = dst;
+
+ while (n--) {
+ *q++ = *p++;
+ }
+
+ return dst;
+}
+
+char *strncpy(char *dest, const char *src, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n && src[i] != 0; i++)
+ dest[i] = src[i];
+ for (; i < n; i++)
+ dest[i] = 0;
+
+ return dest;
+}
+
+char *strchr(const char *str, int c)
+{
+    /* Also match the terminating null byte, as strchr(s, '\0') must */
+    do {
+        if (*str == (char)c)
+            return (char *)str;
+    } while (*str++);
+
+    return NULL;
+}
+
+size_t strlen(const char *str)
+{
+ char *tmp = (char *)str;
+ size_t counter = 0;
+ while (*tmp++)
+ ++counter;
+ return counter;
+}
+
+static void uint_to_str(unsigned int i, char *buf, int base)
+{
+ char const digit_10[] = "0123456789";
+ char const digit_16[] = "0123456789abcdef";
+ unsigned int shifter = i;
+ char const *digit;
+
+ if (base == 10)
+ digit = digit_10;
+ else
+ digit = digit_16;
+
+ do {
+ ++buf;
+ shifter = shifter / base;
+ } while (shifter);
+
+ *buf = '\0';
+
+ do {
+ *--buf = digit[i % base];
+ i = i / base;
+ } while (i);
+}
+
+static void int_to_str(int i, char *buf, int base)
+{
+ int sign = i;
+
+ if (i < 0) {
+ i = -i;
+ buf++;
+ }
+
+ uint_to_str((unsigned int)i, buf, base);
+
+ if (sign < 0)
+ *--buf = '-';
+}
+
+static int isdigit(char c)
+{
+ return (c >= '0' && c <= '9');
+}
+
+static int handle_num(char type, char *buf, va_list *args)
+{
+ int int_num;
+ unsigned int uint_num;
+
+ switch (type) {
+ case 'u':
+ uint_num = va_arg(*args, unsigned int);
+ uint_to_str(uint_num, buf, 10);
+ break;
+ case 'd':
+ int_num = va_arg(*args, int);
+ int_to_str(int_num, buf, 10);
+ break;
+ case 'x':
+ uint_num = va_arg(*args, unsigned int);
+ uint_to_str(uint_num, buf, 16);
+ break;
+ default:
+        return 1;
+ }
+
+ return 0;
+}
+
+int vsnprintf(char *str, size_t n, const char *format, va_list args)
+{
+ char *pos;
+ char *s;
+ char *tmp = str;
+ size_t length = 0;
+ int num_length, min_length;
+ char num_buf[12];
+ int not_implemented;
+
+ for (pos = (char *)format; *pos != '\0'; pos++) {
+ while ((*pos != '%') && (*pos != '\0') && (length < n)) {
+ *tmp++ = *pos++;
+ length++;
+ }
+
+ if (length == n)
+ break;
+
+ if (*pos == '\0') {
+ *tmp = '\0';
+ break;
+ }
+
+ pos++;
+
+ not_implemented = 0;
+
+ switch (*pos) {
+ case 's':
+ s = va_arg(args, char *);
+ strncpy(tmp, s, n - length);
+ break;
+ case '0':
+ if (isdigit(*(pos + 1)) && (*(pos + 1) > '0')) {
+ pos++;
+ } else {
+ not_implemented = 1;
+ break;
+ }
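+            /* fall through to width handling */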
+ case '1' ... '9':
+ min_length = (unsigned int)(*pos - '0');
+
+ if (handle_num(*(pos + 1), num_buf, &args)) {
+ if (*(pos - 1) == '0')
+ pos--;
+
+ not_implemented = 1;
+ break;
+ }
+
+ num_length = strlen(num_buf);
+
+ if (num_length < min_length) {
+ while (num_length >= 0)
+ num_buf[min_length--] = num_buf[num_length--];
+
+ if (*(pos - 1) == '0') {
+ if (num_buf[0] == '-') {
+ min_length++;
+ while (min_length > 0)
+ num_buf[min_length--] = '0';
+ } else {
+ while (min_length >= 0)
+ num_buf[min_length--] = '0';
+ }
+ } else {
+ while (min_length >= 0)
+ num_buf[min_length--] = ' ';
+ }
+ }
+ strncpy(tmp, num_buf, n - length);
+ pos++;
+ break;
+ default:
+ if (handle_num(*pos, num_buf, &args))
+ not_implemented = 1;
+ else
+ strncpy(tmp, num_buf, n - length);
+ break;
+ }
+
+ if (not_implemented) {
+ va_arg(args, unsigned int);
+ *tmp++ = '%';
+ length++;
+ pos--;
+ } else {
+ while ((*tmp != '\0') && (length < n)) {
+ tmp++;
+ length++;
+ }
+ }
+ }
+
+ if (tmp == str) {
+ *tmp = '\0';
+ } else if (length == n) {
+ tmp--;
+ if (*tmp != '\0')
+ *tmp = '\0';
+ else
+ length--;
+ } else if (*(tmp - 1) != '\0') {
+ *tmp = '\0';
+ } else {
+ length--;
+ }
+
+ return length;
+}
+
+int snprintf(char *str, size_t size, const char *format, ...)
+{
+ int counter;
+ va_list args;
+ va_start(args, format);
+ counter = vsnprintf(str, size, format, args);
+ va_end(args);
+ return counter;
+}
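+
+/*
+ * Usage sketch (illustrative): this minimal implementation understands %s,
+ * %d, %u and %x with optional zero/space width padding, e.g.
+ *
+ *   char buf[16];
+ *   snprintf(buf, sizeof(buf), "id=%04x", 0x2au); // buf contains "id=002a"
+ */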
diff --git a/arch/arm/armv8-a/src/arch_main.c b/arch/arm/armv8-a/src/arch_main.c
new file mode 100644
index 00000000..a03bcd42
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_main.c
@@ -0,0 +1,46 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mod_rcar_system.h>
+
+#include <fwk_arch.h>
+#include <fwk_noreturn.h>
+#include <fwk_status.h>
+
+#include <arch_gic.h>
+#include <arch_helpers.h>
+
+#include <stdbool.h>
+
+/*
+ * Error handler for failures that occur during early initialization.
+ */
+void panic(void)
+{
+ while (true)
+ wfi();
+}
+
+static struct fwk_arch_init_driver arch_init_driver = {
+ .interrupt = arm_gic_init,
+};
+
+void arm_main(void)
+{
+ int status;
+
+ rcar_system_code_copy_to_system_ram();
+
+#ifdef BUILD_MODE_DEBUG
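+    /* Writing 0 to CNTV_CTL_EL0 clears its ENABLE bit, stopping the EL0
+     * virtual timer for debug builds */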
+ uint32_t cntv_ctl = 0;
+ __asm__ volatile("msr cntv_ctl_el0, %0" ::"r"(cntv_ctl));
+#endif /* BUILD_MODE_DEBUG */
+
+ status = fwk_arch_init(&arch_init_driver);
+ if (status != FWK_SUCCESS)
+ panic();
+}
diff --git a/arch/arm/armv8-a/src/arch_misc_helpers.S b/arch/arm/armv8-a/src/arch_misc_helpers.S
new file mode 100644
index 00000000..cde1a039
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_misc_helpers.S
@@ -0,0 +1,511 @@
+/*
+ * Arm SCP/MCP Software
+ * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .globl get_afflvl_shift
+ .globl mpidr_mask_lower_afflvls
+ .globl eret
+ .globl smc
+
+ .globl zero_normalmem
+ .globl zeromem
+ .globl zeromem16
+ .globl memcpy16
+
+ .globl disable_mmu_el1
+ .globl disable_mmu_el3
+ .globl disable_mmu_icache_el1
+ .globl disable_mmu_icache_el3
+
+#if SUPPORT_VFP
+ .globl enable_vfp
+#endif
+
+func get_afflvl_shift
+ cmp x0, #3
+ cinc x0, x0, eq
+ mov x1, #MPIDR_AFFLVL_SHIFT
+ lsl x0, x0, x1
+ ret
+endfunc get_afflvl_shift
+
+func mpidr_mask_lower_afflvls
+ cmp x1, #3
+ cinc x1, x1, eq
+ mov x2, #MPIDR_AFFLVL_SHIFT
+ lsl x2, x1, x2
+ lsr x0, x0, x2
+ lsl x0, x0, x2
+ ret
+endfunc mpidr_mask_lower_afflvls
+
+
+func eret
+ eret
+endfunc eret
+
+
+func smc
+ smc #0
+endfunc smc
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * NOTE: This function is deprecated and zeromem should be used instead.
+ * -----------------------------------------------------------------------
+ */
+.equ zeromem16, zeromem
+
+/* -----------------------------------------------------------------------
+ * void zero_normalmem(void *mem, unsigned int length);
+ *
+ * Initialise a region in normal memory to 0. This function complies with the
+ * AAPCS and can be called from C code.
+ *
+ * NOTE: MMU must be enabled when using this function as it can only operate on
+ * normal memory. It is intended to be used mainly from C code, where the
+ * MMU is usually enabled.
+ * -----------------------------------------------------------------------
+ */
+.equ zero_normalmem, zeromem_dczva
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a region of device memory to 0. This function complies with the
+ * AAPCS and can be called from C code.
+ *
+ * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
+ * used instead for faster zeroing.
+ *
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+ /* x2 is the address past the last zeroed address */
+ add x2, x0, x1
+ /*
+ * Uses the fallback path that does not use DC ZVA instruction and
+ * therefore does not need enabled MMU
+ */
+ b .Lzeromem_dczva_fallback_entry
+endfunc zeromem
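+
+/* -----------------------------------------------------------------------
+ * Illustrative usage sketch (hypothetical call sites, not part of this
+ * port): before the MMU is on, only zeromem() is safe; once the MMU and
+ * data cache are enabled, zero_normalmem() can take the DC ZVA fast path:
+ *
+ *     extern void zeromem(void *mem, unsigned int length);
+ *     extern void zero_normalmem(void *mem, unsigned int length);
+ *
+ *     zeromem(bss_start, bss_size);           // MMU off
+ *     zero_normalmem(heap_start, heap_size);  // MMU and caches on
+ * -----------------------------------------------------------------------
+ */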
+
+/* -----------------------------------------------------------------------
+ * void zeromem_dczva(void *mem, unsigned int length);
+ *
+ * Fill a region of normal memory of size "length" in bytes with null bytes.
+ * The MMU must be enabled and the memory must be of Normal type. This is
+ * because this function internally uses the DC ZVA instruction, which
+ * generates an Alignment fault if used on any type of Device memory (see
+ * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
+ * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
+ * requirement for the MMU to be enabled.
+ * NOTE: The code assumes that the block size as defined in DCZID_EL0
+ * register is at least 16 bytes.
+ *
+ * -----------------------------------------------------------------------
+ */
+func zeromem_dczva
+
+ /*
+ * The function consists of a series of loops that zero memory one byte
+ * at a time, 16 bytes at a time, or a whole block at a time using the
+ * DC ZVA instruction; the block size is assumed to be at least 16 bytes.
+ * In the case where the DC ZVA instruction cannot be used, or if the
+ * first 16 bytes loop would overflow, there is a fallback path that does
+ * not use DC ZVA.
+ * Note: The fallback path is also used by the zeromem function, which
+ * branches to it directly.
+ *
+ * +---------+ zeromem_dczva
+ * | entry |
+ * +----+----+
+ * |
+ * v
+ * +---------+
+ * | checks |>o-------+ (If any check fails, fallback)
+ * +----+----+ |
+ * | |---------------+
+ * v | Fallback path |
+ * +------+------+ |---------------+
+ * | 1 byte loop | |
+ * +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
+ * | |
+ * v |
+ * +-------+-------+ |
+ * | 16 bytes loop | |
+ * +-------+-------+ |
+ * | |
+ * v |
+ * +------+------+ .Lzeromem_dczva_blocksize_aligned
+ * | DC ZVA loop | |
+ * +------+------+ |
+ * +--------+ | |
+ * | | | |
+ * | v v |
+ * | +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
+ * | | 16 bytes loop | |
+ * | +-------+-------+ |
+ * | | |
+ * | v |
+ * | +------+------+ .Lzeromem_dczva_final_1byte_aligned
+ * | | 1 byte loop | |
+ * | +-------------+ |
+ * | | |
+ * | v |
+ * | +---+--+ |
+ * | | exit | |
+ * | +------+ |
+ * | |
+ * | +--------------+ +------------------+ zeromem
+ * | | +----------------| zeromem function |
+ * | | | +------------------+
+ * | v v
+ * | +-------------+ .Lzeromem_dczva_fallback_entry
+ * | | 1 byte loop |
+ * | +------+------+
+ * | |
+ * +-----------+
+ */
+
+ /*
+ * Readable names for registers
+ *
+ * Registers x0, x1 and x2 are also set by zeromem which
+ * branches into the fallback path directly, so cursor, length and
+ * stop_address should not be retargeted to other registers.
+ */
+ cursor .req x0 /* Start address and then current address */
+ length .req x1 /* Length in bytes of the region to zero out */
+ /* x1 is reused, as length is never used once block_mask is set */
+ block_mask .req x1 /* Bitmask of the block size read in DCZID_EL0 */
+ stop_address .req x2 /* Address past the last zeroed byte */
+ block_size .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
+ tmp1 .req x4
+ tmp2 .req x5
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
+ * register value and panic if the MMU is disabled.
+ */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
+ (defined(IMAGE_BL2) && BL2_AT_EL3)
+ mrs tmp1, sctlr_el3
+#else
+ mrs tmp1, sctlr_el1
+#endif
+
+ tst tmp1, #SCTLR_M_BIT
+ ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+ /* stop_address is the address past the last byte to zero */
+ add stop_address, cursor, length
+
+ /*
+ * Read dczid_el0, whose lowest four bits encode log2(<block size in
+ * words>) (see the register encoding)
+ */
+ mrs block_size, dczid_el0
+
+ /*
+ * Select the 4 lowest bits and convert the extracted log2(<block size
+ * in words>) to <block size in bytes>
+ */
+ ubfx block_size, block_size, #0, #4
+ mov tmp2, #(1 << 2)
+ lsl block_size, tmp2, block_size
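+ /*
+ * Worked example: a CPU reporting DCZID_EL0[3:0] = 4 (for instance
+ * Cortex-A53) has a zeroing block of 4 << 4 = 64 bytes.
+ */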
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Assumes block size is at least 16 bytes to avoid manual realignment
+ * of the cursor at the end of the DCZVA loop.
+ */
+ cmp block_size, #16
+ ASM_ASSERT(hs)
+#endif
+ /*
+ * It is not worth doing all the setup for a region smaller than a block;
+ * this check also protects against zeroing a whole block when the area
+ * to zero is smaller than that. In addition, as the block size is assumed
+ * to be at least 16 bytes, it protects the initial aligning loops from
+ * trying to zero 16 bytes when length is less than 16.
+ */
+ cmp length, block_size
+ b.lo .Lzeromem_dczva_fallback_entry
+
+ /*
+ * Calculate the bitmask of the block alignment. It will never
+ * underflow as the block size is between 4 bytes and 2kB.
+ * block_mask = block_size - 1
+ */
+ sub block_mask, block_size, #1
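+ /* e.g. a 64-byte block size yields block_mask = 0x3f */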
+
+ /*
+ * length alias should not be used after this point unless it is
+ * defined as a register other than block_mask's.
+ */
+ .unreq length
+
+ /*
+ * If the start address is already aligned to the zero block size, go
+ * straight to the cache zeroing loop. This is safe because at this
+ * point, the length cannot be smaller than a block size.
+ */
+ tst cursor, block_mask
+ b.eq .Lzeromem_dczva_blocksize_aligned
+
+ /*
+ * Calculate the first block-size-aligned address. It is assumed that
+ * the zero block size is at least 16 bytes. This address is the last
+ * address of this initial loop.
+ */
+ orr tmp1, cursor, block_mask
+ add tmp1, tmp1, #1
+
+ /*
+ * If the addition overflows, skip the cache zeroing loops. This is
+ * quite unlikely however.
+ */
+ cbz tmp1, .Lzeromem_dczva_fallback_entry
+
+ /*
+ * If the first block-size-aligned address is past the last address,
+ * fallback to the simpler code.
+ */
+ cmp tmp1, stop_address
+ b.hi .Lzeromem_dczva_fallback_entry
+
+ /*
+ * If the start address is already aligned to 16 bytes, skip this loop.
+ * It is safe to do this because tmp1 (the stop address of the initial
+ * 16-byte loop) will never be greater than the final stop address.
+ */
+ tst cursor, #0xf
+ b.eq .Lzeromem_dczva_initial_1byte_aligned_end
+
+ /* Calculate the next address aligned to 16 bytes */
+ orr tmp2, cursor, #0xf
+ add tmp2, tmp2, #1
+ /* If it overflows, fallback to the simple path (unlikely) */
+ cbz tmp2, .Lzeromem_dczva_fallback_entry
+ /*
+ * Next aligned address cannot be after the stop address because the
+ * length cannot be smaller than 16 at this point.
+ */
+
+ /* First loop: zero byte per byte */
+1:
+ strb wzr, [cursor], #1
+ cmp cursor, tmp2
+ b.ne 1b
+.Lzeromem_dczva_initial_1byte_aligned_end:
+
+ /*
+ * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
+ * before being able to use the code that deals with block-size-aligned
+ * addresses.
+ */
+ cmp cursor, tmp1
+ b.hs 2f
+1:
+ stp xzr, xzr, [cursor], #16
+ cmp cursor, tmp1
+ b.lo 1b
+2:
+
+ /*
+ * Third loop: zero a block at a time using DC ZVA cache block zeroing
+ * instruction.
+ */
+.Lzeromem_dczva_blocksize_aligned:
+ /*
+ * Calculate the last block-size-aligned address. If the result equals
+ * the start address, the loop will exit immediately.
+ */
+ bic tmp1, stop_address, block_mask
+
+ cmp cursor, tmp1
+ b.hs 2f
+1:
+ /* Zero the block containing the cursor */
+ dc zva, cursor
+ /* Increment the cursor by the size of a block */
+ add cursor, cursor, block_size
+ cmp cursor, tmp1
+ b.lo 1b
+2:
+
+ /*
+ * Fourth loop: zero 16 bytes at a time and then byte per byte the
+ * remaining area
+ */
+.Lzeromem_dczva_final_16bytes_aligned:
+ /*
+ * Calculate the last 16-byte-aligned address. It is assumed that the
+ * block size will never be smaller than 16 bytes, so that the current
+ * cursor is aligned to at least a 16-byte boundary.
+ */
+ bic tmp1, stop_address, #15
+
+ cmp cursor, tmp1
+ b.hs 2f
+1:
+ stp xzr, xzr, [cursor], #16
+ cmp cursor, tmp1
+ b.lo 1b
+2:
+
+ /* Fifth and final loop: zero byte per byte */
+.Lzeromem_dczva_final_1byte_aligned:
+ cmp cursor, stop_address
+ b.eq 2f
+1:
+ strb wzr, [cursor], #1
+ cmp cursor, stop_address
+ b.ne 1b
+2:
+ ret
+
+ /* Fallback for unaligned start addresses */
+.Lzeromem_dczva_fallback_entry:
+ /*
+ * If the start address is already aligned to 16 bytes, skip this loop.
+ */
+ tst cursor, #0xf
+ b.eq .Lzeromem_dczva_final_16bytes_aligned
+
+ /* Calculate the next address aligned to 16 bytes */
+ orr tmp1, cursor, #15
+ add tmp1, tmp1, #1
+ /* If it overflows, fallback to byte per byte zeroing */
+ cbz tmp1, .Lzeromem_dczva_final_1byte_aligned
+ /* If the next aligned address is after the stop address, fall back */
+ cmp tmp1, stop_address
+ b.hs .Lzeromem_dczva_final_1byte_aligned
+
+ /* Fallback entry loop: zero byte per byte */
+1:
+ strb wzr, [cursor], #1
+ cmp cursor, tmp1
+ b.ne 1b
+
+ b .Lzeromem_dczva_final_16bytes_aligned
+
+ .unreq cursor
+ /*
+ * length is already unreq'ed to reuse the register for another
+ * variable.
+ */
+ .unreq stop_address
+ .unreq block_size
+ .unreq block_mask
+ .unreq tmp1
+ .unreq tmp2
+endfunc zeromem_dczva
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
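+/*
+ * Illustrative call (hypothetical buffers; both pointers must be 16-byte
+ * aligned and the areas must not overlap):
+ *
+ *     extern void memcpy16(void *dest, const void *src, unsigned int length);
+ *     memcpy16(dst_buf, src_buf, sizeof(src_buf));
+ */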
+func memcpy16
+#if ENABLE_ASSERTIONS
+ orr x3, x0, x1
+ tst x3, #0xf
+ ASM_ASSERT(eq)
+#endif
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lo m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy byte per byte */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end:
+ ret
+endfunc memcpy16
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL3
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el3
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu_el3:
+ mrs x0, sctlr_el3
+ bic x0, x0, x1
+ msr sctlr_el3, x0
+ isb /* ensure MMU is off */
+ dsb sy
+ ret
+endfunc disable_mmu_el3
+
+
+func disable_mmu_icache_el3
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu_el3
+endfunc disable_mmu_icache_el3
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL1
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu_el1:
+ mrs x0, sctlr_el1
+ bic x0, x0, x1
+ msr sctlr_el1, x0
+ isb /* ensure MMU is off */
+ dsb sy
+ ret
+endfunc disable_mmu_el1
+
+
+func disable_mmu_icache_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu_el1
+endfunc disable_mmu_icache_el1
+
+/* ---------------------------------------------------------------------------
+ * Enable the use of VFP at EL3
+ * ---------------------------------------------------------------------------
+ */
+#if SUPPORT_VFP
+func enable_vfp
+ mrs x0, cpacr_el1
+ orr x0, x0, #CPACR_VFP_BITS
+ msr cpacr_el1, x0
+ mrs x0, cptr_el3
+ mov x1, #AARCH64_CPTR_TFP
+ bic x0, x0, x1
+ msr cptr_el3, x0
+ isb
+ ret
+endfunc enable_vfp
+#endif
diff --git a/arch/arm/armv8-a/src/arch_mm.c b/arch/arm/armv8-a/src/arch_mm.c
new file mode 100644
index 00000000..ec26b5dc
--- /dev/null
+++ b/arch/arm/armv8-a/src/arch_mm.c
@@ -0,0 +1,66 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fwk_arch.h>
+#include <fwk_macros.h>
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+extern char __HEAP_START__;
+extern char __HEAP_END__;
+
+/*!
+ * \brief Architecture memory manager context.
+ */
+static struct arch_mm_ctx {
+ uintptr_t heap_break;
+ uintptr_t heap_end;
+} arch_mm_ctx = {
+ .heap_break = (uintptr_t)(&__HEAP_START__),
+ .heap_end = (uintptr_t)(&__HEAP_END__),
+};
+
+void *_sbrk(intptr_t increment)
+{
+ if (increment == 0) {
+ return (void *)arch_mm_ctx.heap_break;
+ } else {
+ uintptr_t heap_old = FWK_ALIGN_NEXT(arch_mm_ctx.heap_break, 16);
+ uintptr_t heap_new = heap_old + increment;
+
+ if (heap_new > arch_mm_ctx.heap_end) {
+ return (void *)-1;
+ } else {
+ arch_mm_ctx.heap_break = heap_new;
+
+ return (void *)heap_old;
+ }
+ }
+}
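+
+/*
+ * Illustrative behaviour: a call such as _sbrk(32) returns the 16-byte
+ * aligned current break and advances it by 32 bytes; a request that would
+ * move the break beyond __HEAP_END__ returns (void *)-1, which malloc()
+ * below maps to NULL.
+ */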
+
+void *malloc(size_t size)
+{
+ void *mem = _sbrk(size);
+
+ if (mem == ((void *)-1))
+ mem = NULL;
+
+ return mem;
+}
+
+void *calloc(size_t nmemb, size_t size)
+{
+ void *mem;
+
+ /* Reject requests whose total byte count would overflow size_t */
+ if ((nmemb != 0) && (size > (SIZE_MAX / nmemb)))
+ return NULL;
+
+ mem = malloc(nmemb * size);
+ if (mem)
+ memset(mem, 0, nmemb * size);
+
+ return mem;
+}
diff --git a/product/rcar/module/rcar_mstp_clock/src/mod_rcar_mstp_clock.c b/product/rcar/module/rcar_mstp_clock/src/mod_rcar_mstp_clock.c
index 36d3e8b6..8c4f1e6a 100644
--- a/product/rcar/module/rcar_mstp_clock/src/mod_rcar_mstp_clock.c
+++ b/product/rcar/module/rcar_mstp_clock/src/mod_rcar_mstp_clock.c
@@ -7,6 +7,7 @@
#include <clock_mstp_devices.h>
#include <mmio.h>
+#include <utils_def.h>
#include <mod_clock.h>
#include <mod_rcar_clock.h>
diff --git a/tools/build_system/cpu.mk b/tools/build_system/cpu.mk
index 51810293..3ad53e13 100644
--- a/tools/build_system/cpu.mk
+++ b/tools/build_system/cpu.mk
@@ -15,6 +15,9 @@ BS_ARCH_CPU := $(BS_FIRMWARE_CPU)
# Supported ARMv7-M CPUs
ARMV7M_CPUS := cortex-m3 cortex-m7
+# Supported ARMv8-A CPUs
+ARMV8A_CPUS := cortex-a53 cortex-a57 cortex-a57.cortex-a53
+
ifneq ($(findstring $(BS_FIRMWARE_CPU),$(ARMV7M_CPUS)),)
BS_ARCH_VENDOR := arm
BS_ARCH_ARCH := armv7-m
@@ -25,6 +28,13 @@ ifneq ($(findstring $(BS_FIRMWARE_CPU),$(ARMV7M_CPUS)),)
CFLAGS_CLANG += --target=arm-arm-none-eabi
CFLAGS += -mfloat-abi=soft # No hardware floating point support
+else ifneq ($(findstring $(BS_FIRMWARE_CPU),$(ARMV8A_CPUS)),)
+ BS_ARCH_VENDOR := arm
+ BS_ARCH_ARCH := armv8-a
+
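+ # No hosted libc is linked for this port, so avoid libc builtins; use
+ # -mstrict-align because unaligned accesses fault while the MMU is off
+ # and all memory is treated as Device memory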
+ CFLAGS += -fno-builtin -mstrict-align
+ DEP_CFLAGS_GCC += -DAARCH64
+ DEP_ASFLAGS_GCC += -D__ASSEMBLY__
else ifeq ($(BS_FIRMWARE_CPU),host)
BS_ARCH_VENDOR := none
BS_ARCH_ARCH := host
diff --git a/tools/build_system/rules.mk b/tools/build_system/rules.mk
index c8354a68..c3bbcacd 100644
--- a/tools/build_system/rules.mk
+++ b/tools/build_system/rules.mk
@@ -13,7 +13,9 @@ include $(BS_DIR)/cpu.mk
ifeq ($(BUILD_HAS_MULTITHREADING),yes)
# Add the OS directory to the main INCLUDES list
- INCLUDES += $(OS_DIR)/Include
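+ # The armv8-a port does not pull in the RTOS includes (OS_DIR is
+ # assumed here to hold the CMSIS RTOS used by the v7-M ports)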
+ ifeq ($(findstring $(BS_FIRMWARE_CPU),$(ARMV8A_CPUS)),)
+ INCLUDES += $(OS_DIR)/Include
+ endif
DEFINES += BUILD_HAS_MULTITHREADING
endif
@@ -148,7 +150,11 @@ LDFLAGS_GCC += -Wl,--cref
LDFLAGS_GCC += -Wl,--undefined=arch_exceptions
LDFLAGS_ARM += -Wl,--undefined=arch_exceptions
-BUILTIN_LIBS_GCC := -lc -lgcc
+ifneq ($(BS_ARCH_ARCH),armv8-a)
+ BUILTIN_LIBS_GCC := -lc -lgcc
+else
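+ # The armv8-a port provides its own libc subset (arch_libc.c), so the
+ # toolchain's libc/libgcc are not linked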
+ BUILTIN_LIBS_GCC := -nostdlib
+endif
ifeq ($(MODE),release)
O ?= $(DEFAULT_OPT_GCC_RELEASE)
@@ -177,15 +183,24 @@ INCLUDES += $(ARCH_DIR)/include
INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/include
INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/include
+ifeq ($(BS_ARCH_ARCH),armv8-a)
+ INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/include/common
+ INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/include/lib
+ INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/include/lib/libc
+ INCLUDES += $(ARCH_DIR)/$(BS_ARCH_VENDOR)/$(BS_ARCH_ARCH)/include/lib/libc/aarch64
+endif
+
#
# Always include the framework library
#
INCLUDES += $(FWK_DIR)/include
#
-# Always include CMSIS
+# CMSIS library
#
-INCLUDES += $(CMSIS_DIR)/Include
+ifneq ($(BS_ARCH_ARCH),armv8-a)
+ INCLUDES += $(CMSIS_DIR)/Include
+endif
#
# Toolchain-independent flags
@@ -201,6 +216,7 @@ ASFLAGS += $(ASFLAGS_GCC)
ARFLAGS = $(ARFLAGS_GCC)
LDFLAGS += $(LDFLAGS_$(BS_LINKER))
DEP_CFLAGS = $(DEP_CFLAGS_GCC)
+DEP_ASFLAGS = $(DEP_ASFLAGS_GCC)
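+# DEP_ASFLAGS carries assembler-only defines such as __ASSEMBLY__, which
+# shared headers use to hide C-only declarations from the assembler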
BUILTIN_LIBS = $(BUILTIN_LIBS_$(BS_LINKER))
CFLAGS += $(CFLAGS_$(BS_COMPILER))
@@ -255,7 +271,7 @@ $(OBJ_DIR)/%.o: %.s | $$(@D)/
$(OBJ_DIR)/%.o: %.S | $$(@D)/
$(call show-action,AS,$<)
- $(AS) -c $(CFLAGS) $(DEP_CFLAGS) $< -o $@
+ $(AS) -c $(CFLAGS) $(DEP_CFLAGS) $(DEP_ASFLAGS) $< -o $@
$(BUILD_PATH)%/:
$(call show-action,MD,$@)