Diffstat (limited to 'include/arch/aarch64')
-rw-r--r--   include/arch/aarch64/arch.h                136
-rw-r--r--   include/arch/aarch64/arch_features.h        17
-rw-r--r--   include/arch/aarch64/arch_helpers.h         43
-rw-r--r--   include/arch/aarch64/el2_common_macros.S   422
-rw-r--r--   include/arch/aarch64/el3_common_macros.S    63
5 files changed, 667 insertions, 14 deletions
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index c12dbc4b4..0fb4e7436 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -182,24 +182,44 @@
#define ID_AA64PFR0_CSV2_SHIFT U(56)
#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
/* Exception level handling */
#define EL_IMPL_NONE ULL(0)
#define EL_IMPL_A64ONLY ULL(1)
#define EL_IMPL_A64_A32 ULL(2)
+/* ID_AA64DFR0_EL1.TraceVer definitions */
+#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
+#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEVER_SUPPORTED ULL(1)
+#define ID_AA64DFR0_TRACEVER_LENGTH U(4)
+#define ID_AA64DFR0_TRACEFILT_SHIFT U(40)
+#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
+#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+#define ID_AA64DFR0_TRACEFILT_LENGTH U(4)
+
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+/* ID_AA64DFR0_EL1.TraceBuffer definitions */
+#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
+#define ID_AA64DFR0_TRACEBUFFER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEBUFFER_SUPPORTED ULL(1)
+
/* ID_AA64DFR0_EL1.MTPMU definitions (for ARMv8.6+) */
#define ID_AA64DFR0_MTPMU_SHIFT U(48)
#define ID_AA64DFR0_MTPMU_MASK ULL(0xf)
#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
/* ID_AA64ISAR0_EL1 definitions */
-#define ID_AA64ISAR0_RNDR_SHIFT U(60)
-#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
+#define ID_AA64ISAR0_RNDR_SHIFT U(60)
+#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
/* ID_AA64ISAR1_EL1 definitions */
#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
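
Note: the new ID register fields above follow the codebase's usual SHIFT/MASK idiom. A minimal sketch of reading the FEAT_RME field, assuming TF-A's generated sysreg accessors (it mirrors the helper added to arch_features.h later in this diff):

    #include <arch.h>
    #include <arch_helpers.h>

    /* Extract ID_AA64PFR0_EL1.RME; ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED (0)
     * means FEAT_RME is not implemented. */
    static unsigned int rme_field(void)
    {
            return (unsigned int)((read_id_aa64pfr0_el1() >>
                    ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK);
    }
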
@@ -266,6 +286,11 @@
#define ID_AA64MMFR1_EL1_VHE_SHIFT U(8)
#define ID_AA64MMFR1_EL1_VHE_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+
/* ID_AA64MMFR2_EL1 definitions */
#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
@@ -304,6 +329,9 @@
#define ID_AA64PFR1_MPAM_FRAC_SHIFT ULL(16)
#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
@@ -363,6 +391,7 @@
#define SCTLR_ITFSB_BIT (ULL(1) << 37)
#define SCTLR_TCF0_SHIFT U(38)
#define SCTLR_TCF0_MASK ULL(3)
+#define SCTLR_ENTP2_BIT (ULL(1) << 60)
/* Tag Check Faults in EL0 have no effect on the PE */
#define SCTLR_TCF0_NO_EFFECT U(0)
@@ -412,13 +441,20 @@
/* SCR definitions */
#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_NSE_SHIFT U(62)
+#define SCR_NSE_BIT (ULL(1) << SCR_NSE_SHIFT)
+#define SCR_GPF_BIT (UL(1) << 48)
#define SCR_TWEDEL_SHIFT U(30)
#define SCR_TWEDEL_MASK ULL(0xf)
+#define SCR_HXEn_BIT (UL(1) << 38)
+#define SCR_ENTP2_SHIFT U(41)
+#define SCR_ENTP2_BIT (UL(1) << SCR_ENTP2_SHIFT)
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
#define SCR_TWEDEn_BIT (UL(1) << 29)
#define SCR_ECVEN_BIT (UL(1) << 28)
#define SCR_FGTEN_BIT (UL(1) << 27)
#define SCR_ATA_BIT (UL(1) << 26)
+#define SCR_EnSCXT_BIT (UL(1) << 25)
#define SCR_FIEN_BIT (UL(1) << 21)
#define SCR_EEL2_BIT (UL(1) << 18)
#define SCR_API_BIT (UL(1) << 17)
@@ -435,13 +471,16 @@
#define SCR_FIQ_BIT (UL(1) << 2)
#define SCR_IRQ_BIT (UL(1) << 1)
#define SCR_NS_BIT (UL(1) << 0)
-#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_VALID_BIT_MASK U(0x24000002F8F)
#define SCR_RESET_VAL SCR_RES1_BITS
/* MDCR_EL3 definitions */
#define MDCR_EnPMSN_BIT (ULL(1) << 36)
#define MDCR_MPMX_BIT (ULL(1) << 35)
#define MDCR_MCCD_BIT (ULL(1) << 34)
+#define MDCR_NSTB(x) ((x) << 24)
+#define MDCR_NSTB_EL1 ULL(0x3)
+#define MDCR_NSTBE (ULL(1) << 26)
#define MDCR_MTPME_BIT (ULL(1) << 28)
#define MDCR_TDCC_BIT (ULL(1) << 27)
#define MDCR_SCCD_BIT (ULL(1) << 23)
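
Note: MDCR_NSTB() is a field inserter for the two-bit NSTB field at bits [25:24], with MDCR_NSTB_EL1 (0x3) selecting a Non-secure owning state with EL1 ownership; MDCR_NSTBE is a separate owning-security bit. A hedged sketch of how they compose (the policy shown is illustrative, not part of this patch):

    /* Assign the Trace Buffer to the Non-secure state, owned at EL1. */
    static void trbe_assign_ns_el1(void)
    {
            u_register_t mdcr = read_mdcr_el3();

            mdcr |= MDCR_NSTB(MDCR_NSTB_EL1);       /* NSTB[25:24] = 0b11 */
            mdcr &= ~MDCR_NSTBE;                    /* keep NSTBE clear */
            write_mdcr_el3(mdcr);
    }
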
@@ -465,6 +504,8 @@
/* MDCR_EL2 definitions */
#define MDCR_EL2_MTPME (U(1) << 28)
#define MDCR_EL2_HLP (U(1) << 26)
+#define MDCR_EL2_E2TB(x) ((x) << 24)
+#define MDCR_EL2_E2TB_EL1 U(0x3)
#define MDCR_EL2_HCCD (U(1) << 23)
#define MDCR_EL2_TTRF (U(1) << 19)
#define MDCR_EL2_HPMD (U(1) << 17)
@@ -496,13 +537,19 @@
#define VTTBR_BADDR_SHIFT U(0)
/* HCR definitions */
-#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
+#define HCR_RESET_VAL ULL(0x0)
+#define HCR_AMVOFFEN_SHIFT U(51)
+#define HCR_AMVOFFEN_BIT (ULL(1) << HCR_AMVOFFEN_SHIFT)
+#define HCR_TEA_BIT (ULL(1) << 47)
#define HCR_API_BIT (ULL(1) << 41)
#define HCR_APK_BIT (ULL(1) << 40)
#define HCR_E2H_BIT (ULL(1) << 34)
+#define HCR_HCD_BIT (ULL(1) << 29)
#define HCR_TGE_BIT (ULL(1) << 27)
#define HCR_RW_SHIFT U(31)
#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
+#define HCR_TWE_BIT (ULL(1) << 14)
+#define HCR_TWI_BIT (ULL(1) << 13)
#define HCR_AMO_BIT (ULL(1) << 5)
#define HCR_IMO_BIT (ULL(1) << 4)
#define HCR_FMO_BIT (ULL(1) << 3)
@@ -530,21 +577,32 @@
/* CPTR_EL3 definitions */
#define TCPAC_BIT (U(1) << 31)
-#define TAM_BIT (U(1) << 30)
+#define TAM_SHIFT U(30)
+#define TAM_BIT (U(1) << TAM_SHIFT)
#define TTA_BIT (U(1) << 20)
+#define ESM_BIT (U(1) << 12)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
-#define CPTR_EL3_RESET_VAL (TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT & ~(CPTR_EZ_BIT))
+#define CPTR_EL3_RESET_VAL ((TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT) & \
+ ~(CPTR_EZ_BIT | ESM_BIT))
/* CPTR_EL2 definitions */
#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
-#define CPTR_EL2_TAM_BIT (U(1) << 30)
+#define CPTR_EL2_TAM_SHIFT U(30)
+#define CPTR_EL2_TAM_BIT (U(1) << CPTR_EL2_TAM_SHIFT)
+#define CPTR_EL2_SMEN_MASK ULL(0x3)
+#define CPTR_EL2_SMEN_SHIFT U(24)
#define CPTR_EL2_TTA_BIT (U(1) << 20)
+#define CPTR_EL2_TSM_BIT (U(1) << 12)
#define CPTR_EL2_TFP_BIT (U(1) << 10)
#define CPTR_EL2_TZ_BIT (U(1) << 8)
#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
+/* VTCR_EL2 definitions */
+#define VTCR_RESET_VAL U(0x0)
+#define VTCR_EL2_MSA (U(1) << 31)
+
/* CPSR/SPSR definitions */
#define DAIF_FIQ_BIT (U(1) << 0)
#define DAIF_IRQ_BIT (U(1) << 1)
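
Note: the CPTR_EL3_RESET_VAL change above is more than reformatting. In C, '&' binds tighter than '|', so the old definition parsed as TCPAC_BIT | TAM_BIT | TTA_BIT | (TFP_BIT & ~CPTR_EZ_BIT); the value was correct only because CPTR_EZ_BIT does not overlap any of the OR'd bits. The parenthesised form states the intent and safely extends the mask with ESM_BIT. A standalone illustration of the pitfall:

    #include <assert.h>

    int main(void)
    {
            /* parses as (1u << 4) | ((1u << 3) & ~(1u << 4)) == 0x18 */
            unsigned int accidental = 1u << 4 | 1u << 3 & ~(1u << 4);
            /* intended grouping: clears bit 4, giving 0x08 */
            unsigned int intended = (1u << 4 | 1u << 3) & ~(1u << 4);

            assert(accidental != intended);
            return 0;
    }
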
@@ -570,6 +628,7 @@
#define SPSR_M_MASK U(0x1)
#define SPSR_M_AARCH64 U(0x0)
#define SPSR_M_AARCH32 U(0x1)
+#define SPSR_M_EL2H U(0x9)
#define SPSR_EL_SHIFT U(2)
#define SPSR_EL_WIDTH U(2)
@@ -870,6 +929,20 @@
#define ZCR_EL2_LEN_MASK U(0xf)
/*******************************************************************************
+ * Definitions for system register interface to SME as needed in EL3
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SMCR_EL3 S3_6_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_MASK U(0x1ff)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+
+/*******************************************************************************
* Definitions of MAIR encodings for device and normal memory
******************************************************************************/
/*
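
Note: a hedged sketch of how EL3 firmware might use these SME definitions, assuming the smcr_el3 and id_aa64smfr0_el1 accessors added to arch_helpers.h later in this diff (the enable sequence is illustrative, not taken from this patch):

    static void sme_enable_sketch(void)
    {
            u_register_t smcr = SMCR_ELX_LEN_MASK;  /* request maximum LEN */

            /* Stop trapping SME functionality to EL3. */
            write_cptr_el3(read_cptr_el3() | ESM_BIT);

            if ((read_id_aa64smfr0_el1() & ID_AA64SMFR0_EL1_FA64_BIT) != 0ULL) {
                    smcr |= SMCR_ELX_FA64_BIT; /* full A64 in streaming mode */
            }
            write_smcr_el3(smcr);
            isb();
    }
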
@@ -998,6 +1071,22 @@
#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+/* AMCNTENSET0_EL0 definitions */
+#define AMCNTENSET0_EL0_Pn_SHIFT U(0)
+#define AMCNTENSET0_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENSET1_EL0 definitions */
+#define AMCNTENSET1_EL0_Pn_SHIFT U(0)
+#define AMCNTENSET1_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENCLR0_EL0 definitions */
+#define AMCNTENCLR0_EL0_Pn_SHIFT U(0)
+#define AMCNTENCLR0_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENCLR1_EL0 definitions */
+#define AMCNTENCLR1_EL0_Pn_SHIFT U(0)
+#define AMCNTENCLR1_EL0_Pn_MASK ULL(0xffff)
+
/* AMCFGR_EL0 definitions */
#define AMCFGR_EL0_NCG_SHIFT U(28)
#define AMCFGR_EL0_NCG_MASK U(0xf)
@@ -1005,6 +1094,8 @@
#define AMCFGR_EL0_N_MASK U(0xff)
/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG0NC_SHIFT U(0)
+#define AMCGCR_EL0_CG0NC_MASK U(0xff)
#define AMCGCR_EL0_CG1NC_SHIFT U(8)
#define AMCGCR_EL0_CG1NC_MASK U(0xff)
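
Note: CG0NC and CG1NC report how many counters are implemented in the architected (group 0) and auxiliary (group 1) counter groups. A hedged sketch, assuming an amcgcr_el0 read accessor generated with DEFINE_RENAME_SYSREG_READ_FUNC (the accessor is not part of this patch):

    /* Number of group 0 counters; group 1 is analogous via CG1NC. */
    static unsigned int amu_group0_num_counters(void)
    {
            return (unsigned int)((read_amcgcr_el0() >>
                    AMCGCR_EL0_CG0NC_SHIFT) & AMCGCR_EL0_CG0NC_MASK);
    }
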
@@ -1029,7 +1120,8 @@
#define AMCG1IDR_VOFF_SHIFT U(16)
/* New bit added to AMCR_EL0 */
-#define AMCR_CG1RZ_BIT (ULL(0x1) << 17)
+#define AMCR_CG1RZ_SHIFT U(17)
+#define AMCR_CG1RZ_BIT (ULL(0x1) << AMCR_CG1RZ_SHIFT)
/*
* Definitions for virtual offset registers for architected activity monitor
@@ -1062,6 +1154,12 @@
#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
/*******************************************************************************
+ * Realm management extension register definitions
+ ******************************************************************************/
+#define GPCCR_EL3 S3_6_C2_C1_6
+#define GPTBR_EL3 S3_6_C2_C1_4
+
+/*******************************************************************************
* RAS system registers
******************************************************************************/
#define DISR_EL1 S3_0_C12_C1_1
@@ -1124,6 +1222,16 @@
#define GCR_EL1 S3_0_C1_C0_6
/*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+
+/*******************************************************************************
* Definitions for DynamicIQ Shared Unit registers
******************************************************************************/
#define CLUSTERPWRDN_EL1 S3_0_c15_c3_6
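
Note: before EL2 may access HCRX_EL2, SCR_EL3.HXEn (added above) must be set, otherwise the access traps to EL3. A hedged sketch of the initialisation order, using the is_feat_hcx_present() helper added to arch_features.h in this diff (the value written is only an illustrative choice):

    static void hcx_init_sketch(void)
    {
            if (is_feat_hcx_present()) {
                    /* Let EL2 access HCRX_EL2 without trapping to EL3. */
                    write_scr_el3(read_scr_el3() | SCR_HXEn_BIT);
                    /* Start from a known state; all controls disabled here. */
                    write_hcrx_el2(0UL);
            }
    }
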
@@ -1133,4 +1241,16 @@
#define DSU_CLUSTER_PWR_ON 1
#define DSU_CLUSTER_PWR_MASK U(1)
+/*******************************************************************************
+ * Definitions for CPU Power/Performance Management registers
+ ******************************************************************************/
+
+#define CPUPPMCR_EL3 S3_6_C15_C2_0
+#define CPUPPMCR_EL3_MPMMPINCTL_SHIFT UINT64_C(0)
+#define CPUPPMCR_EL3_MPMMPINCTL_MASK UINT64_C(0x1)
+
+#define CPUMPMMCR_EL3 S3_6_C15_C2_1
+#define CPUMPMMCR_EL3_MPMM_EN_SHIFT UINT64_C(0)
+#define CPUMPMMCR_EL3_MPMM_EN_MASK UINT64_C(0x1)
+
#endif /* ARCH_H */
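
Note: a hedged sketch of the intended use of the MPMM definitions above (the policy is illustrative): when CPUPPMCR_EL3.MPMMPINCTL reads as zero, MPMM is controlled by CPUMPMMCR_EL3.MPMM_EN rather than by external pins, so firmware can set the enable bit:

    static void mpmm_enable_sketch(void)
    {
            if (((read_cpuppmcr_el3() >> CPUPPMCR_EL3_MPMMPINCTL_SHIFT) &
                 CPUPPMCR_EL3_MPMMPINCTL_MASK) == 0U) {
                    write_cpumpmmcr_el3(read_cpumpmmcr_el3() |
                            (UINT64_C(1) << CPUMPMMCR_EL3_MPMM_EN_SHIFT));
            }
    }
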
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index dc0b7f306..46cd1c982 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -117,4 +117,21 @@ static inline unsigned int get_mpam_version(void)
ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
}
+static inline bool is_feat_hcx_present(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_HCX_SHIFT) &
+ ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+ /*
+ * Return the RME version, or zero if RME is not supported. The result
+ * can be used both as an integer RME version number and, compared
+ * against zero, as a boolean check for RME presence.
+ */
+ return (unsigned int)(read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
+
#endif /* ARCH_FEATURES_H */
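
Note: a short usage sketch for the two helpers added above (the calling code is illustrative):

    static void feature_probe_sketch(void)
    {
            /* Boolean use: non-zero means FEAT_RME is implemented. */
            if (get_armv9_2_feat_rme_support() !=
                ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED) {
                    /* Integer use: check for a specific version. */
                    if (get_armv9_2_feat_rme_support() ==
                        ID_AA64PFR0_FEAT_RME_V1) {
                            /* FEAT_RME v1 is implemented. */
                    }
            }

            if (is_feat_hcx_present()) {
                    /* HCRX_EL2 exists and may be context-switched. */
            }
    }
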
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index a41b3258e..733bb23c4 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -233,8 +233,10 @@ void dcsw_op_all(u_register_t op_type);
void disable_mmu_el1(void);
void disable_mmu_el3(void);
+void disable_mpu_el2(void);
void disable_mmu_icache_el1(void);
void disable_mmu_icache_el3(void);
+void disable_mpu_icache_el2(void);
/*******************************************************************************
* Misc. accessor prototypes
@@ -440,6 +442,8 @@ DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
DEFINE_SYSREG_READ_FUNC(cntpct_el0)
DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+DEFINE_SYSREG_RW_FUNCS(vtcr_el2)
+
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
@@ -505,6 +509,9 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el3, SMCR_EL3)
+
DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
@@ -532,9 +539,20 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(gcr_el1, GCR_EL1)
DEFINE_SYSREG_READ_FUNC(rndr)
DEFINE_SYSREG_READ_FUNC(rndrrs)
+/* FEAT_HCX Register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
+
/* DynamIQ Shared Unit power management */
DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
+/* CPU Power/Performance Management registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpuppmcr_el3, CPUPPMCR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpumpmmcr_el3, CPUMPMMCR_EL3)
+
+/* Armv9.2 RME Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3)
+
#define IS_IN_EL(x) \
(GET_EL(read_CurrentEl()) == MODE_EL##x)
@@ -578,7 +596,28 @@ static inline uint64_t el_implemented(unsigned int el)
}
}
-/* Previously defined accesor functions with incomplete register names */
+/*
+ * TLBIPAALLOS instruction
+ * (TLB Invalidate GPT Information by PA,
+ * All Entries, Outer Shareable)
+ */
+static inline void tlbipaallos(void)
+{
+ __asm__("SYS #6,c8,c1,#4");
+}
+
+/*
+ * Invalidate cached copies of GPT entries
+ * from TLBs by physical address
+ *
+ * @pa: the starting address for the range
+ * of invalidation
+ * @size: size of the range of invalidation
+ */
+void gpt_tlbi_by_pa(uint64_t pa, size_t size);
+
+
+/* Previously defined accessor functions with incomplete register names */
#define read_current_el() read_CurrentEl()
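
Note: a hedged sketch of the call pattern expected around a Granule Protection Table update; the wrapper name is illustrative, and tlbipaallos() is the all-entries alternative to the ranged invalidation:

    static void gpt_update_sketch(uint64_t pa, size_t size)
    {
            /* ... modify the GPT entries covering [pa, pa + size) ... */
            gpt_tlbi_by_pa(pa, size);       /* drop cached GPT info by PA */
            dsbsy();                        /* complete the invalidation */
    }
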
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
new file mode 100644
index 000000000..7bf480698
--- /dev/null
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL2_COMMON_MACROS_S
+#define EL2_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#include <platform_def.h>
+
+ /*
+ * Helper macro to initialise system registers at EL2.
+ */
+ .macro el2_arch_init_common
+
+ /* ---------------------------------------------------------------------
+ * SCTLR_EL2 has already been initialised - read current value before
+ * modifying.
+ *
+ * SCTLR_EL2.I: Enable the instruction cache.
+ *
+ * SCTLR_EL2.SA: Enable Stack Alignment check. A SP alignment fault
+ * exception is generated if a load or store instruction executed at
+ * EL2 uses the SP as the base address and the SP is not aligned to a
+ * 16-byte boundary.
+ *
+ * SCTLR_EL2.A: Enable Alignment fault checking. All instructions that
+ * load or store one or more registers have an alignment check that the
+ * address being accessed is aligned to the size of the data element(s)
+ * being accessed.
+ * ---------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el2
+ orr x0, x0, x1
+ msr sctlr_el2, x0
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise HCR_EL2, setting all fields rather than relying on HW.
+ * All fields are architecturally UNKNOWN on reset. The following fields
+ * do not change during the TF lifetime. The remaining fields are set to
+ * zero here but are updated ahead of transitioning to a lower EL in the
+ * function cm_init_context_common().
+ *
+ * HCR_EL2.TWE: Set to zero so that execution of WFE instructions at
+ * EL2, EL1 and EL0 are not trapped to EL2.
+ *
+ * HCR_EL2.TWI: Set to zero so that execution of WFI instructions at
+ * EL2, EL1 and EL0 are not trapped to EL2.
+ *
+ * HCR_EL2.HCD: Set to zero to enable HVC calls at EL1 and above,
+ * from both Security states and both Execution states.
+ *
+ * HCR_EL2.TEA: Set to one to route External Aborts and SError
+ * Interrupts to EL2 when executing at any EL.
+ *
+ * HCR_EL2.{API,APK}: For Armv8.3 pointer authentication feature,
+ * disable traps to EL2 when accessing key registers or using
+ * pointer authentication instructions from lower ELs.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((HCR_RESET_VAL | HCR_TEA_BIT) \
+ & ~(HCR_TWE_BIT | HCR_TWI_BIT | HCR_HCD_BIT))
+#if CTX_INCLUDE_PAUTH_REGS
+ /*
+ * If the pointer authentication registers are saved during world
+ * switches, enable pointer authentication everywhere, as it is safe to
+ * do so.
+ */
+ orr x0, x0, #(HCR_API_BIT | HCR_APK_BIT)
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+ msr hcr_el2, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise MDCR_EL2, setting all fields rather than relying on
+ * hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * MDCR_EL2.TDOSA: Set to zero so that EL1 System register
+ * accesses to the powerdown debug registers do not trap to EL2.
+ *
+ * MDCR_EL2.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+ * debug registers, other than those registers that are controlled by
+ * MDCR_EL2.TDOSA.
+ *
+ * MDCR_EL2.TPM: Set to zero so that EL0 and EL1 System
+ * register accesses to all Performance Monitors registers do not trap
+ * to EL2.
+ *
+ * MDCR_EL2.HPMD: Set to zero so that event counting by the
+ * programmable counters PMEVCNTR<n>_EL0 is allowed at EL2. If
+ * ARMv8.2 Debug is not implemented this bit does not have any effect
+ * on the counters unless there is support for the implementation
+ * defined authentication interface
+ * ExternalSecureNoninvasiveDebugEnabled().
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((MDCR_EL2_RESET_VAL | \
+ MDCR_SPD32(MDCR_SPD32_DISABLE)) \
+ & ~(MDCR_EL2_HPMD | MDCR_TDOSA_BIT | \
+ MDCR_TDA_BIT | MDCR_TPM_BIT))
+
+ msr mdcr_el2, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise PMCR_EL0 setting all fields rather than relying
+ * on hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * PMCR_EL0.DP: Set to one so that the cycle counter,
+ * PMCCNTR_EL0 does not count when event counting is prohibited.
+ *
+ * PMCR_EL0.X: Set to zero to disable export of events.
+ *
+ * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+ * counts on every clock cycle.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_DP_BIT) & \
+ ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
+
+ msr pmcr_el0, x0
+
+ /* ---------------------------------------------------------------------
+ * Enable External Aborts and SError Interrupts now that the exception
+ * vectors have been setup.
+ * ---------------------------------------------------------------------
+ */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* ---------------------------------------------------------------------
+ * Initialise CPTR_EL2, setting all fields rather than relying on hw.
+ * All fields are architecturally UNKNOWN on reset.
+ *
+ * CPTR_EL2.TCPAC: Set to zero so that any accesses to CPACR_EL1 do
+ * not trap to EL2.
+ *
+ * CPTR_EL2.TTA: Set to zero so that System register accesses to the
+ * trace registers do not trap to EL2.
+ *
+ * CPTR_EL2.TFP: Set to zero so that accesses to the V- or Z- registers
+ * by Advanced SIMD, floating-point or SVE instructions (if implemented)
+ * do not trap to EL2.
+ */
+
+ mov_imm x0, (CPTR_EL2_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+ msr cptr_el2, x0
+
+ /*
+ * If Data Independent Timing (DIT) functionality is implemented,
+ * always enable DIT in EL2
+ */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
+ cmp x0, #ID_AA64PFR0_DIT_SUPPORTED
+ bne 1f
+ mov x0, #DIT_BIT
+ msr DIT, x0
+1:
+ .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL2. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter enables or disables
+ * some of the actions.
+ *
+ * _init_sctlr:
+ * Whether the macro needs to initialise SCTLR_EL2, including configuring
+ * the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ * Whether the macro needs to detect the type of boot (cold/warm). The
+ * detection is based on the platform entrypoint address: if it is zero
+ * then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ * this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ * Whether the macro needs to identify the CPU that is calling it: primary
+ * CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ * the platform initialisations, while the secondaries will be put in a
+ * platform-specific state in the meantime.
+ *
+ * If the caller knows this macro will only be called by the primary CPU
+ * then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ * Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ * Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ * Address of the exception vectors to program in the VBAR_EL2 register.
+ *
+ * _pie_fixup_size:
+ * Size of the memory region in which to fix up the Global
+ * Descriptor Table (GDT).
+ *
+ * A non-zero value is expected when the firmware needs the GDT
+ * to be fixed up.
+ *
+ * -----------------------------------------------------------------------------
+ */
+ .macro el2_entrypoint_common \
+ _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot, \
+ _init_memory, _init_c_runtime, _exception_vectors, \
+ _pie_fixup_size
+
+ .if \_init_sctlr
+ /* -------------------------------------------------------------
+ * This is the initialisation of SCTLR_EL2 and so must ensure
+ * that all fields are explicitly set rather than relying on hw.
+ * Some fields reset to an IMPLEMENTATION DEFINED value and
+ * others are architecturally UNKNOWN on reset.
+ *
+ * SCTLR.EE: Set the CPU endianness before doing anything that
+ * might involve memory reads or writes. Set to zero to select
+ * Little Endian.
+ *
+ * SCTLR_EL2.WXN: For the EL2 translation regime, this field can
+ * force all memory regions that are writeable to be treated as
+ * XN (Execute-never). Set to zero so that this control has no
+ * effect on memory access permissions.
+ *
+ * SCTLR_EL2.SA: Set to zero to disable Stack Alignment check.
+ *
+ * SCTLR_EL2.A: Set to zero to disable Alignment fault checking.
+ *
+ * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+ * safe behaviour upon exception entry to EL2.
+ * -------------------------------------------------------------
+ */
+ mov_imm x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+ | SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
+ msr sctlr_el2, x0
+ isb
+ .endif /* _init_sctlr */
+
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
+ .if \_warm_boot_mailbox
+ /* -------------------------------------------------------------
+ * This code will be executed for both warm and cold resets.
+ * Now is the time to distinguish between the two.
+ * Query the platform entrypoint address and if it is not zero
+ * then it means it is a warm boot so jump to this address.
+ * -------------------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ cbz x0, do_cold_boot
+ br x0
+
+ do_cold_boot:
+ .endif /* _warm_boot_mailbox */
+
+ .if \_pie_fixup_size
+#if ENABLE_PIE
+ /*
+ * ------------------------------------------------------------
+ * If PIE is enabled, fix up the Global Descriptor Table only
+ * once, during the primary core's cold boot path.
+ *
+ * The compile-time base address, required for the fixup, is
+ * calculated using the "pie_fixup" label present within the
+ * first page.
+ * ------------------------------------------------------------
+ */
+ pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(PAGE_SIZE_MASK)
+ mov_imm x1, \_pie_fixup_size
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+ .endif /* _pie_fixup_size */
+
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, \_exception_vectors
+ msr vbar_el2, x0
+ isb
+
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ bl reset_handler
+
+ el2_arch_init_common
+
+ .if \_secondary_cold_boot
+ /* -------------------------------------------------------------
+ * Check if this is a primary or secondary CPU cold boot.
+ * The primary CPU will set up the platform while the
+ * secondaries are placed in a platform-specific state until the
+ * primary CPU performs the necessary actions to bring them out
+ * of that state and allows entry into the OS.
+ * -------------------------------------------------------------
+ */
+ bl plat_is_my_cpu_primary
+ cbnz w0, do_primary_cold_boot
+
+ /* This is a cold boot on a secondary CPU */
+ bl plat_secondary_cold_boot_setup
+ /* plat_secondary_cold_boot_setup() is not supposed to return */
+ bl el2_panic
+ do_primary_cold_boot:
+ .endif /* _secondary_cold_boot */
+
+ /* ---------------------------------------------------------------------
+ * Initialize memory now. Secondary CPU initialization won't get to this
+ * point.
+ * ---------------------------------------------------------------------
+ */
+
+ .if \_init_memory
+ bl platform_mem_init
+ .endif /* _init_memory */
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment:
+ * - Zero-initialise the NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section (if any).
+ * - Relocate the data section from ROM to RAM, if required.
+ * ---------------------------------------------------------------------
+ */
+ .if \_init_c_runtime
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl zeromem
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
+ adrp x0, __DATA_RAM_START__
+ add x0, x0, :lo12:__DATA_RAM_START__
+ adrp x1, __DATA_ROM_START__
+ add x1, x1, :lo12:__DATA_ROM_START__
+ adrp x2, __DATA_RAM_END__
+ add x2, x2, :lo12:__DATA_RAM_END__
+ sub x2, x2, x0
+ bl memcpy16
+#endif
+ .endif /* _init_c_runtime */
+
+ /* ---------------------------------------------------------------------
+ * Use SP_EL0 for the C runtime stack.
+ * ---------------------------------------------------------------------
+ */
+ msr spsel, #0
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+ * the MMU is enabled. There is no risk of reading stale stack memory
+ * after enabling the MMU as only the primary CPU is running at the
+ * moment.
+ * ---------------------------------------------------------------------
+ */
+ bl plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+ .if \_init_c_runtime
+ bl update_stack_protector_canary
+ .endif /* _init_c_runtime */
+#endif
+ .endm
+
+ .macro apply_at_speculative_wa
+#if ERRATA_SPECULATIVE_AT
+ /*
+ * Explicitly save x30 so as to free up a register and to enable
+ * branching; also save x29, which will be used in the called
+ * function.
+ */
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ bl save_and_update_ptw_el1_sys_regs
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+ .endm
+
+ .macro restore_ptw_el1_sys_regs
+#if ERRATA_SPECULATIVE_AT
+ /* -----------------------------------------------------------
+ * In case of ERRATA_SPECULATIVE_AT, must follow below order
+ * to ensure that page table walk is not enabled until
+ * restoration of all EL1 system registers. TCR_EL1 register
+ * should be updated at the end which restores previous page
+ * table walk setting of stage 1, i.e. the TCR_EL1.EPDx bits. ISB
+ * ensures that the CPU performs the steps below in order.
+ *
+ * 1. Ensure all other system registers are written before
+ * updating SCTLR_EL1 using ISB.
+ * 2. Restore SCTLR_EL1 register.
+ * 3. Ensure SCTLR_EL1 written successfully using ISB.
+ * 4. Restore TCR_EL1 register.
+ * -----------------------------------------------------------
+ */
+ isb
+ ldp x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
+ msr sctlr_el1, x28
+ isb
+ msr tcr_el1, x29
+#endif
+ .endm
+
+#endif /* EL2_COMMON_MACROS_S */
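
Note: for comparison with its EL3 counterpart, an EL2 entrypoint would be expected to invoke this macro roughly as below. This is a hedged sketch modelled on how TF-A's BL1 invokes el3_entrypoint_common; the build flags and vector symbol are taken from that usage and need not match any real EL2 platform:

    func bl1_entrypoint
            el2_entrypoint_common                                   \
                    _init_sctlr=1                                   \
                    _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                    _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                    _init_memory=1                                  \
                    _init_c_runtime=1                               \
                    _exception_vectors=bl1_exceptions               \
                    _pie_fixup_size=0

            /* ... platform-specific BL1 setup would continue here ... */
    endfunc bl1_entrypoint
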
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 973433575..f29def7f3 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -88,6 +88,13 @@
*/
orr x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
#endif
+#if ENABLE_RME
+ /*
+ * TODO: Setting the EEL2 bit to allow EL3 access to Secure-only registers
+ * in context management. This will need to be refactored.
+ */
+ orr x0, x0, #SCR_EEL2_BIT
+#endif
msr scr_el3, x0
/* ---------------------------------------------------------------------
@@ -126,13 +133,31 @@
* Debug is not implemented this bit does not have any effect on the
* counters unless there is support for the implementation defined
* authentication interface ExternalSecureNoninvasiveDebugEnabled().
+ *
+ * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that the Trace Buffer
+ * owning security state is Secure state. If FEAT_TRBE is implemented,
+ * accesses to Trace Buffer control registers at EL2 and EL1 in any
+ * security state generate trap exceptions to EL3.
+ * If FEAT_TRBE is not implemented, these bits are RES0.
+ *
+ * MDCR_EL3.TTRF: Set to one so that accesses to the trace filter
+ * control registers from non-Monitor mode generate an EL3 trap
+ * exception, unless the access generates a higher-priority exception,
+ * when trace filter control (FEAT_TRF) is implemented.
+ * When FEAT_TRF is not implemented, this bit is RES0.
* ---------------------------------------------------------------------
*/
mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
- MDCR_TDA_BIT | MDCR_TPM_BIT))
+ MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
+ MDCR_NSTBE | MDCR_TTRF_BIT))
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
+ cbz x1, 1f
+ orr x0, x0, #MDCR_TTRF_BIT
+1:
msr mdcr_el3, x0
/* ---------------------------------------------------------------------
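
Note: the mrs/ubfx/cbz sequence above is an inline feature probe; in the style of arch_features.h it would read as below. The helper name is illustrative (not part of this patch), and the assembly accepts any non-zero TraceFilt value, which is slightly laxer than comparing against the SUPPORTED value:

    #include <stdbool.h>

    static inline bool is_feat_trf_present(void)
    {
            return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT) &
                    ID_AA64DFR0_TRACEFILT_MASK) ==
                    ID_AA64DFR0_TRACEFILT_SUPPORTED);
    }
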
@@ -179,6 +204,12 @@
* CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
* CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
*
+ * CPTR_EL3.TTA: Set to one so that accesses to the trace system
+ * registers trap to EL3 from all exception levels and security
+ * states when system register trace is implemented.
+ * When system register trace is not implemented, this bit is RES0 and
+ * hence set to zero.
+ *
* CPTR_EL3.TTA: Set to zero so that System register accesses to the
* trace registers do not trap to EL3.
*
@@ -191,9 +222,17 @@
*
* CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
* to EL3 by default.
+ *
+ * CPTR_EL3.ESM: Set to zero so that all SME functionality is trapped
+ * to EL3 by default.
*/
mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_TRACEVER_SHIFT, #ID_AA64DFR0_TRACEVER_LENGTH
+ cbz x1, 1f
+ orr x0, x0, #TTA_BIT
+1:
msr cptr_el3, x0
/*
@@ -336,6 +375,7 @@
msr vbar_el3, x0
isb
+#if !(defined(IMAGE_BL2) && ENABLE_RME)
/* ---------------------------------------------------------------------
* It is a cold boot.
* Perform any processor specific actions upon reset e.g. cache, TLB
@@ -343,6 +383,7 @@
* ---------------------------------------------------------------------
*/
bl reset_handler
+#endif
el3_arch_init_common
@@ -385,17 +426,31 @@
* ---------------------------------------------------------------------
*/
.if \_init_c_runtime
-#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_INV_DCACHE)
+#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
+ ((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME))
/* -------------------------------------------------------------
* Invalidate the RW memory used by the BL31 image. This
* includes the data and NOBITS sections. This is done to
* safeguard against possible corruption of this memory by
* dirty cache lines in a system cache as a result of use by
- * an earlier boot loader stage.
+ * an earlier boot loader stage. If PIE is enabled, however,
+ * RO sections including the GOT may be modified during
+ * PIE fixup. Therefore, to be on the safe side, invalidate
+ * the entire image region if PIE is enabled.
* -------------------------------------------------------------
*/
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+ adrp x0, __TEXT_START__
+ add x0, x0, :lo12:__TEXT_START__
+#else
+ adrp x0, __RO_START__
+ add x0, x0, :lo12:__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
adrp x0, __RW_START__
add x0, x0, :lo12:__RW_START__
+#endif /* ENABLE_PIE */
adrp x1, __RW_END__
add x1, x1, :lo12:__RW_END__
sub x1, x1, x0