author | Treehugger Robot <treehugger-gerrit@google.com> | 2021-11-05 21:40:44 +0000
---|---|---
committer | Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com> | 2021-11-05 21:40:44 +0000
commit | d2ebd507612c6d0e62ab68cc89a76b29d47a4394 (patch) |
tree | 48f7e7e1c9d2b0913361c2f1a922b3a36b97aa0b /lib |
parent | 368a21e4a975659615c571a918bb9cb3a7c01c46 (diff) |
parent | f4dc25e77d057de867462e9a5a14548b1164545f (diff) |
download | arm-trusted-firmware-android13-qpr1-s3-release.tar.gz |
Merge "Merge remote-tracking branch 'aosp/upstream-master' into HEAD" am: 340853027b am: 0e7ad30895 am: 4c63dcff82 am: 8a51068b5f am: f4dc25e77dt_frc_odp_330442040t_frc_odp_330442000t_frc_con_330443020t_frc_cbr_330443000t_frc_ase_330444010t_frc_art_330443060t_frc_adb_330444000android-13.0.0_r83android-13.0.0_r82android-13.0.0_r81android-13.0.0_r80android-13.0.0_r79android-13.0.0_r78android-13.0.0_r77android-13.0.0_r76android-13.0.0_r75android-13.0.0_r74android-13.0.0_r73android-13.0.0_r72android-13.0.0_r71android-13.0.0_r70android-13.0.0_r69android-13.0.0_r68android-13.0.0_r67android-13.0.0_r66android-13.0.0_r65android-13.0.0_r64android-13.0.0_r63android-13.0.0_r62android-13.0.0_r61android-13.0.0_r60android-13.0.0_r59android-13.0.0_r58android-13.0.0_r57android-13.0.0_r56android-13.0.0_r55android-13.0.0_r54android-13.0.0_r53android-13.0.0_r52android-13.0.0_r51android-13.0.0_r50android-13.0.0_r49android-13.0.0_r48android-13.0.0_r47android-13.0.0_r46android-13.0.0_r45android-13.0.0_r44android-13.0.0_r43android-13.0.0_r42android-13.0.0_r41android-13.0.0_r40android-13.0.0_r39android-13.0.0_r38android-13.0.0_r37android-13.0.0_r36android-13.0.0_r35android-13.0.0_r34android-13.0.0_r33android-13.0.0_r32android-13.0.0_r30android-13.0.0_r29android-13.0.0_r28android-13.0.0_r27android-13.0.0_r24android-13.0.0_r23android-13.0.0_r22android-13.0.0_r21android-13.0.0_r20android-13.0.0_r19android-13.0.0_r18android-13.0.0_r17android-13.0.0_r16aml_uwb_331910010aml_uwb_331820070aml_uwb_331613010aml_uwb_331611010aml_uwb_331410010aml_uwb_331310030aml_uwb_331115000aml_uwb_331015040aml_uwb_330810010aml_tz4_332714070aml_tz4_332714050aml_tz4_332714010aml_tz4_331910000aml_tz4_331314030aml_tz4_331314020aml_tz4_331314010aml_tz4_331012050aml_tz4_331012040aml_tz4_331012000aml_go_wif_330911000aml_go_uwb_330912000aml_go_tz4_330912000aml_go_tet_330914010aml_go_swc_330913000aml_go_sta_330911000aml_go_sdk_330810000aml_go_sch_330911000aml_go_res_330912000aml_go_per_330912000aml_go_odp_330913000aml_go_odp_330912000aml_go_neu_330912000aml_go_net_330913000aml_go_mpr_330912000aml_go_ase_330913000aml_go_ads_330915100aml_go_ads_330915000aml_go_ads_330913000aml_go_adb_330913000aml_ase_331311020aml_ase_331112000aml_ase_331011020aml_ads_331920180aml_ads_331814200aml_ads_331710270aml_ads_331611190aml_ads_331511020aml_ads_331418080aml_ads_331131000android13-qpr3-s9-releaseandroid13-qpr3-s8-releaseandroid13-qpr3-s7-releaseandroid13-qpr3-s6-releaseandroid13-qpr3-s5-releaseandroid13-qpr3-s4-releaseandroid13-qpr3-s3-releaseandroid13-qpr3-s2-releaseandroid13-qpr3-s14-releaseandroid13-qpr3-s13-releaseandroid13-qpr3-s12-releaseandroid13-qpr3-s11-releaseandroid13-qpr3-s10-releaseandroid13-qpr3-s1-releaseandroid13-qpr3-releaseandroid13-qpr3-c-s8-releaseandroid13-qpr3-c-s7-releaseandroid13-qpr3-c-s6-releaseandroid13-qpr3-c-s5-releaseandroid13-qpr3-c-s4-releaseandroid13-qpr3-c-s3-releaseandroid13-qpr3-c-s2-releaseandroid13-qpr3-c-s12-releaseandroid13-qpr3-c-s11-releaseandroid13-qpr3-c-s10-releaseandroid13-qpr3-c-s1-releaseandroid13-qpr2-s9-releaseandroid13-qpr2-s8-releaseandroid13-qpr2-s7-releaseandroid13-qpr2-s6-releaseandroid13-qpr2-s5-releaseandroid13-qpr2-s3-releaseandroid13-qpr2-s2-releaseandroid13-qpr2-s12-releaseandroid13-qpr2-s11-releaseandroid13-qpr2-s10-releaseandroid13-qpr2-s1-releaseandroid13-qpr2-releaseandroid13-qpr2-b-s1-releaseandroid13-qpr1-s8-releaseandroid13-qpr1-s7-releaseandroid13-qpr1-s6-releaseandroid13-qpr1-s5-releaseandroid13-qpr1-s4-releaseandroid13-qpr1-s3-releaseandroid13-qpr1-s2-releaseandroid13-qpr1-s1-releaseandroid13-q
pr1-releaseandroid13-mainline-uwb-releaseandroid13-mainline-tzdata4-releaseandroid13-mainline-go-wifi-releaseandroid13-mainline-go-uwb-releaseandroid13-mainline-go-tzdata4-releaseandroid13-mainline-go-tethering-releaseandroid13-mainline-go-sdkext-releaseandroid13-mainline-go-scheduling-releaseandroid13-mainline-go-resolv-releaseandroid13-mainline-go-permission-releaseandroid13-mainline-go-os-statsd-releaseandroid13-mainline-go-odp-releaseandroid13-mainline-go-neuralnetworks-releaseandroid13-mainline-go-networking-releaseandroid13-mainline-go-mediaprovider-releaseandroid13-mainline-go-media-swcodec-releaseandroid13-mainline-go-appsearch-releaseandroid13-mainline-go-adservices-releaseandroid13-mainline-go-adbd-releaseandroid13-mainline-appsearch-releaseandroid13-mainline-adservices-releaseandroid13-frc-odp-releaseandroid13-frc-conscrypt-releaseandroid13-frc-cellbroadcast-releaseandroid13-frc-art-releaseandroid13-frc-adbd-releaseandroid13-devandroid13-d4-s2-releaseandroid13-d4-s1-releaseandroid13-d4-releaseandroid13-d3-s1-releaseandroid13-d2-releaseaml_tz4_332714010
Original change: https://android-review.googlesource.com/c/platform/external/arm-trusted-firmware/+/1824194
Change-Id: I9c0d1e863a9ee0cf8c64a31cb1970e8f279991a1
Diffstat (limited to 'lib')
29 files changed, 2585 insertions, 600 deletions
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index e9734ac2c..8b16f93cc 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,8 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <assert_macros.S>
+#include <common/bl_common.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>

 .globl smc
 .globl zeromem
@@ -14,6 +16,9 @@
 .globl memcpy4
 .globl disable_mmu_icache_secure
 .globl disable_mmu_secure
+ .globl fixup_gdt_reloc
+
+#define PAGE_START_MASK ~(PAGE_SIZE_MASK)

 func smc
 /*
@@ -187,3 +192,124 @@ func disable_mmu_icache_secure
 ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
 b do_disable_mmu
 endfunc disable_mmu_icache_secure
+
+/* ---------------------------------------------------------------------------
+ * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
+ * (.rel.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for the start and end of the section. For the GOT, we
+ * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
+ * __RELA_START__ and __RELA_END__.
+ *
+ * The function takes the limits of the memory to apply fixups to as
+ * arguments (which are usually the limits of the relocatable BL image):
+ *   r0 - the start of the fixup region
+ *   r1 - the limit of the fixup region
+ * These addresses have to be 4KB page aligned.
+ * ---------------------------------------------------------------------------
+ */
+
+/* Relocation codes */
+#define R_ARM_RELATIVE	23
+
+func fixup_gdt_reloc
+ mov r6, r0
+ mov r7, r1
+
+#if ENABLE_ASSERTIONS
+ /* Test if the limits are 4K aligned */
+ orr r0, r0, r1
+ mov r1, #(PAGE_SIZE_MASK)
+ tst r0, r1
+ ASM_ASSERT(eq)
+#endif
+ /*
+  * Calculate the offset based on the return address in lr.
+  * Assume that this function is called within a page at the start of
+  * the fixup region.
+  */
+ ldr r1, =PAGE_START_MASK
+ and r2, lr, r1
+ subs r0, r2, r6	/* Diff(S) = Current Address - Compiled Address */
+ beq 3f			/* Diff(S) = 0. No relocation needed */
+
+ ldr r1, =__GOT_START__
+ add r1, r1, r0
+ ldr r2, =__GOT_END__
+ add r2, r2, r0
+
+ /*
+  * The GOT is an array of 32-bit addresses which must be fixed up as
+  * new_addr = old_addr + Diff(S).
+  * new_addr is the address the binary is currently executing from,
+  * and old_addr is the address at compile time.
+  */
+1: ldr r3, [r1]
+
+ /* Skip adding offset if address is < lower limit */
+ cmp r3, r6
+ blo 2f
+
+ /* Skip adding offset if address is > upper limit */
+ cmp r3, r7
+ bhi 2f
+ add r3, r3, r0
+ str r3, [r1]
+
+2: add r1, r1, #4
+ cmp r1, r2
+ blo 1b
+
+ /* Starting dynamic relocations. Use ldr to get RELA_START and END */
+3: ldr r1, =__RELA_START__
+ add r1, r1, r0
+ ldr r2, =__RELA_END__
+ add r2, r2, r0
+
+ /*
+  * According to the ELF-32 specification, the RELA data structure is
+  * as follows:
+  *	typedef struct {
+  *		Elf32_Addr r_offset;
+  *		Elf32_Word r_info;
+  *	} Elf32_Rela;
+  *
+  * r_offset is the address of the reference.
+  * r_info is the symbol index and type of relocation (in this case
+  * code 23, which corresponds to R_ARM_RELATIVE).
+  *
+  * The size of the Elf32_Rela structure is 8 bytes.
+  */
+
+ /* Skip R_ARM_NONE entry with code 0 */
+1: ldr r3, [r1, #4]
+ ands r3, r3, #0xff
+ beq 2f
+
+#if ENABLE_ASSERTIONS
+ /* Assert that the relocation type is R_ARM_RELATIVE */
+ cmp r3, #R_ARM_RELATIVE
+ ASM_ASSERT(eq)
+#endif
+ ldr r3, [r1]		/* r_offset */
+ add r3, r0, r3		/* Diff(S) + r_offset */
+ ldr r4, [r3]
+
+ /* Skip adding offset if address is < lower limit */
+ cmp r4, r6
+ blo 2f
+
+ /* Skip adding offset if address is >= upper limit */
+ cmp r4, r7
+ bhs 2f
+
+ add r4, r0, r4
+ str r4, [r3]
+
+2: add r1, r1, #8
+ cmp r1, r2
+ blo 1b
+ bx lr
+endfunc fixup_gdt_reloc
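[For readers tracing this patch, the fixup logic above can be summarized in C. This is a minimal sketch, assuming a 32-bit PIE image; the function name fixup_image and its parameters are hypothetical stand-ins for the assembly's registers and the linker-exported __GOT_*/__RELA_* symbols, not part of the patch.]

#include <stdint.h>

#define R_ARM_RELATIVE 23U

typedef struct {
	uint32_t r_offset;	/* address of the reference */
	uint32_t r_info;	/* symbol index and relocation type */
} Elf32_Rela;

/*
 * Hypothetical C equivalent of fixup_gdt_reloc: 'diff' is the runtime
 * load address minus the compile-time address; [lo, hi] bounds the image.
 */
static void fixup_image(uint32_t diff, uint32_t lo, uint32_t hi,
			uint32_t *got_start, uint32_t *got_end,
			Elf32_Rela *rela_start, Elf32_Rela *rela_end)
{
	if (diff == 0U)
		return;				/* running at link address */

	/* GOT entries: new_addr = old_addr + Diff(S) */
	for (uint32_t *e = got_start; e < got_end; e++) {
		if (*e >= lo && *e <= hi)	/* matches the blo/bhi checks */
			*e += diff;
	}

	/* .rela.dyn entries: patch the word each r_offset points at */
	for (Elf32_Rela *r = rela_start; r < rela_end; r++) {
		if ((r->r_info & 0xffU) == 0U)
			continue;		/* skip R_ARM_NONE */
		uint32_t *addr = (uint32_t *)(uintptr_t)(r->r_offset + diff);
		if (*addr >= lo && *addr < hi)	/* matches the blo/bhs checks */
			*addr += diff;
	}
}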
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index 9b5d787ed..6ed800cbc 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -78,6 +78,10 @@ func prepare_cpu_pwr_dwn
 mov r1, #CPU_PWR_DWN_OPS
 add r1, r1, r2, lsl #2
 ldr r1, [r0, r1]
+#if ENABLE_ASSERTIONS
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
 bx r1
 endfunc prepare_cpu_pwr_dwn
@@ -146,6 +150,10 @@ func get_cpu_ops_ptr

 /* Subtract the increment and offset to get the cpu-ops pointer */
 sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
 error_exit:
 bx lr
 endfunc get_cpu_ops_ptr
@@ -224,7 +232,15 @@ func print_errata_status
  * function. If it's non-NULL, jump to the function in turn.
  */
 bl _cpu_data
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
 ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
 ldr r0, [r1, #CPU_ERRATA_FUNC]
 cmp r0, #0
 beq 1f
diff --git a/lib/cpus/aarch64/cortex_klein.S b/lib/cpus/aarch64/cortex_a510.S
index d3a8ab481..33103228a 100644
--- a/lib/cpus/aarch64/cortex_klein.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2021, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,54 +7,54 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <common/bl_common.h>
-#include <cortex_klein.h>
+#include <cortex_a510.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>

 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
-#error "Cortex Klein must be compiled with HW_ASSISTED_COHERENCY enabled"
+#error "Cortex A510 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif

 /* 64-bit only core */
 #if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Cortex Klein supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#error "Cortex A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif

 /* ----------------------------------------------------
  * HW will do the cache maintenance while powering down
  * ----------------------------------------------------
  */
-func cortex_klein_core_pwr_dwn
+func cortex_a510_core_pwr_dwn
 /* ---------------------------------------------------
  * Enable CPU power down bit in power control register
  * ---------------------------------------------------
  */
- mrs x0, CORTEX_KLEIN_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_KLEIN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- msr CORTEX_KLEIN_CPUPWRCTLR_EL1, x0
+ mrs x0, CORTEX_A510_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A510_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A510_CPUPWRCTLR_EL1, x0
 isb
 ret
-endfunc cortex_klein_core_pwr_dwn
+endfunc cortex_a510_core_pwr_dwn

 /*
- * Errata printing function for Cortex Klein. Must follow AAPCS.
+ * Errata printing function for Cortex A510. Must follow AAPCS.
  */
 #if REPORT_ERRATA
-func cortex_klein_errata_report
+func cortex_a510_errata_report
 ret
-endfunc cortex_klein_errata_report
+endfunc cortex_a510_errata_report
 #endif

-func cortex_klein_reset_func
+func cortex_a510_reset_func
 /* Disable speculative loads */
 msr SSBS, xzr
 isb
 ret
-endfunc cortex_klein_reset_func
+endfunc cortex_a510_reset_func

 /* ---------------------------------------------
- * This function provides Cortex-Klein specific
+ * This function provides Cortex-A510 specific
  * register information for crash reporting.
  * It needs to return with x6 pointing to
  * a list of register names in ascii and
  * x8 - x15 having values of registers to be
  * reported.
  * ---------------------------------------------
  */
-.section .rodata.cortex_klein_regs, "aS"
-cortex_klein_regs: /* The ascii list of register names to be reported */
+.section .rodata.cortex_a510_regs, "aS"
+cortex_a510_regs: /* The ascii list of register names to be reported */
 .asciz "cpuectlr_el1", ""

-func cortex_klein_cpu_reg_dump
- adr x6, cortex_klein_regs
- mrs x8, CORTEX_KLEIN_CPUECTLR_EL1
+func cortex_a510_cpu_reg_dump
+ adr x6, cortex_a510_regs
+ mrs x8, CORTEX_A510_CPUECTLR_EL1
 ret
-endfunc cortex_klein_cpu_reg_dump
+endfunc cortex_a510_cpu_reg_dump

-declare_cpu_ops cortex_klein, CORTEX_KLEIN_MIDR, \
- cortex_klein_reset_func, \
- cortex_klein_core_pwr_dwn
+declare_cpu_ops cortex_a510, CORTEX_A510_MIDR, \
+ cortex_a510_reset_func, \
+ cortex_a510_core_pwr_dwn
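[The cpu_helpers.S hunks above add ENABLE_ASSERTIONS-guarded NULL checks before branching through function pointers fetched from the cpu_ops structure. Roughly what the added ASM_ASSERT(ne) checks express, as a C sketch with an illustrative handler type:]

#include <assert.h>
#include <stddef.h>

typedef void (*pwr_dwn_handler_t)(void);

/* Never branch through a NULL handler fetched from the cpu_ops table;
 * like ASM_ASSERT, this check is compiled out when assertions are off. */
static void call_pwr_dwn(pwr_dwn_handler_t handler)
{
	assert(handler != NULL);
	handler();
}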
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
new file mode 100644
index 000000000..75b7647bd
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a710.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A710 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 1987031.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710. It is still
+ * open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a710_1987031_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1987031
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0,=0x6
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003ff
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x7
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003f3
+ msr S3_6_c15_c8_1,x0
+ isb
+1:
+ ret x17
+endfunc errata_a710_1987031_wa
+
+func check_errata_1987031
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1987031
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2081180.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710.
+ * It is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a710_2081180_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2081180
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0,=0x3
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003FF
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x4
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003F3
+ msr S3_6_c15_c8_1,x0
+ isb
+1:
+ ret x17
+endfunc errata_a710_2081180_wa
+
+func check_errata_2081180
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2081180
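[A note on the revision checks used throughout these errata functions: per the comments above, x0 carries the CPU's combined variant/revision byte (variant in bits [7:4], revision in bits [3:0], so #0x20 denotes r2p0), and cpu_rev_var_ls returns non-zero when the running CPU is at or below the given maximum. A rough C sketch of that convention, with hypothetical helper names:]

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical mirror of cpu_rev_var_ls: rev_var packs the variant in
 * bits [7:4] and the revision in bits [3:0], so r2p0 is 0x20. */
static bool errata_applies_ls(uint8_t rev_var, uint8_t max)
{
	return rev_var <= max;	/* e.g. r1p1 (0x11) <= r2p0 (0x20) -> true */
}

/* Hypothetical mirror of cpu_rev_var_range (used by some checks below):
 * the erratum applies when min <= rev_var <= max. */
static bool errata_applies_range(uint8_t rev_var, uint8_t min, uint8_t max)
{
	return (rev_var >= min) && (rev_var <= max);
}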
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2055002.
+ * This applies to revision r1p0, r2p0 of Cortex-A710 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2055002_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2055002
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR_EL1_BIT_46
+ msr CORTEX_A710_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2055002_wa
+
+func check_errata_2055002
+ /* Applies to r1p0, r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2055002
+
+/* -------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2017096.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -------------------------------------------------------------
+ */
+func errata_a710_2017096_wa
+ /* Compare x0 against revision r0p0 to r2p0 */
+ mov x17, x30
+ bl check_errata_2017096
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUECTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT
+ msr CORTEX_A710_CPUECTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_a710_2017096_wa
+
+func check_errata_2017096
+ /* Applies to r0p0, r1p0, r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2017096
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a710_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A710_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A710_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A710_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a710_core_pwr_dwn
+
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Cortex-A710. Must follow AAPCS.
+ */
+func cortex_a710_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A710_1987031, cortex_a710, 1987031
+ report_errata ERRATA_A710_2081180, cortex_a710, 2081180
+ report_errata ERRATA_A710_2055002, cortex_a710, 2055002
+ report_errata ERRATA_A710_2017096, cortex_a710, 2017096
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a710_errata_report
+#endif
+
+func cortex_a710_reset_func
+ mov x19, x30
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A710_1987031
+ mov x0, x18
+ bl errata_a710_1987031_wa
+#endif
+
+#if ERRATA_A710_2081180
+ mov x0, x18
+ bl errata_a710_2081180_wa
+#endif
+
+#if ERRATA_A710_2055002
+ mov x0, x18
+ bl errata_a710_2055002_wa
+#endif
+
+#if ERRATA_A710_2017096
+ mov x0, x18
+ bl errata_a710_2017096_wa
+#endif
+ isb
+ ret x19
+endfunc cortex_a710_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A710 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a710_regs, "aS"
+cortex_a710_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a710_cpu_reg_dump
+ adr x6, cortex_a710_regs
+ mrs x8, CORTEX_A710_CPUECTLR_EL1
+ ret
+endfunc cortex_a710_cpu_reg_dump
+
+declare_cpu_ops cortex_a710, CORTEX_A710_MIDR, \
+ cortex_a710_reset_func, \
+ cortex_a710_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index e3a6f5fbf..8c8f4d3e9 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -114,6 +114,86 @@ func check_errata_1925769
 b cpu_rev_var_ls
 endfunc check_errata_1925769

+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1946167.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1946167_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1946167
+ cbz x0, 1f
+
+ ldr x0,=0x4
+ msr CORTEX_A77_CPUPSELR_EL3,x0
+ ldr x0,=0x10E3900002
+ msr CORTEX_A77_CPUPOR_EL3,x0
+ ldr x0,=0x10FFF00083
+ msr CORTEX_A77_CPUPMR_EL3,x0
+ ldr x0,=0x2001003FF
+ msr CORTEX_A77_CPUPCR_EL3,x0
+
+ ldr x0,=0x5
+ msr CORTEX_A77_CPUPSELR_EL3,x0
+ ldr x0,=0x10E3800082
+ msr CORTEX_A77_CPUPOR_EL3,x0
+ ldr x0,=0x10FFF00083
+ msr CORTEX_A77_CPUPMR_EL3,x0
+ ldr x0,=0x2001003FF
+ msr CORTEX_A77_CPUPCR_EL3,x0
+
+ ldr x0,=0x6
+ msr CORTEX_A77_CPUPSELR_EL3,x0
+ ldr x0,=0x10E3800200
+ msr CORTEX_A77_CPUPOR_EL3,x0
+ ldr x0,=0x10FFF003E0
+ msr CORTEX_A77_CPUPMR_EL3,x0
+ ldr x0,=0x2001003FF
+ msr CORTEX_A77_CPUPCR_EL3,x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a77_1946167_wa
+
+func check_errata_1946167
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1946167
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1791578.
+ * This applies to revisions r0p0, r1p0, and r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1791578_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1791578
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, CORTEX_A77_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A77_ACTLR2_EL1_BIT_2
+ msr CORTEX_A77_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1791578_wa
+
+func check_errata_1791578
+ /* Applies to r0p0, r1p0, and r1p1 right now */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1791578
+
 /* -------------------------------------------------
  * The CPU Ops reset function for Cortex-A77.
  * Shall clobber: x0-x19
@@ -134,6 +214,16 @@ func cortex_a77_reset_func
 bl errata_a77_1925769_wa
 #endif

+#if ERRATA_A77_1946167
+ mov x0, x18
+ bl errata_a77_1946167_wa
+#endif
+
+#if ERRATA_A77_1791578
+ mov x0, x18
+ bl errata_a77_1791578_wa
+#endif
+
 ret x19
 endfunc cortex_a77_reset_func

@@ -169,6 +259,8 @@ func cortex_a77_errata_report
  */
 report_errata ERRATA_A77_1508412, cortex_a77, 1508412
 report_errata ERRATA_A77_1925769, cortex_a77, 1925769
+ report_errata ERRATA_A77_1946167, cortex_a77, 1946167
+ report_errata ERRATA_A77_1791578, cortex_a77, 1791578

 ldp x8, x30, [sp], #16
 ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index f61726b46..3a74571f0 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -44,13 +44,13 @@ func check_errata_1688305
 b cpu_rev_var_ls
 endfunc check_errata_1688305

- /* --------------------------------------------------
- * Errata Workaround for Cortex A78 Errata #1941498.
- * This applies to revisions r0p0, r1p0, and r1p1.
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1941498.
+ * This applies to revisions r0p0, r1p0, and r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
 func errata_a78_1941498_wa
 /* Compare x0 against revision <= r1p1 */
 mov x17, x30
@@ -72,16 +72,16 @@ func check_errata_1941498
 b cpu_rev_var_ls
 endfunc check_errata_1941498

- /* --------------------------------------------------
- * Errata Workaround for A78 Erratum 1951500.
- * This applies to revisions r1p0 and r1p1 of A78.
- * The issue also exists in r0p0 but there is no fix
- * in that revision.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
+/* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1951500.
+ * This applies to revisions r1p0 and r1p1 of A78.
+ * The issue also exists in r0p0 but there is no fix
+ * in that revision.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
 func errata_a78_1951500_wa
 /* Compare x0 against revisions r1p0 - r1p1 */
 mov x17, x30
@@ -126,6 +126,78 @@ func check_errata_1951500
 b cpu_rev_var_range
 endfunc check_errata_1951500

+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1821534.
+ * This applies to revisions r0p0 and r1p0.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1821534_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1821534
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A78_ACTLR2_EL1_BIT_2
+ msr CORTEX_A78_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1821534_wa
+
+func check_errata_1821534
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1821534
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 1952683.
+ * This applies to revision r0p0.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1952683_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1952683
+ cbz x0, 1f
+
+ ldr x0,=0x5
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xEEE10A10
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0FFF
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x0010F000
+ msr S3_6_c15_c8_4,x0
+ ldr x0,=0x0010F000
+ msr S3_6_c15_c8_5,x0
+ ldr x0,=0x40000080023ff
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x6
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xEE640F34
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0FFF
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000080023ff
+ msr S3_6_c15_c8_1,x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_1952683_wa
+
+func check_errata_1952683
+ /* Applies to r0p0 only */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_1952683
+
 /* -------------------------------------------------
  * The CPU Ops reset function for Cortex-A78
  * -------------------------------------------------
@@ -150,6 +222,16 @@ func cortex_a78_reset_func
 bl errata_a78_1951500_wa
 #endif

+#if ERRATA_A78_1821534
+ mov x0, x18
+ bl errata_a78_1821534_wa
+#endif
+
+#if ERRATA_A78_1952683
+ mov x0, x18
+ bl errata_a78_1952683_wa
+#endif
+
 #if ENABLE_AMU
 /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 mrs x0, actlr_el3
@@ -207,6 +289,8 @@ func cortex_a78_errata_report
 report_errata ERRATA_A78_1688305, cortex_a78, 1688305
 report_errata ERRATA_A78_1941498, cortex_a78, 1941498
 report_errata ERRATA_A78_1951500, cortex_a78, 1951500
+ report_errata ERRATA_A78_1821534, cortex_a78, 1821534
+ report_errata ERRATA_A78_1952683, cortex_a78, 1952683

 ldp x8, x30, [sp], #16
 ret
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index 9aff9ac85..421c17433 100644
--- a/lib/cpus/aarch64/cortex_a78_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,12 +17,108 @@
 #error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif

+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 1941500.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_1941500_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_1941500
+ cbz x0, 1f
+
+ /* Clear bit 8 in ECTLR_EL1 */
+ mrs x0, CORTEX_A78_AE_CPUECTLR_EL1
+ bic x0, x0, #CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A78_AE_CPUECTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_1941500_wa
+
+func check_errata_1941500
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1941500
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 1951502.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_1951502_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_1951502
+ cbz x0, 1f
+
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_1951502_wa
+
+func check_errata_1951502
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1951502
+
 /* -------------------------------------------------
  * The CPU Ops reset function for Cortex-A78-AE
  * -------------------------------------------------
  */
-#if ENABLE_AMU
 func cortex_a78_ae_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78_AE_1941500
+ mov x0, x18
+ bl errata_a78_ae_1941500_wa
+#endif
+
+#if ERRATA_A78_AE_1951502
+ mov x0, x18
+ bl errata_a78_ae_1951502_wa
+#endif
+
+#if ENABLE_AMU
 /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 mrs x0, actlr_el3
 bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
@@ -39,11 +136,12 @@ func cortex_a78_ae_reset_func
 /* Enable group1 counters */
 mov x0, #CORTEX_A78_AMU_GROUP1_MASK
 msr CPUAMCNTENSET1_EL0, x0
+#endif
+
 isb

- ret
+ ret x19
 endfunc cortex_a78_ae_reset_func
-#endif

 /* -------------------------------------------------------
  * HW will do the cache maintenance while powering down
@@ -66,6 +164,19 @@ endfunc cortex_a78_ae_core_pwr_dwn
  */
 #if REPORT_ERRATA
 func cortex_a78_ae_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A78_AE_1941500, cortex_a78_ae, 1941500
+ report_errata ERRATA_A78_AE_1951502, cortex_a78_ae, 1951502
+
+ ldp x8, x30, [sp], #16
 ret
 endfunc cortex_a78_ae_errata_report
 #endif
@@ -89,12 +200,6 @@ func cortex_a78_ae_cpu_reg_dump
 ret
 endfunc cortex_a78_ae_cpu_reg_dump

-#if ENABLE_AMU
-#define A78_AE_RESET_FUNC cortex_a78_ae_reset_func
-#else
-#define A78_AE_RESET_FUNC CPU_NO_RESET_FUNC
-#endif
-
 declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
- A78_AE_RESET_FUNC, \
+ cortex_a78_ae_reset_func, \
 cortex_a78_ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
new file mode 100644
index 000000000..1b170fe65
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78c.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78c must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a78c_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A78C_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78C_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78C_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78c_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A78C. Must follow AAPCS.
+ */
+func cortex_a78c_errata_report
+ ret
+endfunc cortex_a78c_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78c specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78c_regs, "aS"
+cortex_a78c_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78c_cpu_reg_dump
+ adr x6, cortex_a78c_regs
+ mrs x8, CORTEX_A78C_CPUECTLR_EL1
+ ret
+endfunc cortex_a78c_cpu_reg_dump
+
+declare_cpu_ops cortex_a78c, CORTEX_A78C_MIDR, \
+ CPU_NO_RESET_FUNC, \
+ cortex_a78c_core_pwr_dwn
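[Each of these CPU files ends with declare_cpu_ops, which registers the core's MIDR together with its reset and power-down handlers; at runtime get_cpu_ops_ptr (patched earlier in this commit) matches the running CPU's MIDR against that table. A loose C analogy of the lookup — the struct layout and names here are illustrative, not the real cpu_macros.S layout:]

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for the records emitted by declare_cpu_ops. */
struct cpu_ops {
	uint32_t midr;			/* part number to match */
	void (*reset_func)(void);	/* may be CPU_NO_RESET_FUNC (NULL) */
	void (*core_pwr_dwn)(void);
};

static const struct cpu_ops *find_cpu_ops(const struct cpu_ops *table,
					  size_t n, uint32_t midr)
{
	for (size_t i = 0; i < n; i++) {
		if (table[i].midr == midr)
			return &table[i];
	}
	return NULL;	/* the assembly asserts here instead of returning */
}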
diff --git a/lib/cpus/aarch64/cortex_demeter.S b/lib/cpus/aarch64/cortex_demeter.S
new file mode 100644
index 000000000..9ad8b86fd
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_demeter.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_demeter.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Demeter must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Demeter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_demeter_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_DEMETER_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_DEMETER_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_DEMETER_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_demeter_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Demeter. Must follow AAPCS.
+ */
+func cortex_demeter_errata_report
+ ret
+endfunc cortex_demeter_errata_report
+#endif
+
+func cortex_demeter_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_demeter_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex Demeter-
+ * specific register information for crash
+ * reporting. It needs to return with x6
+ * pointing to a list of register names in ascii
+ * and x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_demeter_regs, "aS"
+cortex_demeter_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_demeter_cpu_reg_dump
+ adr x6, cortex_demeter_regs
+ mrs x8, CORTEX_DEMETER_CPUECTLR_EL1
+ ret
+endfunc cortex_demeter_cpu_reg_dump
+
+declare_cpu_ops cortex_demeter, CORTEX_DEMETER_MIDR, \
+ cortex_demeter_reset_func, \
+ cortex_demeter_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_makalu.S b/lib/cpus/aarch64/cortex_makalu.S
new file mode 100644
index 000000000..98c7d6dfc
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_makalu.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_makalu.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Makalu must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Makalu supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+func cortex_makalu_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_makalu_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_makalu_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_MAKALU_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_MAKALU_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_MAKALU_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_makalu_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Makalu. Must follow AAPCS.
+ */
+func cortex_makalu_errata_report
+ ret
+endfunc cortex_makalu_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex Makalu-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_makalu_regs, "aS"
+cortex_makalu_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_makalu_cpu_reg_dump
+ adr x6, cortex_makalu_regs
+ mrs x8, CORTEX_MAKALU_CPUECTLR_EL1
+ ret
+endfunc cortex_makalu_cpu_reg_dump
+
+declare_cpu_ops cortex_makalu, CORTEX_MAKALU_MIDR, \
+ cortex_makalu_reset_func, \
+ cortex_makalu_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_makalu_elp_arm.S b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
new file mode 100644
index 000000000..fbbf20501
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_makalu_elp_arm.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Makalu ELP must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Makalu ELP supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_makalu_elp_arm_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_MAKALU_ELP_ARM_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_MAKALU_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_MAKALU_ELP_ARM_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_makalu_elp_arm_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Makalu ELP. Must follow AAPCS.
+ */
+func cortex_makalu_elp_arm_errata_report
+ ret
+endfunc cortex_makalu_elp_arm_errata_report
+#endif
+
+func cortex_makalu_elp_arm_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_makalu_elp_arm_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex Makalu ELP-
+ * specific register information for crash
+ * reporting. It needs to return with x6
+ * pointing to a list of register names in ascii
+ * and x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_makalu_elp_arm_regs, "aS"
+cortex_makalu_elp_arm_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_makalu_elp_arm_cpu_reg_dump
+ adr x6, cortex_makalu_elp_arm_regs
+ mrs x8, CORTEX_MAKALU_ELP_ARM_CPUECTLR_EL1
+ ret
+endfunc cortex_makalu_elp_arm_cpu_reg_dump
+
+declare_cpu_ops cortex_makalu_elp_arm, CORTEX_MAKALU_ELP_ARM_MIDR, \
+ cortex_makalu_elp_arm_reset_func, \
+ cortex_makalu_elp_arm_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_matterhorn.S b/lib/cpus/aarch64/cortex_matterhorn.S
deleted file mode 100644
index 4156f3cf8..000000000
--- a/lib/cpus/aarch64/cortex_matterhorn.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2020, ARM Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_matterhorn.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "Cortex Matterhorn must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-/* 64-bit only core */
-#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Cortex Matterhorn supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
-#endif
-
- /* ----------------------------------------------------
- * HW will do the cache maintenance while powering down
- * ----------------------------------------------------
- */
-func cortex_matterhorn_core_pwr_dwn
- /* ---------------------------------------------------
- * Enable CPU power down bit in power control register
- * ---------------------------------------------------
- */
- mrs x0, CORTEX_MATTERHORN_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_MATTERHORN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- msr CORTEX_MATTERHORN_CPUPWRCTLR_EL1, x0
- isb
- ret
-endfunc cortex_matterhorn_core_pwr_dwn
-
- /*
- * Errata printing function for Cortex Matterhorn. Must follow AAPCS.
- */
-#if REPORT_ERRATA
-func cortex_matterhorn_errata_report
- ret
-endfunc cortex_matterhorn_errata_report
-#endif
-
-func cortex_matterhorn_reset_func
- /* Disable speculative loads */
- msr SSBS, xzr
- isb
- ret
-endfunc cortex_matterhorn_reset_func
-
- /* ---------------------------------------------
- * This function provides Cortex-Matterhorn specific
- * register information for crash reporting.
- * It needs to return with x6 pointing to
- * a list of register names in ascii and
- * x8 - x15 having values of registers to be
- * reported.
- * ---------------------------------------------
- */
-.section .rodata.cortex_matterhorn_regs, "aS"
-cortex_matterhorn_regs: /* The ascii list of register names to be reported */
- .asciz "cpuectlr_el1", ""
-
-func cortex_matterhorn_cpu_reg_dump
- adr x6, cortex_matterhorn_regs
- mrs x8, CORTEX_MATTERHORN_CPUECTLR_EL1
- ret
-endfunc cortex_matterhorn_cpu_reg_dump
-
-declare_cpu_ops cortex_matterhorn, CORTEX_MATTERHORN_MIDR, \
- cortex_matterhorn_reset_func, \
- cortex_matterhorn_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
new file mode 100644
index 000000000..87a9bdf2b
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x2.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex X2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x2_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_X2_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_X2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_x2_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex X2. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_x2_errata_report
+ ret
+endfunc cortex_x2_errata_report
+#endif
+
+func cortex_x2_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_x2_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex X2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x2_regs, "aS"
+cortex_x2_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x2_cpu_reg_dump
+ adr x6, cortex_x2_regs
+ mrs x8, CORTEX_X2_CPUECTLR_EL1
+ ret
+endfunc cortex_x2_cpu_reg_dump
+
+declare_cpu_ops cortex_x2, CORTEX_X2_MIDR, \
+ cortex_x2_reset_func, \
+ cortex_x2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 730b09beb..bd8f85f6d 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -144,7 +144,7 @@ endfunc do_cpu_reg_dump
  * If cpu_ops for the MIDR_EL1 cannot be found and
  * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
  * default cpu_ops with an MIDR value of 0.
- * (Implementation number 0x0 should be reseverd for software use
+ * (Implementation number 0x0 should be reserved for software use
  * and therefore no clashes should happen with that default value).
  *
  * Return :
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 8d646cba5..9e7bbf7e6 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,11 +19,177 @@
 #error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif

- /* -------------------------------------------------
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2002655.
+ * This applies to revision r0p0 of Neoverse N2. It is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n2_2002655_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2002655
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0,=0x6
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003ff
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x7
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003f3
+ msr S3_6_c15_c8_1,x0
+ isb
+1:
+ ret x17
+endfunc errata_n2_2002655_wa
+
+func check_errata_2002655
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2002655
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2067956.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2067956_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2067956
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
+ msr NEOVERSE_N2_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2067956_wa
+
+func check_errata_2067956
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2067956
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2025414.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2025414_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2025414
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUECTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_n2_2025414_wa
+
+func check_errata_2025414
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2025414
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2189731.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2189731_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2189731
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUACTLR5_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_44
+ msr NEOVERSE_N2_CPUACTLR5_EL1, x1
+
+1:
+ ret x17
+endfunc errata_n2_2189731_wa
+
+func check_errata_2189731
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2189731
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2138956.
+ * This applies to revision r0p0 of Neoverse N2. It is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n2_2138956_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2138956
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0,=0x3
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003FF
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x4
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003F3
+ msr S3_6_c15_c8_1,x0
+ isb
+1:
+ ret x17
+endfunc errata_n2_2138956_wa
+
+func check_errata_2138956
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2138956
+
+ /* -------------------------------------------
  * The CPU Ops reset function for Neoverse N2.
- * -------------------------------------------------
+ * -------------------------------------------
  */
 func neoverse_n2_reset_func
+ mov x19, x30
+
 /* Check if the PE implements SSBS */
 mrs x0, id_aa64pfr1_el1
 tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
@@ -37,6 +203,27 @@ func neoverse_n2_reset_func
 orr x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
 msr NEOVERSE_N2_CPUACTLR2_EL1, x0

+#if ERRATA_N2_2067956
+ mov x0, x18
+ bl errata_n2_2067956_wa
+#endif
+
+#if ERRATA_N2_2025414
+ mov x0, x18
+ bl errata_n2_2025414_wa
+#endif
+
+#if ERRATA_N2_2189731
+ mov x0, x18
+ bl errata_n2_2189731_wa
+#endif
+
+#if ERRATA_N2_2138956
+ mov x0, x18
+ bl errata_n2_2138956_wa
+#endif
+
 #if ENABLE_AMU
 /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 mrs x0, cptr_el3
@@ -53,20 +240,28 @@ func neoverse_n2_reset_func

 #if NEOVERSE_Nx_EXTERNAL_LLC
 /* Some systems may have External LLC, core needs to be made aware */
- mrs x0, NEOVERSE_N2_CPUECTLR_EL1
- orr x0, x0, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
- msr NEOVERSE_N2_CPUECTLR_EL1, x0
+ mrs x0, NEOVERSE_N2_CPUECTLR_EL1
+ orr x0, x0, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x0
+#endif
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_N2_2002655
+ mov x0, x18
+ bl errata_n2_2002655_wa
 #endif

 isb
- ret
+ ret x19
 endfunc neoverse_n2_reset_func

 func neoverse_n2_core_pwr_dwn
- /* ---------------------------------------------
+ /* ---------------------------------------------------
  * Enable CPU power down bit in power control register
  * No need to do cache maintenance here.
- * ---------------------------------------------
+ * ---------------------------------------------------
  */
 mrs x0, NEOVERSE_N2_CPUPWRCTLR_EL1
 orr x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT
@@ -80,7 +275,22 @@ endfunc neoverse_n2_core_pwr_dwn
  * Errata printing function for Neoverse N2 cores. Must follow AAPCS.
  */
 func neoverse_n2_errata_report
- /* No errata reported for Neoverse N2 cores */
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_N2_2002655, neoverse_n2, 2002655
+ report_errata ERRATA_N2_2067956, neoverse_n2, 2067956
+ report_errata ERRATA_N2_2025414, neoverse_n2, 2025414
+ report_errata ERRATA_N2_2189731, neoverse_n2, 2189731
+ report_errata ERRATA_N2_2138956, neoverse_n2, 2138956
+
+ ldp x8, x30, [sp], #16
 ret
 endfunc neoverse_n2_errata_report
 #endif
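[The reset-function pattern above recurs across all these cores: read the CPU revision once, keep it in a scratch register (x18 here), then conditionally call each workaround that was enabled at build time. A self-contained C sketch of that dispatch shape — the stub names are hypothetical, and ERRATA_N2_2002655 stands for the real build flag:]

#include <stdint.h>

/* Hypothetical stubs standing in for cpu_get_rev_var and a workaround. */
static uint8_t cpu_get_rev_var_stub(void) { return 0x00; }	/* r0p0 */
static void errata_n2_2002655_wa_stub(void) { /* patching sequence */ }

/* Sketch of the dispatch in neoverse_n2_reset_func: fetch the revision
 * once, then run each build-time-enabled, revision-gated workaround. */
static void reset_func_sketch(void)
{
	uint8_t rev_var = cpu_get_rev_var_stub();

#if ERRATA_N2_2002655			/* build-time flag */
	if (rev_var <= 0x00)		/* applies to r0p0 only */
		errata_n2_2002655_wa_stub();
#endif
	/* ...one guarded block per erratum... */
}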
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 733629425..0bcf52a78 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,244 @@
 #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif

+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1774420.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1774420_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1774420
+ cbz x0, 1f
+
+ /* Set bit 53 in CPUECTLR_EL1 */
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ orr x1, x1, #NEOVERSE_V1_CPUECTLR_EL1_BIT_53
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1774420_wa
+
+func check_errata_1774420
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1774420
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1791573.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1791573_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1791573
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_2
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1791573_wa
+
+func check_errata_1791573
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1791573
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1852267.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1852267_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1852267
+ cbz x0, 1f
+
+ /* Set bit 28 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_28
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1852267_wa
+
+func check_errata_1852267
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1852267
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1925756.
+ * This applies to revisions <= r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1925756_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1925756
+ cbz x0, 1f
+
+ /* Set bit 8 in CPUECTLR_EL1 */
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ orr x1, x1, #NEOVERSE_V1_CPUECTLR_EL1_BIT_8
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1925756_wa
+
+func check_errata_1925756
+ /* Applies to <= r1p1. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1925756
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_neoverse_v1_1940577_wa
+	/* Compare x0 against revisions r1p0 - r1p1 */
+	mov	x17, x30
+	bl	check_errata_1940577
+	cbz	x0, 1f
+
+	mov	x0, #0
+	msr	S3_6_C15_C8_0, x0
+	ldr	x0, =0x10E3900002
+	msr	S3_6_C15_C8_2, x0
+	ldr	x0, =0x10FFF00083
+	msr	S3_6_C15_C8_3, x0
+	ldr	x0, =0x2001003FF
+	msr	S3_6_C15_C8_1, x0
+
+	mov	x0, #1
+	msr	S3_6_C15_C8_0, x0
+	ldr	x0, =0x10E3800082
+	msr	S3_6_C15_C8_2, x0
+	ldr	x0, =0x10FFF00083
+	msr	S3_6_C15_C8_3, x0
+	ldr	x0, =0x2001003FF
+	msr	S3_6_C15_C8_1, x0
+
+	mov	x0, #2
+	msr	S3_6_C15_C8_0, x0
+	ldr	x0, =0x10E3800200
+	msr	S3_6_C15_C8_2, x0
+	ldr	x0, =0x10FFF003E0
+	msr	S3_6_C15_C8_3, x0
+	ldr	x0, =0x2001003FF
+	msr	S3_6_C15_C8_1, x0
+
+	isb
+1:
+	ret	x17
+endfunc errata_neoverse_v1_1940577_wa
+
+func check_errata_1940577
+	/* Applies to revisions r1p0 - r1p1. */
+	mov	x1, #0x10
+	mov	x2, #0x11
+	b	cpu_rev_var_range
+endfunc check_errata_1940577
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Neoverse V1 Errata #1966096
+	 * This applies to revisions r1p0 - r1p1 and is open.
+	 * It also exists in r0p0 but there is no workaround
+	 * for that revision.
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_neoverse_v1_1966096_wa
+	/* Check workaround compatibility. */
+	mov	x17, x30
+	bl	check_errata_1966096
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mov	x0, #0x3
+	msr	S3_6_C15_C8_0, x0
+	ldr	x0, =0xEE010F12
+	msr	S3_6_C15_C8_2, x0
+	ldr	x0, =0xFFFF0FFF
+	msr	S3_6_C15_C8_3, x0
+	ldr	x0, =0x80000000003FF
+	msr	S3_6_C15_C8_1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_neoverse_v1_1966096_wa
+
+func check_errata_1966096
+	mov	x1, #0x10
+	mov	x2, #0x11
+	b	cpu_rev_var_range
+endfunc check_errata_1966096
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Neoverse V1 Errata #2139242.
+	 * This applies to revisions r0p0, r1p0, and r1p1, it
+	 * is still open.
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_neoverse_v1_2139242_wa
+	/* Check workaround compatibility. */
+	mov	x17, x30
+	bl	check_errata_2139242
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mov	x0, #0x3
+	msr	S3_6_C15_C8_0, x0
+	ldr	x0, =0xEE720F14
+	msr	S3_6_C15_C8_2, x0
+	ldr	x0, =0xFFFF0FDF
+	msr	S3_6_C15_C8_3, x0
+	ldr	x0, =0x40000005003FF
+	msr	S3_6_C15_C8_1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_neoverse_v1_2139242_wa
+
+func check_errata_2139242
+	/* Applies to r0p0, r1p0, r1p1 */
+	mov	x1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_2139242
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
 	 */
@@ -42,6 +280,24 @@ endfunc neoverse_v1_core_pwr_dwn
 */
 #if REPORT_ERRATA
 func neoverse_v1_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_V1_1774420, neoverse_v1, 1774420
+	report_errata ERRATA_V1_1791573, neoverse_v1, 1791573
+	report_errata ERRATA_V1_1852267, neoverse_v1, 1852267
+	report_errata ERRATA_V1_1925756, neoverse_v1, 1925756
+	report_errata ERRATA_V1_1940577, neoverse_v1, 1940577
+	report_errata ERRATA_V1_1966096, neoverse_v1, 1966096
+	report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc neoverse_v1_errata_report
 #endif
@@ -51,8 +307,43 @@ func neoverse_v1_reset_func
 
 	/* Disable speculative loads */
 	msr	SSBS, xzr
-	isb
+
+#if ERRATA_V1_1774420
+	mov	x0, x18
+	bl	errata_neoverse_v1_1774420_wa
+#endif
+
+#if ERRATA_V1_1791573
+	mov	x0, x18
+	bl	errata_neoverse_v1_1791573_wa
+#endif
+
+#if ERRATA_V1_1852267
+	mov	x0, x18
+	bl	errata_neoverse_v1_1852267_wa
+#endif
+
+#if ERRATA_V1_1925756
+	mov	x0, x18
+	bl	errata_neoverse_v1_1925756_wa
+#endif
+
+#if ERRATA_V1_1940577
+	mov	x0, x18
+	bl	errata_neoverse_v1_1940577_wa
+#endif
+
+#if ERRATA_V1_1966096
+	mov	x0, x18
+	bl	errata_neoverse_v1_1966096_wa
+#endif
+
+#if ERRATA_V1_2139242
+	mov	x0, x18
+	bl	errata_neoverse_v1_2139242_wa
+#endif
+
 	ret	x19
 endfunc neoverse_v1_reset_func
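For reference, each report_errata line expands to a macro from cpu_macros.S that calls the erratum's check routine with the rev-var value saved in x8 and prints the outcome. A hedged C sketch of that behaviour (names and message format are illustrative, not the real macro):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void report_erratum(const char *cpu, const char *id, bool built_in,
			   bool (*applies)(uint32_t), uint32_t rev_var)
{
	/* Revisions the erratum does not affect are skipped entirely. */
	if (!applies(rev_var))
		return;

	printf("%s: erratum %s workaround %s\n", cpu, id,
	       built_in ? "applied" : "NOT applied");
}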
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
new file mode 100644
index 000000000..8948fda70
--- /dev/null
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <qemu_max.h>
+
+func qemu_max_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Disable the Data Cache.
+	 * ---------------------------------------------
+	 */
+	mrs	x1, sctlr_el3
+	bic	x1, x1, #SCTLR_C_BIT
+	msr	sctlr_el3, x1
+	isb
+
+	/* ---------------------------------------------
+	 * Flush L1 cache to L2.
+	 * ---------------------------------------------
+	 */
+	mov	x18, lr
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+	mov	lr, x18
+	ret
+endfunc qemu_max_core_pwr_dwn
+
+func qemu_max_cluster_pwr_dwn
+	/* ---------------------------------------------
+	 * Disable the Data Cache.
+	 * ---------------------------------------------
+	 */
+	mrs	x1, sctlr_el3
+	bic	x1, x1, #SCTLR_C_BIT
+	msr	sctlr_el3, x1
+	isb
+
+	/* ---------------------------------------------
+	 * Flush all caches to PoC.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	b	dcsw_op_all
+endfunc qemu_max_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for QEMU "max". Must follow AAPCS.
+ */
+func qemu_max_errata_report
+	ret
+endfunc qemu_max_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides cpu specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.qemu_max_regs, "aS"
+qemu_max_regs:	/* The ascii list of register names to be reported */
+	.asciz	""	/* no registers to report */
+
+func qemu_max_cpu_reg_dump
+	adr	x6, qemu_max_regs
+	ret
+endfunc qemu_max_cpu_reg_dump
+
+
+/* cpu_ops for QEMU MAX */
+declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \
+	qemu_max_core_pwr_dwn, \
+	qemu_max_cluster_pwr_dwn
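declare_cpu_ops at the end of the new file registers these handlers in a cpu_ops descriptor that the reset and PSCI code looks up by MIDR. A hypothetical C view of the relevant fields follows; the real layout is generated by assembly macros in cpu_macros.S and carries more entries, so treat this strictly as a sketch. CPU_NO_RESET_FUNC leaves the reset hook empty, which suits a QEMU model that needs no erratum or IMPDEF setup, and the empty register-name string means the crash reporter prints nothing CPU-specific.

struct cpu_ops_sketch {
	unsigned long midr;		/* matched against MIDR_EL1        */
	void (*reset_func)(void);	/* CPU_NO_RESET_FUNC -> no hook    */
	void (*core_pwr_dwn)(void);	/* qemu_max_core_pwr_dwn           */
	void (*cluster_pwr_dwn)(void);	/* qemu_max_cluster_pwr_dwn        */
};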
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 64a4b4d47..6103a5a7b 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,6 +1,6 @@
 #
 # Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
-# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+# Copyright (c) 2020-2021, NVIDIA Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -290,6 +290,14 @@ ERRATA_A77_1508412 ?=0
 # only to revision <= r1p1 of the Cortex A77 cpu.
 ERRATA_A77_1925769 ?=0
 
+# Flag to apply erratum 1946167 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+ERRATA_A77_1946167 ?=0
+
+# Flag to apply erratum 1791578 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1, it is still open.
+ERRATA_A77_1791578 ?=0
+
 # Flag to apply erratum 1688305 workaround during reset. This erratum applies
 # to revisions r0p0 - r1p0 of the A78 cpu.
 ERRATA_A78_1688305 ?=0
@@ -303,6 +311,22 @@ ERRATA_A78_1941498 ?=0
 # well but there is no workaround for that revision.
 ERRATA_A78_1951500 ?=0
 
+# Flag to apply erratum 1941500 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_1941500 ?=0
+
+# Flag to apply erratum 1951502 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_1951502 ?=0
+
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the A78 cpu.
+ERRATA_A78_1821534 ?=0
+
+# Flag to apply erratum 1952683 workaround during reset. This erratum applies
+# to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
+ERRATA_A78_1952683 ?=0
+
 # Flag to apply T32 CLREX workaround during reset. This erratum applies
 # only to r0p0 and r1p0 of the Neoverse N1 cpu.
 ERRATA_N1_1043202 ?=0
@@ -360,6 +384,71 @@ ERRATA_N1_1868343 ?=0
 # exists in revisions r0p0, r1p0, and r2p0 as well but there is no workaround.
 ERRATA_N1_1946160 ?=0
 
+# Flag to apply erratum 2002655 workaround during reset. This erratum applies
+# to revisions r0p0 of the Neoverse-N2 cpu, it is still open.
+ERRATA_N2_2002655 ?=0
+
+# Flag to apply erratum 1774420 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1774420 ?=0
+
+# Flag to apply erratum 1791573 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1791573 ?=0
+
+# Flag to apply erratum 1852267 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1852267 ?=0
+
+# Flag to apply erratum 1925756 workaround during reset. This needs to be
+# enabled for r0p0, r1p0, and r1p1 of the Neoverse V1 core, it is still open.
+ERRATA_V1_1925756 ?=0
+
+# Flag to apply erratum 1940577 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 cpu.
+ERRATA_V1_1940577 ?=0
+
+# Flag to apply erratum 1966096 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 CPU and is open. This issue
+# exists in r0p0 as well but there is no workaround for that revision.
+ERRATA_V1_1966096 ?=0
+
+# Flag to apply erratum 2139242 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2139242 ?=0
+
+# Flag to apply erratum 1987031 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_1987031 ?=0
+
+# Flag to apply erratum 2081180 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2081180 ?=0
+
+# Flag to apply erratum 2067956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2067956 ?=0
+
+# Flag to apply erratum 2025414 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2025414 ?=0
+
+# Flag to apply erratum 2189731 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2189731 ?=0
+
+# Flag to apply erratum 2138956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2138956 ?=0
+
+# Flag to apply erratum 2055002 workaround during reset. This erratum applies
+# to revision r1p0, r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2055002 ?=0
+
+# Flag to apply erratum 2017096 workaround during reset. This erratum applies
+# to revision r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2017096 ?=0
+
 # Flag to apply DSU erratum 798953. This erratum applies to DSUs revision r0p0.
 # Applying the workaround results in higher DSU power consumption on idle.
 ERRATA_DSU_798953 ?=0
@@ -585,6 +674,14 @@ $(eval $(call add_define,ERRATA_A77_1508412))
 $(eval $(call assert_boolean,ERRATA_A77_1925769))
 $(eval $(call add_define,ERRATA_A77_1925769))
 
+# Process ERRATA_A77_1946167 flag
+$(eval $(call assert_boolean,ERRATA_A77_1946167))
+$(eval $(call add_define,ERRATA_A77_1946167))
+
+# Process ERRATA_A77_1791578 flag
+$(eval $(call assert_boolean,ERRATA_A77_1791578))
+$(eval $(call add_define,ERRATA_A77_1791578))
+
 # Process ERRATA_A78_1688305 flag
 $(eval $(call assert_boolean,ERRATA_A78_1688305))
 $(eval $(call add_define,ERRATA_A78_1688305))
@@ -597,6 +694,22 @@ $(eval $(call add_define,ERRATA_A78_1941498))
 $(eval $(call assert_boolean,ERRATA_A78_1951500))
 $(eval $(call add_define,ERRATA_A78_1951500))
 
+# Process ERRATA_A78_AE_1941500 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_1941500))
+$(eval $(call add_define,ERRATA_A78_AE_1941500))
+
+# Process ERRATA_A78_AE_1951502 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_1951502))
+$(eval $(call add_define,ERRATA_A78_AE_1951502))
+
+# Process ERRATA_A78_1821534 flag
+$(eval $(call assert_boolean,ERRATA_A78_1821534))
+$(eval $(call add_define,ERRATA_A78_1821534))
+
+# Process ERRATA_A78_1952683 flag
+$(eval $(call assert_boolean,ERRATA_A78_1952683))
+$(eval $(call add_define,ERRATA_A78_1952683))
+
 # Process ERRATA_N1_1043202 flag
 $(eval $(call assert_boolean,ERRATA_N1_1043202))
 $(eval $(call add_define,ERRATA_N1_1043202))
@@ -653,6 +766,70 @@ $(eval $(call add_define,ERRATA_N1_1868343))
 $(eval $(call assert_boolean,ERRATA_N1_1946160))
 $(eval $(call add_define,ERRATA_N1_1946160))
 
+# Process ERRATA_N2_2002655 flag
+$(eval $(call assert_boolean,ERRATA_N2_2002655))
+$(eval $(call add_define,ERRATA_N2_2002655))
+
+# Process ERRATA_V1_1774420 flag
+$(eval $(call assert_boolean,ERRATA_V1_1774420))
+$(eval $(call add_define,ERRATA_V1_1774420))
+
+# Process ERRATA_V1_1791573 flag
+$(eval $(call assert_boolean,ERRATA_V1_1791573))
+$(eval $(call add_define,ERRATA_V1_1791573))
+
+# Process ERRATA_V1_1852267 flag
+$(eval $(call assert_boolean,ERRATA_V1_1852267))
+$(eval $(call add_define,ERRATA_V1_1852267))
+
+# Process ERRATA_V1_1925756 flag
+$(eval $(call assert_boolean,ERRATA_V1_1925756))
+$(eval $(call add_define,ERRATA_V1_1925756))
+
+# Process ERRATA_V1_1940577 flag
+$(eval $(call assert_boolean,ERRATA_V1_1940577))
+$(eval $(call add_define,ERRATA_V1_1940577))
+
+# Process ERRATA_V1_1966096 flag
+$(eval $(call assert_boolean,ERRATA_V1_1966096))
+$(eval $(call add_define,ERRATA_V1_1966096))
+
+# Process ERRATA_V1_2139242 flag
+$(eval $(call assert_boolean,ERRATA_V1_2139242))
+$(eval $(call add_define,ERRATA_V1_2139242))
+
+# Process ERRATA_A710_1987031 flag
+$(eval $(call assert_boolean,ERRATA_A710_1987031))
+$(eval $(call add_define,ERRATA_A710_1987031))
+
+# Process ERRATA_A710_2081180 flag
+$(eval $(call assert_boolean,ERRATA_A710_2081180))
+$(eval $(call add_define,ERRATA_A710_2081180))
+
+# Process ERRATA_N2_2067956 flag
+$(eval $(call assert_boolean,ERRATA_N2_2067956))
+$(eval $(call add_define,ERRATA_N2_2067956))
+
+# Process ERRATA_N2_2025414 flag
+$(eval $(call assert_boolean,ERRATA_N2_2025414))
+$(eval $(call add_define,ERRATA_N2_2025414))
+
+# Process ERRATA_N2_2189731 flag
+$(eval $(call assert_boolean,ERRATA_N2_2189731))
+$(eval $(call add_define,ERRATA_N2_2189731))
+
+# Process ERRATA_N2_2138956 flag
+$(eval $(call assert_boolean,ERRATA_N2_2138956))
+$(eval $(call add_define,ERRATA_N2_2138956))
+
+# Process ERRATA_A710_2055002 flag
+$(eval $(call assert_boolean,ERRATA_A710_2055002))
+$(eval $(call add_define,ERRATA_A710_2055002))
+
+# Process ERRATA_A710_2017096 flag
+$(eval $(call assert_boolean,ERRATA_A710_2017096))
+$(eval $(call add_define,ERRATA_A710_2017096))
+
 # Process ERRATA_DSU_798953 flag
 $(eval $(call assert_boolean,ERRATA_DSU_798953))
 $(eval $(call add_define,ERRATA_DSU_798953))
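Every flag above follows the same plumbing: `?=0` provides a safe default, assert_boolean rejects anything but 0/1, and add_define hands the value to the compiler as a -D define. The consuming side is then an ordinary preprocessor guard, so a disabled workaround costs nothing in the image. A small C illustration (the build-line example is an assumption of the standard TF-A invocation):

/* e.g. make PLAT=... ERRATA_N2_2138956=1 turns the guard on */
#ifndef ERRATA_N2_2138956
#define ERRATA_N2_2138956 0	/* cpu-ops.mk default */
#endif

#if ERRATA_N2_2138956
void apply_erratum_2138956(void);	/* hypothetical hook, compiled in */
#endif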
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 2443001b8..81d793b46 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -49,7 +49,7 @@ void cm_init(void)
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
- * cm_e1_sysreg_context_restore().
+ * cm_el1_sysregs_context_restore().
 ******************************************************************************/
 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 {
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 75e214d9c..40e7ddfa1 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -30,7 +30,7 @@
 
 /* -----------------------------------------------------
 * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
+ * PCS to use x9-x16 (temporary caller-saved registers)
 * to save EL2 system register context. It assumes that
 * 'x0' is pointing to a 'el2_sys_regs' structure where
 * the register context will be saved.
@@ -43,7 +43,6 @@
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
-
 func el2_sysregs_context_save
 	mrs	x9, actlr_el2
 	mrs	x10, afsr0_el2
@@ -54,185 +53,153 @@ func el2_sysregs_context_save
 	stp	x11, x12, [x0, #CTX_AFSR1_EL2]
 
 	mrs	x13, cnthctl_el2
-	mrs	x14, cnthp_ctl_el2
+	mrs	x14, cntvoff_el2
 	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
 
-	mrs	x15, cnthp_cval_el2
-	mrs	x16, cnthp_tval_el2
-	stp	x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
-
-	mrs	x17, cntvoff_el2
-	mrs	x9, cptr_el2
-	stp	x17, x9, [x0, #CTX_CNTVOFF_EL2]
+	mrs	x15, cptr_el2
+	str	x15, [x0, #CTX_CPTR_EL2]
 
-	mrs	x11, elr_el2
 #if CTX_INCLUDE_AARCH32_REGS
-	mrs	x10, dbgvcr32_el2
-	stp	x10, x11, [x0, #CTX_DBGVCR32_EL2]
-#else
-	str	x11, [x0, #CTX_ELR_EL2]
+	mrs	x16, dbgvcr32_el2
+	str	x16, [x0, #CTX_DBGVCR32_EL2]
 #endif
 
-	mrs	x14, esr_el2
-	mrs	x15, far_el2
-	stp	x14, x15, [x0, #CTX_ESR_EL2]
+	mrs	x9, elr_el2
+	mrs	x10, esr_el2
+	stp	x9, x10, [x0, #CTX_ELR_EL2]
 
-	mrs	x16, hacr_el2
-	mrs	x17, hcr_el2
-	stp	x16, x17, [x0, #CTX_HACR_EL2]
+	mrs	x11, far_el2
+	mrs	x12, hacr_el2
+	stp	x11, x12, [x0, #CTX_FAR_EL2]
 
-	mrs	x9, hpfar_el2
-	mrs	x10, hstr_el2
-	stp	x9, x10, [x0, #CTX_HPFAR_EL2]
+	mrs	x13, hcr_el2
+	mrs	x14, hpfar_el2
+	stp	x13, x14, [x0, #CTX_HCR_EL2]
 
-	mrs	x11, ICC_SRE_EL2
-	mrs	x12, ICH_HCR_EL2
-	stp	x11, x12, [x0, #CTX_ICC_SRE_EL2]
+	mrs	x15, hstr_el2
+	mrs	x16, ICC_SRE_EL2
+	stp	x15, x16, [x0, #CTX_HSTR_EL2]
 
-	mrs	x13, ICH_VMCR_EL2
-	mrs	x14, mair_el2
-	stp	x13, x14, [x0, #CTX_ICH_VMCR_EL2]
+	mrs	x9, ICH_HCR_EL2
+	mrs	x10, ICH_VMCR_EL2
+	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
+
+	mrs	x11, mair_el2
+	mrs	x12, mdcr_el2
+	stp	x11, x12, [x0, #CTX_MAIR_EL2]
 
-	mrs	x15, mdcr_el2
 #if ENABLE_SPE_FOR_LOWER_ELS
-	mrs	x16, PMSCR_EL2
-	stp	x15, x16, [x0, #CTX_MDCR_EL2]
-#else
-	str	x15, [x0, #CTX_MDCR_EL2]
+	mrs	x13, PMSCR_EL2
+	str	x13, [x0, #CTX_PMSCR_EL2]
 #endif
+	mrs	x14, sctlr_el2
+	str	x14, [x0, #CTX_SCTLR_EL2]
 
-	mrs	x17, sctlr_el2
-	mrs	x9, spsr_el2
-	stp	x17, x9, [x0, #CTX_SCTLR_EL2]
-
-	mrs	x10, sp_el2
-	mrs	x11, tcr_el2
-	stp	x10, x11, [x0, #CTX_SP_EL2]
+	mrs	x15, spsr_el2
+	mrs	x16, sp_el2
+	stp	x15, x16, [x0, #CTX_SPSR_EL2]
 
-	mrs	x12, tpidr_el2
-	mrs	x13, ttbr0_el2
-	stp	x12, x13, [x0, #CTX_TPIDR_EL2]
+	mrs	x9, tcr_el2
+	mrs	x10, tpidr_el2
+	stp	x9, x10, [x0, #CTX_TCR_EL2]
 
-	mrs	x14, vbar_el2
-	mrs	x15, vmpidr_el2
-	stp	x14, x15, [x0, #CTX_VBAR_EL2]
+	mrs	x11, ttbr0_el2
+	mrs	x12, vbar_el2
+	stp	x11, x12, [x0, #CTX_TTBR0_EL2]
 
-	mrs	x16, vpidr_el2
-	mrs	x17, vtcr_el2
-	stp	x16, x17, [x0, #CTX_VPIDR_EL2]
+	mrs	x13, vmpidr_el2
+	mrs	x14, vpidr_el2
+	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]
 
-	mrs	x9, vttbr_el2
-	str	x9, [x0, #CTX_VTTBR_EL2]
+	mrs	x15, vtcr_el2
+	mrs	x16, vttbr_el2
+	stp	x15, x16, [x0, #CTX_VTCR_EL2]
 
 #if CTX_INCLUDE_MTE_REGS
-	mrs	x10, TFSR_EL2
-	str	x10, [x0, #CTX_TFSR_EL2]
+	mrs	x9, TFSR_EL2
+	str	x9, [x0, #CTX_TFSR_EL2]
 #endif
 
 #if ENABLE_MPAM_FOR_LOWER_ELS
-	mrs	x9, MPAM2_EL2
-	mrs	x10, MPAMHCR_EL2
-	stp	x9, x10, [x0, #CTX_MPAM2_EL2]
+	mrs	x10, MPAM2_EL2
+	str	x10, [x0, #CTX_MPAM2_EL2]
 
-	mrs	x11, MPAMVPM0_EL2
-	mrs	x12, MPAMVPM1_EL2
-	stp	x11, x12, [x0, #CTX_MPAMVPM0_EL2]
+	mrs	x11, MPAMHCR_EL2
+	mrs	x12, MPAMVPM0_EL2
+	stp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
 
-	mrs	x13, MPAMVPM2_EL2
-	mrs	x14, MPAMVPM3_EL2
-	stp	x13, x14, [x0, #CTX_MPAMVPM2_EL2]
+	mrs	x13, MPAMVPM1_EL2
+	mrs	x14, MPAMVPM2_EL2
+	stp	x13, x14, [x0, #CTX_MPAMVPM1_EL2]
 
-	mrs	x15, MPAMVPM4_EL2
-	mrs	x16, MPAMVPM5_EL2
-	stp	x15, x16, [x0, #CTX_MPAMVPM4_EL2]
+	mrs	x15, MPAMVPM3_EL2
+	mrs	x16, MPAMVPM4_EL2
+	stp	x15, x16, [x0, #CTX_MPAMVPM3_EL2]
 
-	mrs	x17, MPAMVPM6_EL2
-	mrs	x9, MPAMVPM7_EL2
-	stp	x17, x9, [x0, #CTX_MPAMVPM6_EL2]
+	mrs	x9, MPAMVPM5_EL2
+	mrs	x10, MPAMVPM6_EL2
+	stp	x9, x10, [x0, #CTX_MPAMVPM5_EL2]
 
-	mrs	x10, MPAMVPMV_EL2
-	str	x10, [x0, #CTX_MPAMVPMV_EL2]
+	mrs	x11, MPAMVPM7_EL2
+	mrs	x12, MPAMVPMV_EL2
+	stp	x11, x12, [x0, #CTX_MPAMVPM7_EL2]
 #endif
-
 #if ARM_ARCH_AT_LEAST(8, 6)
-	mrs	x11, HAFGRTR_EL2
-	mrs	x12, HDFGRTR_EL2
-	stp	x11, x12, [x0, #CTX_HAFGRTR_EL2]
+	mrs	x13, HAFGRTR_EL2
+	mrs	x14, HDFGRTR_EL2
+	stp	x13, x14, [x0, #CTX_HAFGRTR_EL2]
 
-	mrs	x13, HDFGWTR_EL2
-	mrs	x14, HFGITR_EL2
-	stp	x13, x14, [x0, #CTX_HDFGWTR_EL2]
+	mrs	x15, HDFGWTR_EL2
+	mrs	x16, HFGITR_EL2
+	stp	x15, x16, [x0, #CTX_HDFGWTR_EL2]
 
-	mrs	x15, HFGRTR_EL2
-	mrs	x16, HFGWTR_EL2
-	stp	x15, x16, [x0, #CTX_HFGRTR_EL2]
+	mrs	x9, HFGRTR_EL2
+	mrs	x10, HFGWTR_EL2
+	stp	x9, x10, [x0, #CTX_HFGRTR_EL2]
 
-	mrs	x17, CNTPOFF_EL2
-	str	x17, [x0, #CTX_CNTPOFF_EL2]
+	mrs	x11, CNTPOFF_EL2
+	str	x11, [x0, #CTX_CNTPOFF_EL2]
 #endif
 
 #if ARM_ARCH_AT_LEAST(8, 4)
-	mrs	x9, cnthps_ctl_el2
-	mrs	x10, cnthps_cval_el2
-	stp	x9, x10, [x0, #CTX_CNTHPS_CTL_EL2]
-
-	mrs	x11, cnthps_tval_el2
-	mrs	x12, cnthvs_ctl_el2
-	stp	x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2]
-
-	mrs	x13, cnthvs_cval_el2
-	mrs	x14, cnthvs_tval_el2
-	stp	x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2]
-
-	mrs	x15, cnthv_ctl_el2
-	mrs	x16, cnthv_cval_el2
-	stp	x15, x16, [x0, #CTX_CNTHV_CTL_EL2]
-
-	mrs	x17, cnthv_tval_el2
-	mrs	x9, contextidr_el2
-	stp	x17, x9, [x0, #CTX_CNTHV_TVAL_EL2]
+	mrs	x12, contextidr_el2
+	str	x12, [x0, #CTX_CONTEXTIDR_EL2]
 
 #if CTX_INCLUDE_AARCH32_REGS
-	mrs	x10, sder32_el2
-	str	x10, [x0, #CTX_SDER32_EL2]
+	mrs	x13, sder32_el2
+	str	x13, [x0, #CTX_SDER32_EL2]
 #endif
-
-	mrs	x11, ttbr1_el2
-	str	x11, [x0, #CTX_TTBR1_EL2]
-
-	mrs	x12, vdisr_el2
-	str	x12, [x0, #CTX_VDISR_EL2]
+	mrs	x14, ttbr1_el2
+	mrs	x15, vdisr_el2
+	stp	x14, x15, [x0, #CTX_TTBR1_EL2]
 
 #if CTX_INCLUDE_NEVE_REGS
-	mrs	x13, vncr_el2
-	str	x13, [x0, #CTX_VNCR_EL2]
+	mrs	x16, vncr_el2
+	str	x16, [x0, #CTX_VNCR_EL2]
 #endif
 
-	mrs	x14, vsesr_el2
-	str	x14, [x0, #CTX_VSESR_EL2]
-
-	mrs	x15, vstcr_el2
-	str	x15, [x0, #CTX_VSTCR_EL2]
+	mrs	x9, vsesr_el2
+	mrs	x10, vstcr_el2
+	stp	x9, x10, [x0, #CTX_VSESR_EL2]
 
-	mrs	x16, vsttbr_el2
-	str	x16, [x0, #CTX_VSTTBR_EL2]
-
-	mrs	x17, TRFCR_EL2
-	str	x17, [x0, #CTX_TRFCR_EL2]
+	mrs	x11, vsttbr_el2
+	mrs	x12, TRFCR_EL2
+	stp	x11, x12, [x0, #CTX_VSTTBR_EL2]
 #endif
 
 #if ARM_ARCH_AT_LEAST(8, 5)
-	mrs	x9, scxtnum_el2
-	str	x9, [x0, #CTX_SCXTNUM_EL2]
+	mrs	x13, scxtnum_el2
+	str	x13, [x0, #CTX_SCXTNUM_EL2]
 #endif
 
 	ret
 endfunc el2_sysregs_context_save
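The save path above reads system registers with back-to-back mrs and stores them with a single stp, which only works because the context buffer places each pair in adjacent 8-byte slots; the rewrite also narrows the scratch set to x9-x16, as the updated header comment states. A sketch of the layout assumption (the real offsets such as CTX_ELR_EL2 are generated from the context headers; this struct is illustrative only):

#include <stdint.h>

struct el2_ctx_fragment {
	uint64_t elr_el2;	/* [x0, #CTX_ELR_EL2]     */
	uint64_t esr_el2;	/* [x0, #CTX_ELR_EL2 + 8] */
};

/*
 * Matching assembly from the diff above:
 *   mrs x9, elr_el2
 *   mrs x10, esr_el2
 *   stp x9, x10, [x0, #CTX_ELR_EL2]
 */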
+
 /* -----------------------------------------------------
 * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
+ * PCS to use x9-x16 (temporary caller-saved registers)
 * to restore EL2 system register context. It assumes
 * that 'x0' is pointing to a 'el2_sys_regs' structure
 * from where the register context will be restored
@@ -246,7 +213,6 @@ endfunc el2_sysregs_context_save
 * -----------------------------------------------------
 */
 func el2_sysregs_context_restore
-
 	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
 	msr	actlr_el2, x9
 	msr	afsr0_el2, x10
@@ -257,74 +223,66 @@ func el2_sysregs_context_restore
 
 	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
 	msr	cnthctl_el2, x13
-	msr	cnthp_ctl_el2, x14
+	msr	cntvoff_el2, x14
 
-	ldp	x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
-	msr	cnthp_cval_el2, x15
-	msr	cnthp_tval_el2, x16
-
-	ldp	x17, x9, [x0, #CTX_CNTVOFF_EL2]
-	msr	cntvoff_el2, x17
-	msr	cptr_el2, x9
+	ldr	x15, [x0, #CTX_CPTR_EL2]
+	msr	cptr_el2, x15
 
 #if CTX_INCLUDE_AARCH32_REGS
-	ldp	x10, x11, [x0, #CTX_DBGVCR32_EL2]
-	msr	dbgvcr32_el2, x10
-#else
-	ldr	x11, [x0, #CTX_ELR_EL2]
+	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
+	msr	dbgvcr32_el2, x16
 #endif
-	msr	elr_el2, x11
 
-	ldp	x14, x15, [x0, #CTX_ESR_EL2]
-	msr	esr_el2, x14
-	msr	far_el2, x15
+	ldp	x9, x10, [x0, #CTX_ELR_EL2]
+	msr	elr_el2, x9
+	msr	esr_el2, x10
+
+	ldp	x11, x12, [x0, #CTX_FAR_EL2]
+	msr	far_el2, x11
+	msr	hacr_el2, x12
 
-	ldp	x16, x17, [x0, #CTX_HACR_EL2]
-	msr	hacr_el2, x16
-	msr	hcr_el2, x17
+	ldp	x13, x14, [x0, #CTX_HCR_EL2]
+	msr	hcr_el2, x13
+	msr	hpfar_el2, x14
 
-	ldp	x9, x10, [x0, #CTX_HPFAR_EL2]
-	msr	hpfar_el2, x9
-	msr	hstr_el2, x10
+	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
+	msr	hstr_el2, x15
+	msr	ICC_SRE_EL2, x16
 
-	ldp	x11, x12, [x0, #CTX_ICC_SRE_EL2]
-	msr	ICC_SRE_EL2, x11
-	msr	ICH_HCR_EL2, x12
+	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
+	msr	ICH_HCR_EL2, x9
+	msr	ICH_VMCR_EL2, x10
 
-	ldp	x13, x14, [x0, #CTX_ICH_VMCR_EL2]
-	msr	ICH_VMCR_EL2, x13
-	msr	mair_el2, x14
+	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
+	msr	mair_el2, x11
+	msr	mdcr_el2, x12
 
 #if ENABLE_SPE_FOR_LOWER_ELS
-	ldp	x15, x16, [x0, #CTX_MDCR_EL2]
-	msr	PMSCR_EL2, x16
-#else
-	ldr	x15, [x0, #CTX_MDCR_EL2]
+	ldr	x13, [x0, #CTX_PMSCR_EL2]
+	msr	PMSCR_EL2, x13
 #endif
-	msr	mdcr_el2, x15
-
-	ldp	x17, x9, [x0, #CTX_SCTLR_EL2]
-	msr	sctlr_el2, x17
-	msr	spsr_el2, x9
+	ldr	x14, [x0, #CTX_SCTLR_EL2]
+	msr	sctlr_el2, x14
 
-	ldp	x10, x11, [x0, #CTX_SP_EL2]
-	msr	sp_el2, x10
-	msr	tcr_el2, x11
+	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
+	msr	spsr_el2, x15
+	msr	sp_el2, x16
 
-	ldp	x12, x13, [x0, #CTX_TPIDR_EL2]
-	msr	tpidr_el2, x12
-	msr	ttbr0_el2, x13
+	ldp	x9, x10, [x0, #CTX_TCR_EL2]
+	msr	tcr_el2, x9
+	msr	tpidr_el2, x10
 
-	ldp	x13, x14, [x0, #CTX_VBAR_EL2]
-	msr	vbar_el2, x13
-	msr	vmpidr_el2, x14
+	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
+	msr	ttbr0_el2, x11
+	msr	vbar_el2, x12
 
-	ldp	x15, x16, [x0, #CTX_VPIDR_EL2]
-	msr	vpidr_el2, x15
-	msr	vtcr_el2, x16
+	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
+	msr	vmpidr_el2, x13
+	msr	vpidr_el2, x14
 
-	ldr	x17, [x0, #CTX_VTTBR_EL2]
-	msr	vttbr_el2, x17
+	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
+	msr	vtcr_el2, x15
+	msr	vttbr_el2, x16
 
 #if CTX_INCLUDE_MTE_REGS
 	ldr	x9, [x0, #CTX_TFSR_EL2]
@@ -332,100 +290,76 @@ func el2_sysregs_context_restore
 #endif
 
 #if ENABLE_MPAM_FOR_LOWER_ELS
-	ldp	x10, x11, [x0, #CTX_MPAM2_EL2]
+	ldr	x10, [x0, #CTX_MPAM2_EL2]
 	msr	MPAM2_EL2, x10
-	msr	MPAMHCR_EL2, x11
 
-	ldp	x12, x13, [x0, #CTX_MPAMVPM0_EL2]
+	ldp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
+	msr	MPAMHCR_EL2, x11
 	msr	MPAMVPM0_EL2, x12
-	msr	MPAMVPM1_EL2, x13
 
-	ldp	x14, x15, [x0, #CTX_MPAMVPM2_EL2]
+	ldp	x13, x14, [x0, #CTX_MPAMVPM1_EL2]
+	msr	MPAMVPM1_EL2, x13
 	msr	MPAMVPM2_EL2, x14
-	msr	MPAMVPM3_EL2, x15
 
-	ldp	x16, x17, [x0, #CTX_MPAMVPM4_EL2]
+	ldp	x15, x16, [x0, #CTX_MPAMVPM3_EL2]
+	msr	MPAMVPM3_EL2, x15
 	msr	MPAMVPM4_EL2, x16
-	msr	MPAMVPM5_EL2, x17
 
-	ldp	x9, x10, [x0, #CTX_MPAMVPM6_EL2]
-	msr	MPAMVPM6_EL2, x9
-	msr	MPAMVPM7_EL2, x10
+	ldp	x9, x10, [x0, #CTX_MPAMVPM5_EL2]
+	msr	MPAMVPM5_EL2, x9
+	msr	MPAMVPM6_EL2, x10
 
-	ldr	x11, [x0, #CTX_MPAMVPMV_EL2]
-	msr	MPAMVPMV_EL2, x11
+	ldp	x11, x12, [x0, #CTX_MPAMVPM7_EL2]
+	msr	MPAMVPM7_EL2, x11
+	msr	MPAMVPMV_EL2, x12
 #endif
 
 #if ARM_ARCH_AT_LEAST(8, 6)
-	ldp	x12, x13, [x0, #CTX_HAFGRTR_EL2]
-	msr	HAFGRTR_EL2, x12
-	msr	HDFGRTR_EL2, x13
+	ldp	x13, x14, [x0, #CTX_HAFGRTR_EL2]
+	msr	HAFGRTR_EL2, x13
+	msr	HDFGRTR_EL2, x14
 
-	ldp	x14, x15, [x0, #CTX_HDFGWTR_EL2]
-	msr	HDFGWTR_EL2, x14
-	msr	HFGITR_EL2, x15
+	ldp	x15, x16, [x0, #CTX_HDFGWTR_EL2]
+	msr	HDFGWTR_EL2, x15
+	msr	HFGITR_EL2, x16
 
-	ldp	x16, x17, [x0, #CTX_HFGRTR_EL2]
-	msr	HFGRTR_EL2, x16
-	msr	HFGWTR_EL2, x17
+	ldp	x9, x10, [x0, #CTX_HFGRTR_EL2]
+	msr	HFGRTR_EL2, x9
+	msr	HFGWTR_EL2, x10
 
-	ldr	x9, [x0, #CTX_CNTPOFF_EL2]
-	msr	CNTPOFF_EL2, x9
+	ldr	x11, [x0, #CTX_CNTPOFF_EL2]
+	msr	CNTPOFF_EL2, x11
 #endif
 
 #if ARM_ARCH_AT_LEAST(8, 4)
-	ldp	x10, x11, [x0, #CTX_CNTHPS_CTL_EL2]
-	msr	cnthps_ctl_el2, x10
-	msr	cnthps_cval_el2, x11
-
-	ldp	x12, x13, [x0, #CTX_CNTHPS_TVAL_EL2]
-	msr	cnthps_tval_el2, x12
-	msr	cnthvs_ctl_el2, x13
-
-	ldp	x14, x15, [x0, #CTX_CNTHVS_CVAL_EL2]
-	msr	cnthvs_cval_el2, x14
-	msr	cnthvs_tval_el2, x15
-
-	ldp	x16, x17, [x0, #CTX_CNTHV_CTL_EL2]
-	msr	cnthv_ctl_el2, x16
-	msr	cnthv_cval_el2, x17
-
-	ldp	x9, x10, [x0, #CTX_CNTHV_TVAL_EL2]
-	msr	cnthv_tval_el2, x9
-	msr	contextidr_el2, x10
+	ldr	x12, [x0, #CTX_CONTEXTIDR_EL2]
+	msr	contextidr_el2, x12
 
 #if CTX_INCLUDE_AARCH32_REGS
-	ldr	x11, [x0, #CTX_SDER32_EL2]
-	msr	sder32_el2, x11
+	ldr	x13, [x0, #CTX_SDER32_EL2]
+	msr	sder32_el2, x13
 #endif
-
-	ldr	x12, [x0, #CTX_TTBR1_EL2]
-	msr	ttbr1_el2, x12
-
-	ldr	x13, [x0, #CTX_VDISR_EL2]
-	msr	vdisr_el2, x13
+	ldp	x14, x15, [x0, #CTX_TTBR1_EL2]
+	msr	ttbr1_el2, x14
+	msr	vdisr_el2, x15
 
 #if CTX_INCLUDE_NEVE_REGS
-	ldr	x14, [x0, #CTX_VNCR_EL2]
-	msr	vncr_el2, x14
+	ldr	x16, [x0, #CTX_VNCR_EL2]
+	msr	vncr_el2, x16
 #endif
 
-	ldr	x15, [x0, #CTX_VSESR_EL2]
-	msr	vsesr_el2, x15
-
-	ldr	x16, [x0, #CTX_VSTCR_EL2]
-	msr	vstcr_el2, x16
-
-	ldr	x17, [x0, #CTX_VSTTBR_EL2]
-	msr	vsttbr_el2, x17
+	ldp	x9, x10, [x0, #CTX_VSESR_EL2]
+	msr	vsesr_el2, x9
+	msr	vstcr_el2, x10
 
-	ldr	x9, [x0, #CTX_TRFCR_EL2]
-	msr	TRFCR_EL2, x9
+	ldp	x11, x12, [x0, #CTX_VSTTBR_EL2]
+	msr	vsttbr_el2, x11
+	msr	TRFCR_EL2, x12
 #endif
 
 #if ARM_ARCH_AT_LEAST(8, 5)
-	ldr	x10, [x0, #CTX_SCXTNUM_EL2]
-	msr	scxtnum_el2, x10
+	ldr	x13, [x0, #CTX_SCXTNUM_EL2]
+	msr	scxtnum_el2, x13
 #endif
 
 	ret
@@ -763,13 +697,14 @@ func save_gp_pmcr_pauth_regs
 	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 
 	/* ----------------------------------------------------------
-	 * Check if earlier initialization MDCR_EL3.SCCD to 1 failed,
-	 * meaning that ARMv8-PMU is not implemented and PMCR_EL0
-	 * should be saved in non-secure context.
+	 * Check if earlier initialization MDCR_EL3.SCCD/MCCD to 1
+	 * failed, meaning that FEAT_PMUv3p5/7 is not implemented and
+	 * PMCR_EL0 should be saved in non-secure context.
 	 * ----------------------------------------------------------
 	 */
+	mov_imm	x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
 	mrs	x9, mdcr_el3
-	tst	x9, #MDCR_SCCD_BIT
+	tst	x9, x10
 	bne	1f
 
 	/* Secure Cycle Counter is not disabled */
@@ -858,13 +793,14 @@ func restore_gp_pmcr_pauth_regs
 
 	/* ----------------------------------------------------------
 	 * Back to Non-secure state.
-	 * Check if earlier initialization MDCR_EL3.SCCD to 1 failed,
-	 * meaning that ARMv8-PMU is not implemented and PMCR_EL0
-	 * should be restored from non-secure context.
+	 * Check if earlier initialization MDCR_EL3.SCCD/MCCD to 1
+	 * failed, meaning that FEAT_PMUv3p5/7 is not implemented and
+	 * PMCR_EL0 should be restored from non-secure context.
 	 * ----------------------------------------------------------
 	 */
+	mov_imm	x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
 	mrs	x0, mdcr_el3
-	tst	x0, #MDCR_SCCD_BIT
+	tst	x0, x1
 	bne	2f
 	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
 	msr	pmcr_el0, x0
@@ -965,6 +901,24 @@ func el3_exit
 	msr	spsr_el3, x16
 	msr	elr_el3, x17
 
+#if IMAGE_BL31
+	/* ----------------------------------------------------------
+	 * Restore CPTR_EL3.
+	 * ZCR is only restored if SVE is supported and enabled.
+	 * Synchronization is required before zcr_el3 is addressed.
+	 * ----------------------------------------------------------
+	 */
+	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
+	msr	cptr_el3, x19
+
+	ands	x19, x19, #CPTR_EZ_BIT
+	beq	sve_not_enabled
+
+	isb
+	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
+sve_not_enabled:
+#endif
+
 #if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
 	/* ----------------------------------------------------------
 	 * Restore mitigation state as it was on entry to EL3
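The save_gp_pmcr_pauth_regs and restore_gp_pmcr_pauth_regs hunks widen the cycle-counter ownership test from SCCD alone to SCCD or MCCD; the combined mask is materialised with mov_imm because the two widely spaced bits cannot be encoded as a tst immediate. In C terms the test reads as follows (a sketch; read_mdcr_el3() and the MDCR_*_BIT macros are the TF-A helpers this patch already relies on):

#include <stdbool.h>
#include <stdint.h>

/* PMCR_EL0 belongs to the Non-secure context only when EL3 could not
 * claim the cycle counter, i.e. when neither MDCR_EL3.SCCD (FEAT_PMUv3p5)
 * nor MDCR_EL3.MCCD (FEAT_PMUv3p7) read back as 1. */
static bool pmcr_el0_belongs_to_ns_context(void)
{
	uint64_t mask = MDCR_SCCD_BIT | MDCR_MCCD_BIT;

	return (read_mdcr_el3() & mask) == 0ULL;
}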
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 72d463b71..7c6f953b2 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -25,6 +25,7 @@
 #include <lib/extensions/twed.h>
 #include <lib/utils.h>
 
+static void enable_extensions_secure(cpu_context_t *ctx);
 
 /*******************************************************************************
 * Context management library initialisation routine. This library is used by
@@ -60,7 +61,7 @@ void __init cm_init(void)
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
- * cm_e1_sysreg_context_restore().
+ * cm_el1_sysregs_context_restore().
 ******************************************************************************/
 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 {
@@ -180,6 +181,12 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	scr_el3 |= get_scr_el3_from_routing_model(security_state);
 #endif
 
+	/* Save the initialized value of CPTR_EL3 register */
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
+	if (security_state == SECURE) {
+		enable_extensions_secure(ctx);
+	}
+
 	/*
 	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
 	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
@@ -217,6 +224,16 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	}
 
 	/*
+	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
+	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
+	 * to 1 when EL2 is present.
+	 */
+	if (is_armv8_6_feat_amuv1p1_present() &&
+		(el_implemented(2) != EL_IMPL_NONE)) {
+		scr_el3 |= SCR_AMVOFFEN_BIT;
+	}
+
+	/*
 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
 	 * execution state setting all fields rather than relying of the hw.
 	 * Some fields have architecturally UNKNOWN reset values and these are
@@ -276,7 +293,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 
 	/*
 	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
-	 * and other EL2 registers are set up by cm_prepare_ns_entry() as they
+	 * and other EL2 registers are set up by cm_prepare_el3_exit() as they
 	 * are not part of the stored cpu_context.
 	 */
 	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
@@ -313,7 +330,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
 * it is zero.
 ******************************************************************************/
-static void enable_extensions_nonsecure(bool el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
 {
 #if IMAGE_BL31
 #if ENABLE_SPE_FOR_LOWER_ELS
@@ -321,11 +338,11 @@ static void enable_extensions_nonsecure(bool el2_unused)
 #endif
 
 #if ENABLE_AMU
-	amu_enable(el2_unused);
+	amu_enable(el2_unused, ctx);
 #endif
 
 #if ENABLE_SVE_FOR_NS
-	sve_enable(el2_unused);
+	sve_enable(ctx);
 #endif
 
 #if ENABLE_MPAM_FOR_LOWER_ELS
@@ -335,6 +352,18 @@ static void enable_extensions_nonsecure(bool el2_unused)
 }
 
 /*******************************************************************************
+ * Enable architecture extensions on first entry to Secure world.
+ ******************************************************************************/
+static void enable_extensions_secure(cpu_context_t *ctx)
+{
+#if IMAGE_BL31
+#if ENABLE_SVE_FOR_SWD
+	sve_enable(ctx);
+#endif
+#endif
+}
+
+/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
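These context_mgmt.c hunks, the CPTR_EL3/ZCR_EL3 restore added to el3_exit earlier, and the amu/sve changes further down form one mechanism: CPTR_EL3 is snapshotted into each world's context at setup time, the extension enablers edit that snapshot instead of the live register, and el3_exit writes it back on the way out of EL3. A condensed sketch using the same TF-A accessors the patch itself uses (the combination of both edits in one function is illustrative):

static void sketch_enable_extensions(cpu_context_t *ctx)
{
	u_register_t cptr = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	cptr &= ~TAM_BIT;			/* amu_enable's edit: untrap AMU  */
	cptr = (cptr | CPTR_EZ_BIT) & ~TFP_BIT;	/* sve_enable's edit: SVE+FP/SIMD */

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, cptr);
	/* el3_exit later issues: msr cptr_el3, then msr zcr_el3 if EZ is set */
}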
@@ -568,7 +597,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));
 		}
 
-		enable_extensions_nonsecure(el2_unused);
+		enable_extensions_nonsecure(el2_unused, ctx);
 	}
 
 	cm_el1_sysregs_context_restore(security_state);
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 0f75f0791..ed56dddc9 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -18,13 +18,17 @@
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
-/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
-bool amu_supported(void)
+/*
+ * Get AMU version value from pfr0.
+ * Return values
+ *   ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
+ *   ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
+ *   ID_PFR0_AMU_NOT_SUPPORTED: not supported
+ */
+unsigned int amu_get_version(void)
 {
-	uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-
-	features &= ID_PFR0_AMU_MASK;
-	return ((features == 1U) || (features == 2U));
+	return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
+		ID_PFR0_AMU_MASK;
 }
 
 #if AMU_GROUP1_NR_COUNTERS
@@ -43,7 +47,7 @@ bool amu_group1_supported(void)
 */
 void amu_enable(bool el2_unused)
 {
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return;
 	}
 
@@ -87,12 +91,31 @@ void amu_enable(bool el2_unused)
 	/* Enable group 1 counters */
 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
 #endif
+
+	/* Initialize FEAT_AMUv1p1 features if present. */
+	if (amu_get_version() < ID_PFR0_AMU_V1P1) {
+		return;
+	}
+
+#if AMU_RESTRICT_COUNTERS
+	/*
+	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
+	 * counters at all but the highest implemented EL. This is controlled
+	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
+	 * register reads at lower ELs return zero. Reads from the memory
+	 * mapped view are unaffected.
+	 */
+	VERBOSE("AMU group 1 counter access restricted.\n");
+	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
+#else
+	write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
+#endif
 }
 
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);
 
 	return amu_group0_cnt_read_internal(idx);
@@ -101,7 +124,7 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);
 
 	amu_group0_cnt_write_internal(idx, val);
@@ -112,7 +135,7 @@ void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -122,7 +145,7 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -136,7 +159,7 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 */
 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -150,7 +173,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}
 
@@ -197,7 +220,7 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}
 
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
index effb8e50f..d387341f7 100644
--- a/lib/extensions/amu/aarch32/amu_helpers.S
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -75,13 +75,13 @@ func amu_group0_cnt_write_internal
 
 1:
 	stcopr16	r2, r3, AMEVCNTR00	/* index 0 */
-	bx lr
+	bx	lr
 	stcopr16	r2, r3, AMEVCNTR01	/* index 1 */
-	bx lr
+	bx	lr
 	stcopr16	r2, r3, AMEVCNTR02	/* index 2 */
-	bx lr
+	bx	lr
 	stcopr16	r2, r3, AMEVCNTR03	/* index 3 */
-	bx lr
+	bx	lr
 endfunc amu_group0_cnt_write_internal
 
 /*
@@ -169,37 +169,37 @@ func amu_group1_cnt_write_internal
 	bx	r1
 
 1:
-	stcopr16 r2, r3, AMEVCNTR10	/* index 0 */
+	stcopr16	r2, r3, AMEVCNTR10	/* index 0 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR11	/* index 1 */
+	stcopr16	r2, r3, AMEVCNTR11	/* index 1 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR12	/* index 2 */
+	stcopr16	r2, r3, AMEVCNTR12	/* index 2 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR13	/* index 3 */
+	stcopr16	r2, r3, AMEVCNTR13	/* index 3 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR14	/* index 4 */
+	stcopr16	r2, r3, AMEVCNTR14	/* index 4 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR15	/* index 5 */
+	stcopr16	r2, r3, AMEVCNTR15	/* index 5 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR16	/* index 6 */
+	stcopr16	r2, r3, AMEVCNTR16	/* index 6 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR17	/* index 7 */
+	stcopr16	r2, r3, AMEVCNTR17	/* index 7 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR18	/* index 8 */
+	stcopr16	r2, r3, AMEVCNTR18	/* index 8 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR19	/* index 9 */
+	stcopr16	r2, r3, AMEVCNTR19	/* index 9 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1A	/* index 10 */
+	stcopr16	r2, r3, AMEVCNTR1A	/* index 10 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1B	/* index 11 */
+	stcopr16	r2, r3, AMEVCNTR1B	/* index 11 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1C	/* index 12 */
+	stcopr16	r2, r3, AMEVCNTR1C	/* index 12 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1D	/* index 13 */
+	stcopr16	r2, r3, AMEVCNTR1D	/* index 13 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1E	/* index 14 */
+	stcopr16	r2, r3, AMEVCNTR1E	/* index 14 */
 	bx	lr
-	stcopr16 r2, r3, AMEVCNTR1F	/* index 15 */
+	stcopr16	r2, r3, AMEVCNTR1F	/* index 15 */
 	bx	lr
 endfunc amu_group1_cnt_write_internal
 
@@ -234,36 +234,36 @@ func amu_group1_set_evtype_internal
 	bx	r2
 
 1:
-	stcopr r1, AMEVTYPER10	/* index 0 */
+	stcopr	r1, AMEVTYPER10	/* index 0 */
 	bx	lr
-	stcopr r1, AMEVTYPER11	/* index 1 */
+	stcopr	r1, AMEVTYPER11	/* index 1 */
 	bx	lr
-	stcopr r1, AMEVTYPER12	/* index 2 */
+	stcopr	r1, AMEVTYPER12	/* index 2 */
 	bx	lr
-	stcopr r1, AMEVTYPER13	/* index 3 */
+	stcopr	r1, AMEVTYPER13	/* index 3 */
 	bx	lr
-	stcopr r1, AMEVTYPER14	/* index 4 */
+	stcopr	r1, AMEVTYPER14	/* index 4 */
 	bx	lr
-	stcopr r1, AMEVTYPER15	/* index 5 */
+	stcopr	r1, AMEVTYPER15	/* index 5 */
 	bx	lr
-	stcopr r1, AMEVTYPER16	/* index 6 */
+	stcopr	r1, AMEVTYPER16	/* index 6 */
 	bx	lr
-	stcopr r1, AMEVTYPER17	/* index 7 */
+	stcopr	r1, AMEVTYPER17	/* index 7 */
 	bx	lr
-	stcopr r1, AMEVTYPER18	/* index 8 */
+	stcopr	r1, AMEVTYPER18	/* index 8 */
 	bx	lr
-	stcopr r1, AMEVTYPER19	/* index 9 */
+	stcopr	r1, AMEVTYPER19	/* index 9 */
 	bx	lr
-	stcopr r1, AMEVTYPER1A	/* index 10 */
+	stcopr	r1, AMEVTYPER1A	/* index 10 */
 	bx	lr
-	stcopr r1, AMEVTYPER1B	/* index 11 */
+	stcopr	r1, AMEVTYPER1B	/* index 11 */
 	bx	lr
-	stcopr r1, AMEVTYPER1C	/* index 12 */
+	stcopr	r1, AMEVTYPER1C	/* index 12 */
 	bx	lr
-	stcopr r1, AMEVTYPER1D	/* index 13 */
+	stcopr	r1, AMEVTYPER1D	/* index 13 */
 	bx	lr
-	stcopr r1, AMEVTYPER1E	/* index 14 */
+	stcopr	r1, AMEVTYPER1E	/* index 14 */
 	bx	lr
-	stcopr r1, AMEVTYPER1F	/* index 15 */
+	stcopr	r1, AMEVTYPER1F	/* index 15 */
 	bx	lr
 endfunc amu_group1_set_evtype_internal
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 499736345..295c0d569 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -8,6 +8,7 @@
 #include <stdbool.h>
 
 #include <arch.h>
+#include <arch_features.h>
 #include <arch_helpers.h>
 
 #include <lib/el3_runtime/pubsub_events.h>
@@ -18,13 +19,17 @@
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
-/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
-bool amu_supported(void)
+/*
+ * Get AMU version value from aa64pfr0.
+ * Return values
+ *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
+ *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
+ *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
+ */
+unsigned int amu_get_version(void)
 {
-	uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-
-	features &= ID_AA64PFR0_AMU_MASK;
-	return ((features == 1U) || (features == 2U));
+	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+		ID_AA64PFR0_AMU_MASK;
 }
 
 #if AMU_GROUP1_NR_COUNTERS
@@ -41,11 +46,12 @@ bool amu_group1_supported(void)
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
-void amu_enable(bool el2_unused)
+void amu_enable(bool el2_unused, cpu_context_t *ctx)
 {
 	uint64_t v;
+	unsigned int amu_version = amu_get_version();
 
-	if (!amu_supported()) {
+	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return;
 	}
 
@@ -82,12 +88,13 @@ void amu_enable(bool el2_unused)
 	}
 
 	/*
-	 * CPTR_EL3.TAM: Set to zero so that any accesses to
+	 * Retrieve and update the CPTR_EL3 value from the context mentioned
+	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	v = read_cptr_el3();
+	v = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 	v &= ~TAM_BIT;
-	write_cptr_el3(v);
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, v);
 
 	/* Enable group 0 counters */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
@@ -96,12 +103,36 @@ void amu_enable(bool el2_unused)
 	/* Enable group 1 counters */
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
 #endif
+
+	/* Initialize FEAT_AMUv1p1 features if present. */
+	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
+		return;
+	}
+
+	if (el2_unused) {
+		/* Make sure virtual offsets are disabled if EL2 not used. */
+		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
+	}
+
+#if AMU_RESTRICT_COUNTERS
+	/*
+	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
+	 * counters at all but the highest implemented EL. This is controlled
+	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
+	 * register reads at lower ELs return zero. Reads from the memory
+	 * mapped view are unaffected.
+	 */
+	VERBOSE("AMU group 1 counter access restricted.\n");
+	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
+#else
+	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
+#endif
 }
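On both the AArch32 and AArch64 sides, the boolean amu_supported() gives way to a version query so callers can tell FEAT_AMUv1 and FEAT_AMUv1p1 apart. Typical usage, following the pattern this file now applies everywhere (a sketch of the caller side, not new TF-A code):

static void amu_setup_sketch(void)
{
	unsigned int ver = amu_get_version();

	if (ver == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return;			/* no AMU at all */
	}

	/* FEAT_AMUv1 baseline handling goes here. */

	if (ver >= ID_AA64PFR0_AMU_V1P1) {
		/* v1p1-only state: virtual offsets, AMCR_EL0.CG1RZ. */
	}
}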
 
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);
 
 	return amu_group0_cnt_read_internal(idx);
@@ -110,18 +141,49 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
 }
 
+/*
+ * Read the group 0 offset register for a given index. Index must be 0, 2,
+ * or 3, the register for 1 does not exist.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group0_voffset_read(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx != 1U);
+
+	return amu_group0_voffset_read_internal(idx);
+}
+
+/*
+ * Write the group 0 offset register for a given index. Index must be 0, 2, or
+ * 3, the register for 1 does not exist.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+void amu_group0_voffset_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx != 1U);
+
+	amu_group0_voffset_write_internal(idx, val);
+	isb();
+}
+
 #if AMU_GROUP1_NR_COUNTERS
 /* Read the group 1 counter identified by the given `idx` */
-uint64_t amu_group1_cnt_read(unsigned int idx)
+uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -129,9 +191,9 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
 }
 
 /* Write the group 1 counter identified by the given `idx` with `val` */
-void amu_group1_cnt_write(unsigned int idx, uint64_t val)
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -140,12 +202,45 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 }
 
 /*
+ * Read the group 1 offset register for a given index.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group1_voffset_read(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
+		(1ULL << idx)) != 0ULL);
+
+	return amu_group1_voffset_read_internal(idx);
+}
+
+/*
+ * Write the group 1 offset register for a given index.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+void amu_group1_voffset_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
+		(1ULL << idx)) != 0ULL);
+
+	amu_group1_voffset_write_internal(idx, val);
+	isb();
+}
+
+/*
 * Program the event type register for the given `idx` with
 * the event number `val`
 */
 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);
 
@@ -159,7 +254,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}
 
@@ -190,13 +285,37 @@ static void *amu_context_save(const void *arg)
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}
 
+	/* Save group 0 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		/* Not using a loop because count is fixed and index 1 DNE. */
+		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
+		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
+		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
+	}
+
 #if AMU_GROUP1_NR_COUNTERS
 	/* Save group 1 counters */
 	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
 			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 		}
 	}
+
+	/* Save group 1 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		u_register_t amcg1idr = read_amcg1idr_el0() >>
+			AMCG1IDR_VOFF_SHIFT;
+		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+
+		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
+				ctx->group1_voffsets[i] =
+					amu_group1_voffset_read(i);
+			}
+		}
+	}
 #endif
 	return (void *)0;
 }
@@ -206,7 +325,7 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}
 
@@ -227,17 +346,41 @@ static void *amu_context_restore(const void *arg)
 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}
 
+	/* Restore group 0 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		/* Not using a loop because count is fixed and index 1 DNE. */
+		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
+		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
+		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
+	}
+
 	/* Restore group 0 counter configuration */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
 
 #if AMU_GROUP1_NR_COUNTERS
 	/* Restore group 1 counters */
 	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
 			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 		}
 	}
 
+	/* Restore group 1 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		u_register_t amcg1idr = read_amcg1idr_el0() >>
+			AMCG1IDR_VOFF_SHIFT;
+		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+
+		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
+				amu_group1_voffset_write(i,
+					ctx->group1_voffsets[i]);
+			}
+		}
+	}
+
 	/* Restore group 1 counter configuration */
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
 #endif
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index 89007a3fb..9989abdeb 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -14,6 +14,12 @@
 	.globl	amu_group1_cnt_write_internal
 	.globl	amu_group1_set_evtype_internal
 
+	/* FEAT_AMUv1p1 virtualisation offset register functions */
+	.globl	amu_group0_voffset_read_internal
+	.globl	amu_group0_voffset_write_internal
+	.globl	amu_group1_voffset_read_internal
+	.globl	amu_group1_voffset_write_internal
+
 /*
 * uint64_t amu_group0_cnt_read_internal(int idx);
 *
@@ -211,3 +217,169 @@ func amu_group1_set_evtype_internal
 	write	AMEVTYPER1E_EL0		/* index 14 */
 	write	AMEVTYPER1F_EL0		/* index 15 */
 endfunc amu_group1_set_evtype_internal
+
+/*
+ * Accessor functions for virtual offset registers added with FEAT_AMUv1p1
+ */
+
+/*
+ * uint64_t amu_group0_voffset_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU virtual offset register
+ * and return it in `x0`.
+ */
+func amu_group0_voffset_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index. Ensure `idx` is valid.
+	 */
+	tst	x0, #~3
+	ASM_ASSERT(eq)
+	/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
+	cmp	x0, #1
+	ASM_ASSERT(ne)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVCNTVOFF00_EL2	/* index 0 */
+	.skip	8			/* AMEVCNTVOFF01_EL2 does not exist */
+#if ENABLE_BTI
+	.skip	4
+#endif
+	read	AMEVCNTVOFF02_EL2	/* index 2 */
+	read	AMEVCNTVOFF03_EL2	/* index 3 */
+endfunc amu_group0_voffset_read_internal
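The accessor works as a computed jump: x1 starts at label 1f and advances one table entry per index, where an mrs/ret pair is 8 bytes of code plus 4 more for the "bti j" landing pad when ENABLE_BTI is set, which is exactly what the two shifted adds encode; the .skip padding keeps later indices aligned even though AMEVCNTVOFF01_EL2 does not exist. The address arithmetic, modelled in C:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model of: add x1, x1, x0, lsl #3 (+ add x1, x1, x0, lsl #2) */
static inline uintptr_t table_entry_addr(uintptr_t base, unsigned int idx,
					 bool bti_enabled)
{
	unsigned int entry_size = bti_enabled ? 12U : 8U;

	return base + (uintptr_t)idx * entry_size;
}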
+
+/*
+ * void amu_group0_voffset_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU virtual offset register.
+ */
+func amu_group0_voffset_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index. Ensure `idx` is valid.
+	 */
+	tst	x0, #~3
+	ASM_ASSERT(eq)
+	/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
+	cmp	x0, #1
+	ASM_ASSERT(ne)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVCNTVOFF00_EL2	/* index 0 */
+	.skip	8			/* AMEVCNTVOFF01_EL2 does not exist */
+#if ENABLE_BTI
+	.skip	4
+#endif
+	write	AMEVCNTVOFF02_EL2	/* index 2 */
+	write	AMEVCNTVOFF03_EL2	/* index 3 */
+endfunc amu_group0_voffset_write_internal
+
+/*
+ * uint64_t amu_group1_voffset_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU virtual offset register
+ * and return it in `x0`.
+ */
+func amu_group1_voffset_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index. Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVCNTVOFF10_EL2	/* index 0 */
+	read	AMEVCNTVOFF11_EL2	/* index 1 */
+	read	AMEVCNTVOFF12_EL2	/* index 2 */
+	read	AMEVCNTVOFF13_EL2	/* index 3 */
+	read	AMEVCNTVOFF14_EL2	/* index 4 */
+	read	AMEVCNTVOFF15_EL2	/* index 5 */
+	read	AMEVCNTVOFF16_EL2	/* index 6 */
+	read	AMEVCNTVOFF17_EL2	/* index 7 */
+	read	AMEVCNTVOFF18_EL2	/* index 8 */
+	read	AMEVCNTVOFF19_EL2	/* index 9 */
+	read	AMEVCNTVOFF1A_EL2	/* index 10 */
+	read	AMEVCNTVOFF1B_EL2	/* index 11 */
+	read	AMEVCNTVOFF1C_EL2	/* index 12 */
+	read	AMEVCNTVOFF1D_EL2	/* index 13 */
+	read	AMEVCNTVOFF1E_EL2	/* index 14 */
+	read	AMEVCNTVOFF1F_EL2	/* index 15 */
+endfunc amu_group1_voffset_read_internal
+
+/*
+ * void amu_group1_voffset_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU virtual offset register.
+ */
+func amu_group1_voffset_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index. Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVCNTVOFF10_EL2	/* index 0 */
+	write	AMEVCNTVOFF11_EL2	/* index 1 */
+	write	AMEVCNTVOFF12_EL2	/* index 2 */
+	write	AMEVCNTVOFF13_EL2	/* index 3 */
+	write	AMEVCNTVOFF14_EL2	/* index 4 */
+	write	AMEVCNTVOFF15_EL2	/* index 5 */
+	write	AMEVCNTVOFF16_EL2	/* index 6 */
+	write	AMEVCNTVOFF17_EL2	/* index 7 */
+	write	AMEVCNTVOFF18_EL2	/* index 8 */
+	write	AMEVCNTVOFF19_EL2	/* index 9 */
+	write	AMEVCNTVOFF1A_EL2	/* index 10 */
+	write	AMEVCNTVOFF1B_EL2	/* index 11 */
+	write	AMEVCNTVOFF1C_EL2	/* index 12 */
+	write	AMEVCNTVOFF1D_EL2	/* index 13 */
+	write	AMEVCNTVOFF1E_EL2	/* index 14 */
+	write	AMEVCNTVOFF1F_EL2	/* index 15 */
+endfunc amu_group1_voffset_write_internal
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 36f9a95b6..622879efa 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
@@ -139,7 +139,7 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 	assert(ras_interrupt_mappings.num_intrs > 0UL);
 
 	start = 0;
-	end = (int) ras_interrupt_mappings.num_intrs;
+	end = (int)ras_interrupt_mappings.num_intrs - 1;
 	while (start <= end) {
 		mid = ((end + start) / 2);
 		if (intr_raw == ras_inrs[mid].intr_number) {
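The one-line ras_common.c change fixes an off-by-one: with inclusive bounds (while (start <= end)), end must start at the last valid index, otherwise mid can reach num_intrs and the probe reads one element past the mapping array. The corrected shape as a standalone sketch, with illustrative types in place of the real interrupt-mapping struct:

#include <stdint.h>

static int find_intr(const uint32_t *intrs, int count, uint32_t intr_raw)
{
	int start = 0;
	int end = count - 1;		/* last valid index, inclusive */

	while (start <= end) {
		int mid = start + (end - start) / 2;

		if (intrs[mid] == intr_raw)
			return mid;
		if (intrs[mid] < intr_raw)
			start = mid + 1;
		else
			end = mid - 1;
	}
	return -1;			/* interrupt not mapped */
}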
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 36f9a95b6..622879efa 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
@@ -139,7 +139,7 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 assert(ras_interrupt_mappings.num_intrs > 0UL);

 start = 0;
- end = (int) ras_interrupt_mappings.num_intrs;
+ end = (int)ras_interrupt_mappings.num_intrs - 1;
 while (start <= end) {
 mid = ((end + start) / 2);
 if (intr_raw == ras_inrs[mid].intr_number) {
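The one-line ras_common.c change above corrects the binary-search bounds: with an inclusive `while (start <= end)` loop, `end` must start at the index of the last element, not at the element count, otherwise `mid` can reach `num_intrs` and the probe reads one past the end of the mapping array. A self-contained sketch of the corrected shape (hypothetical types; the real handler searches ras_interrupt_mappings, which it requires to be sorted by interrupt number):

    #include <stddef.h>
    #include <stdint.h>

    struct intr_map {
        uint32_t intr_number;
        /* handler data would follow here */
    };

    /* Returns the matching index, or -1 if intr_raw is not mapped.
     * Assumes `map` is sorted by ascending intr_number. */
    static int find_intr(const struct intr_map *map, size_t num, uint32_t intr_raw)
    {
        int start = 0;
        int end = (int)num - 1; /* last valid index, not the count */

        while (start <= end) {
            int mid = (start + end) / 2;

            if (intr_raw == map[mid].intr_number) {
                return mid;
            } else if (intr_raw < map[mid].intr_number) {
                end = mid - 1;
            } else {
                start = mid + 1;
            }
        }
        return -1;
    }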
diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c
index fa4ac7758..2702c30f3 100644
--- a/lib/extensions/sve/sve.c
+++ b/lib/extensions/sve/sve.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -11,7 +11,13 @@
 #include <lib/el3_runtime/pubsub.h>
 #include <lib/extensions/sve.h>

-bool sve_supported(void)
+/*
+ * Converts an SVE vector length restriction in bits to LEN according to the
+ * ZCR_EL3 documentation: VECTOR_SIZE = (LEN+1) * 128
+ */
+#define CONVERT_SVE_LENGTH(x) (((x / 128) - 1))
+
+static bool sve_supported(void)
 {
 uint64_t features;

@@ -19,113 +25,21 @@ bool sve_supported(void)
 return (features & ID_AA64PFR0_SVE_MASK) == 1U;
 }

-static void *disable_sve_hook(const void *arg)
-{
- uint64_t cptr;
-
- if (!sve_supported())
- return (void *)-1;
-
- /*
- * Disable SVE, SIMD and FP access for the Secure world.
- * As the SIMD/FP registers are part of the SVE Z-registers, any
- * use of SIMD/FP functionality will corrupt the SVE registers.
- * Therefore it is necessary to prevent use of SIMD/FP support
- * in the Secure world as well as SVE functionality.
- */
- cptr = read_cptr_el3();
- cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
- write_cptr_el3(cptr);
-
- /*
- * No explicit ISB required here as ERET to switch to Secure
- * world covers it
- */
- return (void *)0;
-}
-
-static void *enable_sve_hook(const void *arg)
-{
- uint64_t cptr;
-
- if (!sve_supported())
- return (void *)-1;
-
- /*
- * Enable SVE, SIMD and FP access for the Non-secure world.
- */
- cptr = read_cptr_el3();
- cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
- write_cptr_el3(cptr);
-
- /*
- * No explicit ISB required here as ERET to switch to Non-secure
- * world covers it
- */
- return (void *)0;
-}
-
-void sve_enable(bool el2_unused)
+void sve_enable(cpu_context_t *context)
 {
- uint64_t cptr;
+ u_register_t cptr_el3;

- if (!sve_supported())
+ if (!sve_supported()) {
 return;
+ }

-#if CTX_INCLUDE_FPREGS
- /*
- * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
- */
- assert(0);
-#endif
- /*
- * Update CPTR_EL3 to enable access to SVE functionality for the
- * Non-secure world.
- * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
- * the SIMD, floating-point and SVE support.
- *
- * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
- * in the Non-secure world.
- */
- cptr = read_cptr_el3();
- cptr |= CPTR_EZ_BIT;
- write_cptr_el3(cptr);
-
- /*
- * Need explicit ISB here to guarantee that update to ZCR_ELx
- * and CPTR_EL2.TZ do not result in trap to EL3.
- */
- isb();
-
- /*
- * Ensure lower ELs have access to full vector length.
- */
- write_zcr_el3(ZCR_EL3_LEN_MASK);
+ cptr_el3 = read_ctx_reg(get_el3state_ctx(context), CTX_CPTR_EL3);

- if (el2_unused) {
- /*
- * Update CPTR_EL2 to enable access to SVE functionality
- * for Non-secure world, EL2 and Non-secure EL1 and EL0.
- * NOTE - assumed that CPTR_EL2.TFP is set to allow
- * access to the SIMD, floating-point and SVE support.
- *
- * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
- * for EL2 and Non-secure EL1 and EL0.
- */
- cptr = read_cptr_el2();
- cptr &= ~(CPTR_EL2_TZ_BIT);
- write_cptr_el2(cptr);
+ /* Enable access to SVE functionality for all ELs. */
+ cptr_el3 = (cptr_el3 | CPTR_EZ_BIT) & ~(TFP_BIT);
+ write_ctx_reg(get_el3state_ctx(context), CTX_CPTR_EL3, cptr_el3);

- /*
- * Ensure lower ELs have access to full vector length.
- */
- write_zcr_el2(ZCR_EL2_LEN_MASK);
- }
- /*
- * No explicit ISB required here as ERET to switch to
- * Non-secure world covers it.
- */
+ /* Restrict the maximum SVE vector length to (LEN+1) * 128 bits. */
+ write_ctx_reg(get_el3state_ctx(context), CTX_ZCR_EL3,
+ (ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(512)));
 }
-
-SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
-SUBSCRIBE_TO_EVENT(cm_entering_normal_world, enable_sve_hook);
diff --git a/lib/libc/memset.c b/lib/libc/memset.c
index f9dd4c5db..17f798cb9 100644
--- a/lib/libc/memset.c
+++ b/lib/libc/memset.c
@@ -10,19 +10,20 @@
 void *memset(void *dst, int val, size_t count)
 {
- char *ptr = dst;
+ uint8_t *ptr = dst;
 uint64_t *ptr64;
 uint64_t fill = (unsigned char)val;

 /* Simplify code below by making sure we write at least one byte. */
- if (count == 0) {
+ if (count == 0U) {
 return dst;
 }

 /* Handle the first part, until the pointer becomes 64-bit aligned. */
- while (((uintptr_t)ptr & 7)) {
- *ptr++ = val;
- if (--count == 0) {
+ while (((uintptr_t)ptr & 7U) != 0U) {
+ *ptr = (uint8_t)val;
+ ptr++;
+ if (--count == 0U) {
 return dst;
 }
 }
@@ -33,15 +34,17 @@ void *memset(void *dst, int val, size_t count)
 fill |= fill << 32;

 /* Use 64-bit writes for as long as possible. */
- ptr64 = (void *)ptr;
- for (; count >= 8; count -= 8) {
- *ptr64++ = fill;
+ ptr64 = (uint64_t *)ptr;
+ for (; count >= 8U; count -= 8) {
+ *ptr64 = fill;
+ ptr64++;
 }

 /* Handle the remaining part byte-per-byte. */
- ptr = (void *)ptr64;
- while (count--) {
- *ptr++ = val;
+ ptr = (uint8_t *)ptr64;
+ while (count-- > 0U) {
+ *ptr = (uint8_t)val;
+ ptr++;
 }

 return dst;
diff --git a/lib/zlib/tf_gunzip.c b/lib/zlib/tf_gunzip.c
index fd56dfc7c..3ac80bc5b 100644
--- a/lib/zlib/tf_gunzip.c
+++ b/lib/zlib/tf_gunzip.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -9,6 +9,7 @@
 #include <string.h>

 #include <common/debug.h>
+#include <common/tf_crc32.h>
 #include <lib/utils.h>
 #include <tf_gunzip.h>

@@ -100,3 +101,15 @@ int gunzip(uintptr_t *in_buf, size_t in_len, uintptr_t *out_buf,
 return ret;
 }
+
+/* Wrapper function to calculate CRC
+ * @crc: previous accumulated CRC
+ * @buf: buffer base address
+ * @size: size of the buffer
+ *
+ * Return calculated CRC32 value
+ */
+uint32_t tf_crc32(uint32_t crc, const unsigned char *buf, size_t size)
+{
+ return (uint32_t)crc32((unsigned long)crc, buf, size);
+}
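On the sve.c change: ZCR_EL3.LEN encodes the maximum vector length as (LEN + 1) * 128 bits, so CONVERT_SVE_LENGTH(512) yields LEN = 3 and caps vectors at 512 bits (64 bytes). A compile-time check of that conversion, as a sketch mirroring the macro above rather than part of the commit:

    /* VECTOR_LENGTH_BITS = (LEN + 1) * 128  <=>  LEN = VECTOR_LENGTH_BITS / 128 - 1 */
    #define CONVERT_SVE_LENGTH(bits) (((bits) / 128) - 1)

    _Static_assert(CONVERT_SVE_LENGTH(128) == 0,
                   "128-bit vectors encode as LEN = 0");
    _Static_assert(CONVERT_SVE_LENGTH(512) == 3,
                   "the 512-bit cap used above encodes as LEN = 3");
    _Static_assert(CONVERT_SVE_LENGTH(2048) == 15,
                   "the 2048-bit architectural maximum encodes as LEN = 15");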