Diffstat (limited to 'lib/extensions')
-rw-r--r-- | lib/extensions/amu/aarch32/amu.c | 428
-rw-r--r-- | lib/extensions/amu/aarch32/amu_helpers.S | 2
-rw-r--r-- | lib/extensions/amu/aarch64/amu.c | 656
-rw-r--r-- | lib/extensions/amu/aarch64/amu_helpers.S | 4
-rw-r--r-- | lib/extensions/amu/amu.mk | 24
-rw-r--r-- | lib/extensions/amu/amu_private.h | 38
-rw-r--r-- | lib/extensions/sme/sme.c | 103
-rw-r--r-- | lib/extensions/sve/sve.c | 20
-rw-r--r-- | lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c | 36
-rw-r--r-- | lib/extensions/sys_reg_trace/aarch64/sys_reg_trace.c | 37
-rw-r--r-- | lib/extensions/trbe/trbe.c | 63
-rw-r--r-- | lib/extensions/trf/aarch32/trf.c | 35
-rw-r--r-- | lib/extensions/trf/aarch64/trf.c | 36
13 files changed, 1142 insertions, 340 deletions
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c index ed56dddc9..57b115825 100644 --- a/lib/extensions/amu/aarch32/amu.c +++ b/lib/extensions/amu/aarch32/amu.c @@ -5,95 +5,224 @@ */ #include <assert.h> +#include <cdefs.h> #include <stdbool.h> +#include "../amu_private.h" #include <arch.h> #include <arch_helpers.h> - +#include <common/debug.h> #include <lib/el3_runtime/pubsub_events.h> #include <lib/extensions/amu.h> -#include <lib/extensions/amu_private.h> #include <plat/common/platform.h> -static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT]; +struct amu_ctx { + uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS]; +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS]; +#endif -/* - * Get AMU version value from pfr0. - * Return values - * ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4) - * ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6) - * ID_PFR0_AMU_NOT_SUPPORTED: not supported - */ -unsigned int amu_get_version(void) + uint16_t group0_enable; +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint16_t group1_enable; +#endif +}; + +static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT]; + +CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS, + amu_ctx_group0_enable_cannot_represent_all_group0_counters); + +#if ENABLE_AMU_AUXILIARY_COUNTERS +CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS, + amu_ctx_group1_enable_cannot_represent_all_group1_counters); +#endif + +static inline __unused uint32_t read_id_pfr0_amu(void) { - return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) & + return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) & ID_PFR0_AMU_MASK; } -#if AMU_GROUP1_NR_COUNTERS -/* Check if group 1 counters is implemented */ -bool amu_group1_supported(void) +static inline __unused void write_hcptr_tam(uint32_t value) +{ + write_hcptr((read_hcptr() & ~TAM_BIT) | + ((value << TAM_SHIFT) & TAM_BIT)); +} + +static inline __unused void write_amcr_cg1rz(uint32_t value) +{ + write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) | + ((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT)); +} + +static inline __unused uint32_t read_amcfgr_ncg(void) +{ + return (read_amcfgr() >> AMCFGR_NCG_SHIFT) & + AMCFGR_NCG_MASK; +} + +static inline __unused uint32_t read_amcgcr_cg0nc(void) +{ + return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) & + AMCGCR_CG0NC_MASK; +} + +static inline __unused uint32_t read_amcgcr_cg1nc(void) +{ + return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) & + AMCGCR_CG1NC_MASK; +} + +static inline __unused uint32_t read_amcntenset0_px(void) +{ + return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) & + AMCNTENSET0_Pn_MASK; +} + +static inline __unused uint32_t read_amcntenset1_px(void) +{ + return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) & + AMCNTENSET1_Pn_MASK; +} + +static inline __unused void write_amcntenset0_px(uint32_t px) +{ + uint32_t value = read_amcntenset0(); + + value &= ~AMCNTENSET0_Pn_MASK; + value |= (px << AMCNTENSET0_Pn_SHIFT) & + AMCNTENSET0_Pn_MASK; + + write_amcntenset0(value); +} + +static inline __unused void write_amcntenset1_px(uint32_t px) +{ + uint32_t value = read_amcntenset1(); + + value &= ~AMCNTENSET1_Pn_MASK; + value |= (px << AMCNTENSET1_Pn_SHIFT) & + AMCNTENSET1_Pn_MASK; + + write_amcntenset1(value); +} + +static inline __unused void write_amcntenclr0_px(uint32_t px) +{ + uint32_t value = read_amcntenclr0(); + + value &= ~AMCNTENCLR0_Pn_MASK; + value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK; + + write_amcntenclr0(value); +} + +static 
inline __unused void write_amcntenclr1_px(uint32_t px) +{ + uint32_t value = read_amcntenclr1(); + + value &= ~AMCNTENCLR1_Pn_MASK; + value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK; + + write_amcntenclr1(value); +} + +static __unused bool amu_supported(void) { - uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT; + return read_id_pfr0_amu() >= ID_PFR0_AMU_V1; +} - return (features & AMCFGR_NCG_MASK) == 1U; +#if ENABLE_AMU_AUXILIARY_COUNTERS +static __unused bool amu_group1_supported(void) +{ + return read_amcfgr_ncg() > 0U; } #endif /* - * Enable counters. This function is meant to be invoked - * by the context management library before exiting from EL3. + * Enable counters. This function is meant to be invoked by the context + * management library before exiting from EL3. */ void amu_enable(bool el2_unused) { - if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) { - return; - } + uint32_t id_pfr0_amu; /* AMU version */ -#if AMU_GROUP1_NR_COUNTERS - /* Check and set presence of group 1 counters */ - if (!amu_group1_supported()) { - ERROR("AMU Counter Group 1 is not implemented\n"); - panic(); - } + uint32_t amcfgr_ncg; /* Number of counter groups */ + uint32_t amcgcr_cg0nc; /* Number of group 0 counters */ - /* Check number of group 1 counters */ - uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) & - AMCGCR_CG1NC_MASK; - VERBOSE("%s%u. %s%u\n", - "Number of AMU Group 1 Counters ", cnt_num, - "Requested number ", AMU_GROUP1_NR_COUNTERS); - - if (cnt_num < AMU_GROUP1_NR_COUNTERS) { - ERROR("%s%u is less than %s%u\n", - "Number of AMU Group 1 Counters ", cnt_num, - "Requested number ", AMU_GROUP1_NR_COUNTERS); - panic(); + uint32_t amcntenset0_px = 0x0; /* Group 0 enable mask */ + uint32_t amcntenset1_px = 0x0; /* Group 1 enable mask */ + + id_pfr0_amu = read_id_pfr0_amu(); + if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) { + /* + * If the AMU is unsupported, nothing needs to be done. + */ + + return; } -#endif if (el2_unused) { - uint64_t v; /* - * Non-secure access from EL0 or EL1 to the Activity Monitor - * registers do not trap to EL2. + * HCPTR.TAM: Set to zero so any accesses to the Activity + * Monitor registers do not trap to EL2. */ - v = read_hcptr(); - v &= ~TAM_BIT; - write_hcptr(v); + write_hcptr_tam(0U); + } + + /* + * Retrieve the number of architected counters. All of these counters + * are enabled by default. + */ + + amcgcr_cg0nc = read_amcgcr_cg0nc(); + amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U; + + assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX); + + /* + * The platform may opt to enable specific auxiliary counters. This can + * be done via the common FCONF getter, or via the platform-implemented + * function. + */ + +#if ENABLE_AMU_AUXILIARY_COUNTERS + const struct amu_topology *topology; + +#if ENABLE_AMU_FCONF + topology = FCONF_GET_PROPERTY(amu, config, topology); +#else + topology = plat_amu_topology(); +#endif /* ENABLE_AMU_FCONF */ + + if (topology != NULL) { + unsigned int core_pos = plat_my_core_pos(); + + amcntenset1_px = topology->cores[core_pos].enable; + } else { + ERROR("AMU: failed to generate AMU topology\n"); } +#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */ + + /* + * Enable the requested counters. 
+ */ + + write_amcntenset0_px(amcntenset0_px); - /* Enable group 0 counters */ - write_amcntenset0(AMU_GROUP0_COUNTERS_MASK); + amcfgr_ncg = read_amcfgr_ncg(); + if (amcfgr_ncg > 0U) { + write_amcntenset1_px(amcntenset1_px); -#if AMU_GROUP1_NR_COUNTERS - /* Enable group 1 counters */ - write_amcntenset1(AMU_GROUP1_COUNTERS_MASK); +#if !ENABLE_AMU_AUXILIARY_COUNTERS + VERBOSE("AMU: auxiliary counters detected but support is disabled\n"); #endif + } /* Initialize FEAT_AMUv1p1 features if present. */ - if (amu_get_version() < ID_PFR0_AMU_V1P1) { + if (id_pfr0_amu < ID_PFR0_AMU_V1P1) { return; } @@ -106,154 +235,183 @@ void amu_enable(bool el2_unused) * mapped view are unaffected. */ VERBOSE("AMU group 1 counter access restricted.\n"); - write_amcr(read_amcr() | AMCR_CG1RZ_BIT); + write_amcr_cg1rz(1U); #else - write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT); + write_amcr_cg1rz(0U); #endif } /* Read the group 0 counter identified by the given `idx`. */ -uint64_t amu_group0_cnt_read(unsigned int idx) +static uint64_t amu_group0_cnt_read(unsigned int idx) { - assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_supported()); + assert(idx < read_amcgcr_cg0nc()); return amu_group0_cnt_read_internal(idx); } /* Write the group 0 counter identified by the given `idx` with `val` */ -void amu_group0_cnt_write(unsigned int idx, uint64_t val) +static void amu_group0_cnt_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_supported()); + assert(idx < read_amcgcr_cg0nc()); amu_group0_cnt_write_internal(idx, val); isb(); } -#if AMU_GROUP1_NR_COUNTERS +#if ENABLE_AMU_AUXILIARY_COUNTERS /* Read the group 1 counter identified by the given `idx` */ -uint64_t amu_group1_cnt_read(unsigned int idx) +static uint64_t amu_group1_cnt_read(unsigned int idx) { - assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED); + assert(amu_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + assert(idx < read_amcgcr_cg1nc()); return amu_group1_cnt_read_internal(idx); } /* Write the group 1 counter identified by the given `idx` with `val` */ -void amu_group1_cnt_write(unsigned int idx, uint64_t val) +static void amu_group1_cnt_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED); + assert(amu_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + assert(idx < read_amcgcr_cg1nc()); amu_group1_cnt_write_internal(idx, val); isb(); } +#endif -/* - * Program the event type register for the given `idx` with - * the event number `val` - */ -void amu_group1_set_evtype(unsigned int idx, unsigned int val) +static void *amu_context_save(const void *arg) { - assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED); - assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + uint32_t i; - amu_group1_set_evtype_internal(idx, val); - isb(); -} -#endif /* AMU_GROUP1_NR_COUNTERS */ + unsigned int core_pos; + struct amu_ctx *ctx; -static void *amu_context_save(const void *arg) -{ - struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()]; - unsigned int i; + uint32_t id_pfr0_amu; /* AMU version */ + uint32_t amcgcr_cg0nc; /* Number of group 0 counters */ - if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) { - return (void *)-1; - } +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint32_t amcfgr_ncg; /* Number of counter groups */ + uint32_t amcgcr_cg1nc; /* Number of group 1 counters */ 
+#endif -#if AMU_GROUP1_NR_COUNTERS - if (!amu_group1_supported()) { - return (void *)-1; + id_pfr0_amu = read_id_pfr0_amu(); + if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) { + return (void *)0; } -#endif - /* Assert that group 0/1 counter configuration is what we expect */ - assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK); -#if AMU_GROUP1_NR_COUNTERS - assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK); + core_pos = plat_my_core_pos(); + ctx = &amu_ctxs_[core_pos]; + + amcgcr_cg0nc = read_amcgcr_cg0nc(); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + amcfgr_ncg = read_amcfgr_ncg(); + amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U; #endif + /* - * Disable group 0/1 counters to avoid other observers like SCP sampling - * counter values from the future via the memory mapped view. + * Disable all AMU counters. */ - write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK); -#if AMU_GROUP1_NR_COUNTERS - write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK); + ctx->group0_enable = read_amcntenset0_px(); + write_amcntenclr0_px(ctx->group0_enable); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + if (amcfgr_ncg > 0U) { + ctx->group1_enable = read_amcntenset1_px(); + write_amcntenclr1_px(ctx->group1_enable); + } #endif - isb(); - /* Save all group 0 counters */ - for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) { + /* + * Save the counters to the local context. + */ + + isb(); /* Ensure counters have been stopped */ + + for (i = 0U; i < amcgcr_cg0nc; i++) { ctx->group0_cnts[i] = amu_group0_cnt_read(i); } -#if AMU_GROUP1_NR_COUNTERS - /* Save group 1 counters */ - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) { - ctx->group1_cnts[i] = amu_group1_cnt_read(i); - } +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U; i < amcgcr_cg1nc; i++) { + ctx->group1_cnts[i] = amu_group1_cnt_read(i); } #endif + return (void *)0; } static void *amu_context_restore(const void *arg) { - struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()]; - unsigned int i; + uint32_t i; - if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) { - return (void *)-1; - } + unsigned int core_pos; + struct amu_ctx *ctx; -#if AMU_GROUP1_NR_COUNTERS - if (!amu_group1_supported()) { - return (void *)-1; - } + uint32_t id_pfr0_amu; /* AMU version */ + + uint32_t amcfgr_ncg; /* Number of counter groups */ + uint32_t amcgcr_cg0nc; /* Number of group 0 counters */ + +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint32_t amcgcr_cg1nc; /* Number of group 1 counters */ #endif - /* Counters were disabled in `amu_context_save()` */ - assert(read_amcntenset0_el0() == 0U); -#if AMU_GROUP1_NR_COUNTERS - assert(read_amcntenset1_el0() == 0U); + id_pfr0_amu = read_id_pfr0_amu(); + if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) { + return (void *)0; + } + + core_pos = plat_my_core_pos(); + ctx = &amu_ctxs_[core_pos]; + + amcfgr_ncg = read_amcfgr_ncg(); + amcgcr_cg0nc = read_amcgcr_cg0nc(); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U; #endif - /* Restore all group 0 counters */ - for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) { - amu_group0_cnt_write(i, ctx->group0_cnts[i]); + /* + * Sanity check that all counters were disabled when the context was + * previously saved. + */ + + assert(read_amcntenset0_px() == 0U); + + if (amcfgr_ncg > 0U) { + assert(read_amcntenset1_px() == 0U); } - /* Restore group 0 counter configuration */ - write_amcntenset0(AMU_GROUP0_COUNTERS_MASK); + /* + * Restore the counter values from the local context. 
+ */ + + for (i = 0U; i < amcgcr_cg0nc; i++) { + amu_group0_cnt_write(i, ctx->group0_cnts[i]); + } -#if AMU_GROUP1_NR_COUNTERS - /* Restore group 1 counters */ - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) { - amu_group1_cnt_write(i, ctx->group1_cnts[i]); - } +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U; i < amcgcr_cg1nc; i++) { + amu_group1_cnt_write(i, ctx->group1_cnts[i]); } +#endif + + /* + * Re-enable counters that were disabled during context save. + */ - /* Restore group 1 counter configuration */ - write_amcntenset1(AMU_GROUP1_COUNTERS_MASK); + write_amcntenset0_px(ctx->group0_enable); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + if (amcfgr_ncg > 0U) { + write_amcntenset1_px(ctx->group1_enable); + } #endif return (void *)0; diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S index d387341f7..8ac76785a 100644 --- a/lib/extensions/amu/aarch32/amu_helpers.S +++ b/lib/extensions/amu/aarch32/amu_helpers.S @@ -84,6 +84,7 @@ func amu_group0_cnt_write_internal bx lr endfunc amu_group0_cnt_write_internal +#if ENABLE_AMU_AUXILIARY_COUNTERS /* * uint64_t amu_group1_cnt_read_internal(int idx); * @@ -267,3 +268,4 @@ func amu_group1_set_evtype_internal stcopr r1, AMEVTYPER1F /* index 15 */ bx lr endfunc amu_group1_set_evtype_internal +#endif diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c index 295c0d569..d329c3d33 100644 --- a/lib/extensions/amu/aarch64/amu.c +++ b/lib/extensions/amu/aarch64/amu.c @@ -5,86 +5,220 @@ */ #include <assert.h> +#include <cdefs.h> +#include <inttypes.h> #include <stdbool.h> +#include <stdint.h> +#include "../amu_private.h" #include <arch.h> #include <arch_features.h> #include <arch_helpers.h> - +#include <common/debug.h> #include <lib/el3_runtime/pubsub_events.h> #include <lib/extensions/amu.h> -#include <lib/extensions/amu_private.h> #include <plat/common/platform.h> -static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT]; +#if ENABLE_AMU_FCONF +# include <lib/fconf/fconf.h> +# include <lib/fconf/fconf_amu_getter.h> +#endif -/* - * Get AMU version value from aa64pfr0. 
- * Return values - * ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4) - * ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6) - * ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported - */ -unsigned int amu_get_version(void) +#if ENABLE_MPMM +# include <lib/mpmm/mpmm.h> +#endif + +struct amu_ctx { + uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS]; +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS]; +#endif + + /* Architected event counter 1 does not have an offset register */ + uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U]; +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS]; +#endif + + uint16_t group0_enable; +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint16_t group1_enable; +#endif +}; + +static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT]; + +CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS, + amu_ctx_group0_enable_cannot_represent_all_group0_counters); + +#if ENABLE_AMU_AUXILIARY_COUNTERS +CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS, + amu_ctx_group1_enable_cannot_represent_all_group1_counters); +#endif + +static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void) { - return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) & + return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) & ID_AA64PFR0_AMU_MASK; } -#if AMU_GROUP1_NR_COUNTERS -/* Check if group 1 counters is implemented */ -bool amu_group1_supported(void) +static inline __unused uint64_t read_hcr_el2_amvoffen(void) +{ + return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >> + HCR_AMVOFFEN_SHIFT; +} + +static inline __unused void write_cptr_el2_tam(uint64_t value) { - uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT; + write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) | + ((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT)); +} + +static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam) +{ + uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3); - return (features & AMCFGR_EL0_NCG_MASK) == 1U; + value &= ~TAM_BIT; + value |= (tam << TAM_SHIFT) & TAM_BIT; + + write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value); +} + +static inline __unused void write_hcr_el2_amvoffen(uint64_t value) +{ + write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) | + ((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT)); +} + +static inline __unused void write_amcr_el0_cg1rz(uint64_t value) +{ + write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) | + ((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT)); +} + +static inline __unused uint64_t read_amcfgr_el0_ncg(void) +{ + return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) & + AMCFGR_EL0_NCG_MASK; +} + +static inline __unused uint64_t read_amcgcr_el0_cg0nc(void) +{ + return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) & + AMCGCR_EL0_CG0NC_MASK; +} + +static inline __unused uint64_t read_amcg1idr_el0_voff(void) +{ + return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) & + AMCG1IDR_VOFF_MASK; +} + +static inline __unused uint64_t read_amcgcr_el0_cg1nc(void) +{ + return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) & + AMCGCR_EL0_CG1NC_MASK; +} + +static inline __unused uint64_t read_amcntenset0_el0_px(void) +{ + return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) & + AMCNTENSET0_EL0_Pn_MASK; +} + +static inline __unused uint64_t read_amcntenset1_el0_px(void) +{ + return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) & + AMCNTENSET1_EL0_Pn_MASK; +} + +static inline 
__unused void write_amcntenset0_el0_px(uint64_t px) +{ + uint64_t value = read_amcntenset0_el0(); + + value &= ~AMCNTENSET0_EL0_Pn_MASK; + value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK; + + write_amcntenset0_el0(value); +} + +static inline __unused void write_amcntenset1_el0_px(uint64_t px) +{ + uint64_t value = read_amcntenset1_el0(); + + value &= ~AMCNTENSET1_EL0_Pn_MASK; + value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK; + + write_amcntenset1_el0(value); +} + +static inline __unused void write_amcntenclr0_el0_px(uint64_t px) +{ + uint64_t value = read_amcntenclr0_el0(); + + value &= ~AMCNTENCLR0_EL0_Pn_MASK; + value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK; + + write_amcntenclr0_el0(value); +} + +static inline __unused void write_amcntenclr1_el0_px(uint64_t px) +{ + uint64_t value = read_amcntenclr1_el0(); + + value &= ~AMCNTENCLR1_EL0_Pn_MASK; + value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK; + + write_amcntenclr1_el0(value); +} + +static __unused bool amu_supported(void) +{ + return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1; +} + +static __unused bool amu_v1p1_supported(void) +{ + return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1; +} + +#if ENABLE_AMU_AUXILIARY_COUNTERS +static __unused bool amu_group1_supported(void) +{ + return read_amcfgr_el0_ncg() > 0U; } #endif /* - * Enable counters. This function is meant to be invoked - * by the context management library before exiting from EL3. + * Enable counters. This function is meant to be invoked by the context + * management library before exiting from EL3. */ void amu_enable(bool el2_unused, cpu_context_t *ctx) { - uint64_t v; - unsigned int amu_version = amu_get_version(); + uint64_t id_aa64pfr0_el1_amu; /* AMU version */ - if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) { - return; - } + uint64_t amcfgr_el0_ncg; /* Number of counter groups */ + uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */ -#if AMU_GROUP1_NR_COUNTERS - /* Check and set presence of group 1 counters */ - if (!amu_group1_supported()) { - ERROR("AMU Counter Group 1 is not implemented\n"); - panic(); - } + uint64_t amcntenset0_el0_px = 0x0; /* Group 0 enable mask */ + uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */ - /* Check number of group 1 counters */ - uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) & - AMCGCR_EL0_CG1NC_MASK; - VERBOSE("%s%llu. %s%u\n", - "Number of AMU Group 1 Counters ", cnt_num, - "Requested number ", AMU_GROUP1_NR_COUNTERS); - - if (cnt_num < AMU_GROUP1_NR_COUNTERS) { - ERROR("%s%llu is less than %s%u\n", - "Number of AMU Group 1 Counters ", cnt_num, - "Requested number ", AMU_GROUP1_NR_COUNTERS); - panic(); + id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu(); + if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) { + /* + * If the AMU is unsupported, nothing needs to be done. + */ + + return; } -#endif if (el2_unused) { /* - * CPTR_EL2.TAM: Set to zero so any accesses to - * the Activity Monitor registers do not trap to EL2. + * CPTR_EL2.TAM: Set to zero so any accesses to the Activity + * Monitor registers do not trap to EL2. */ - v = read_cptr_el2(); - v &= ~CPTR_EL2_TAM_BIT; - write_cptr_el2(v); + write_cptr_el2_tam(0U); } /* @@ -92,72 +226,141 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx) * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to * the Activity Monitor registers do not trap to EL3. 
*/ - v = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3); - v &= ~TAM_BIT; - write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, v); + write_cptr_el3_tam(ctx, 0U); - /* Enable group 0 counters */ - write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK); + /* + * Retrieve the number of architected counters. All of these counters + * are enabled by default. + */ -#if AMU_GROUP1_NR_COUNTERS - /* Enable group 1 counters */ - write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK); -#endif + amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc(); + amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U; - /* Initialize FEAT_AMUv1p1 features if present. */ - if (amu_version < ID_AA64PFR0_AMU_V1P1) { - return; - } + assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX); - if (el2_unused) { - /* Make sure virtual offsets are disabled if EL2 not used. */ - write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT); + /* + * The platform may opt to enable specific auxiliary counters. This can + * be done via the common FCONF getter, or via the platform-implemented + * function. + */ + +#if ENABLE_AMU_AUXILIARY_COUNTERS + const struct amu_topology *topology; + +#if ENABLE_AMU_FCONF + topology = FCONF_GET_PROPERTY(amu, config, topology); +#else + topology = plat_amu_topology(); +#endif /* ENABLE_AMU_FCONF */ + + if (topology != NULL) { + unsigned int core_pos = plat_my_core_pos(); + + amcntenset1_el0_px = topology->cores[core_pos].enable; + } else { + ERROR("AMU: failed to generate AMU topology\n"); } +#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */ -#if AMU_RESTRICT_COUNTERS /* - * FEAT_AMUv1p1 adds a register field to restrict access to group 1 - * counters at all but the highest implemented EL. This is controlled - * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system - * register reads at lower ELs return zero. Reads from the memory - * mapped view are unaffected. + * Enable the requested counters. */ - VERBOSE("AMU group 1 counter access restricted.\n"); - write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT); + + write_amcntenset0_el0_px(amcntenset0_el0_px); + + amcfgr_el0_ncg = read_amcfgr_el0_ncg(); + if (amcfgr_el0_ncg > 0U) { + write_amcntenset1_el0_px(amcntenset1_el0_px); + +#if !ENABLE_AMU_AUXILIARY_COUNTERS + VERBOSE("AMU: auxiliary counters detected but support is disabled\n"); +#endif + } + + /* Initialize FEAT_AMUv1p1 features if present. */ + if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) { + if (el2_unused) { + /* + * Make sure virtual offsets are disabled if EL2 not + * used. + */ + write_hcr_el2_amvoffen(0U); + } + +#if AMU_RESTRICT_COUNTERS + /* + * FEAT_AMUv1p1 adds a register field to restrict access to + * group 1 counters at all but the highest implemented EL. This + * is controlled with the `AMU_RESTRICT_COUNTERS` compile time + * flag, when set, system register reads at lower ELs return + * zero. Reads from the memory mapped view are unaffected. + */ + VERBOSE("AMU group 1 counter access restricted.\n"); + write_amcr_el0_cg1rz(1U); #else - write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT); + write_amcr_el0_cg1rz(0U); +#endif + } + +#if ENABLE_MPMM + mpmm_enable(); #endif } /* Read the group 0 counter identified by the given `idx`. 
*/ -uint64_t amu_group0_cnt_read(unsigned int idx) +static uint64_t amu_group0_cnt_read(unsigned int idx) { - assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_supported()); + assert(idx < read_amcgcr_el0_cg0nc()); return amu_group0_cnt_read_internal(idx); } /* Write the group 0 counter identified by the given `idx` with `val` */ -void amu_group0_cnt_write(unsigned int idx, uint64_t val) +static void amu_group0_cnt_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_supported()); + assert(idx < read_amcgcr_el0_cg0nc()); amu_group0_cnt_write_internal(idx, val); isb(); } /* + * Unlike with auxiliary counters, we cannot detect at runtime whether an + * architected counter supports a virtual offset. These are instead fixed + * according to FEAT_AMUv1p1, but this switch will need to be updated if later + * revisions of FEAT_AMU add additional architected counters. + */ +static bool amu_group0_voffset_supported(uint64_t idx) +{ + switch (idx) { + case 0U: + case 2U: + case 3U: + return true; + + case 1U: + return false; + + default: + ERROR("AMU: can't set up virtual offset for unknown " + "architected counter %" PRIu64 "!\n", idx); + + panic(); + } +} + +/* * Read the group 0 offset register for a given index. Index must be 0, 2, * or 3, the register for 1 does not exist. * * Using this function requires FEAT_AMUv1p1 support. */ -uint64_t amu_group0_voffset_read(unsigned int idx) +static uint64_t amu_group0_voffset_read(unsigned int idx) { - assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_v1p1_supported()); + assert(idx < read_amcgcr_el0_cg0nc()); assert(idx != 1U); return amu_group0_voffset_read_internal(idx); @@ -169,33 +372,33 @@ uint64_t amu_group0_voffset_read(unsigned int idx) * * Using this function requires FEAT_AMUv1p1 support. */ -void amu_group0_voffset_write(unsigned int idx, uint64_t val) +static void amu_group0_voffset_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1); - assert(idx < AMU_GROUP0_NR_COUNTERS); + assert(amu_v1p1_supported()); + assert(idx < read_amcgcr_el0_cg0nc()); assert(idx != 1U); amu_group0_voffset_write_internal(idx, val); isb(); } -#if AMU_GROUP1_NR_COUNTERS +#if ENABLE_AMU_AUXILIARY_COUNTERS /* Read the group 1 counter identified by the given `idx` */ -uint64_t amu_group1_cnt_read(unsigned int idx) +static uint64_t amu_group1_cnt_read(unsigned int idx) { - assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED); + assert(amu_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + assert(idx < read_amcgcr_el0_cg1nc()); return amu_group1_cnt_read_internal(idx); } /* Write the group 1 counter identified by the given `idx` with `val` */ -void amu_group1_cnt_write(unsigned int idx, uint64_t val) +static void amu_group1_cnt_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED); + assert(amu_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + assert(idx < read_amcgcr_el0_cg1nc()); amu_group1_cnt_write_internal(idx, val); isb(); @@ -206,13 +409,12 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val) * * Using this function requires FEAT_AMUv1p1 support. 
*/ -uint64_t amu_group1_voffset_read(unsigned int idx) +static uint64_t amu_group1_voffset_read(unsigned int idx) { - assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1); + assert(amu_v1p1_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); - assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) & - (1ULL << idx)) != 0ULL); + assert(idx < read_amcgcr_el0_cg1nc()); + assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U); return amu_group1_voffset_read_internal(idx); } @@ -222,167 +424,211 @@ uint64_t amu_group1_voffset_read(unsigned int idx) * * Using this function requires FEAT_AMUv1p1 support. */ -void amu_group1_voffset_write(unsigned int idx, uint64_t val) +static void amu_group1_voffset_write(unsigned int idx, uint64_t val) { - assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1); + assert(amu_v1p1_supported()); assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); - assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) & - (1ULL << idx)) != 0ULL); + assert(idx < read_amcgcr_el0_cg1nc()); + assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U); amu_group1_voffset_write_internal(idx, val); isb(); } +#endif -/* - * Program the event type register for the given `idx` with - * the event number `val` - */ -void amu_group1_set_evtype(unsigned int idx, unsigned int val) +static void *amu_context_save(const void *arg) { - assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED); - assert(amu_group1_supported()); - assert(idx < AMU_GROUP1_NR_COUNTERS); + uint64_t i, j; - amu_group1_set_evtype_internal(idx, val); - isb(); -} -#endif /* AMU_GROUP1_NR_COUNTERS */ + unsigned int core_pos; + struct amu_ctx *ctx; -static void *amu_context_save(const void *arg) -{ - struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()]; - unsigned int i; + uint64_t id_aa64pfr0_el1_amu; /* AMU version */ + uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */ + uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */ - if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) { - return (void *)-1; - } +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */ + uint64_t amcfgr_el0_ncg; /* Number of counter groups */ + uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */ +#endif -#if AMU_GROUP1_NR_COUNTERS - if (!amu_group1_supported()) { - return (void *)-1; + id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu(); + if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) { + return (void *)0; } -#endif - /* Assert that group 0/1 counter configuration is what we expect */ - assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK); -#if AMU_GROUP1_NR_COUNTERS - assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK); + core_pos = plat_my_core_pos(); + ctx = &amu_ctxs_[core_pos]; + + amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc(); + hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ? + read_hcr_el2_amvoffen() : 0U; + +#if ENABLE_AMU_AUXILIARY_COUNTERS + amcfgr_el0_ncg = read_amcfgr_el0_ncg(); + amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U; + amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U; #endif + /* - * Disable group 0/1 counters to avoid other observers like SCP sampling - * counter values from the future via the memory mapped view. + * Disable all AMU counters. 
*/ - write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK); -#if AMU_GROUP1_NR_COUNTERS - write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK); + ctx->group0_enable = read_amcntenset0_el0_px(); + write_amcntenclr0_el0_px(ctx->group0_enable); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + if (amcfgr_el0_ncg > 0U) { + ctx->group1_enable = read_amcntenset1_el0_px(); + write_amcntenclr1_el0_px(ctx->group1_enable); + } #endif - isb(); - /* Save all group 0 counters */ - for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) { + /* + * Save the counters to the local context. + */ + + isb(); /* Ensure counters have been stopped */ + + for (i = 0U; i < amcgcr_el0_cg0nc; i++) { ctx->group0_cnts[i] = amu_group0_cnt_read(i); } - /* Save group 0 virtual offsets if supported and enabled. */ - if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) && - ((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) { - /* Not using a loop because count is fixed and index 1 DNE. */ - ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U); - ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U); - ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U); +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U; i < amcgcr_el0_cg1nc; i++) { + ctx->group1_cnts[i] = amu_group1_cnt_read(i); } +#endif + + /* + * Save virtual offsets for counters that offer them. + */ + + if (hcr_el2_amvoffen != 0U) { + for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) { + if (!amu_group0_voffset_supported(i)) { + continue; /* No virtual offset */ + } -#if AMU_GROUP1_NR_COUNTERS - /* Save group 1 counters */ - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) { - ctx->group1_cnts[i] = amu_group1_cnt_read(i); + ctx->group0_voffsets[j++] = amu_group0_voffset_read(i); } - } - /* Save group 1 virtual offsets if supported and enabled. 
*/ - if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) && - ((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) { - u_register_t amcg1idr = read_amcg1idr_el0() >> - AMCG1IDR_VOFF_SHIFT; - amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK; - - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if (((amcg1idr >> i) & 1ULL) != 0ULL) { - ctx->group1_voffsets[i] = - amu_group1_voffset_read(i); +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) { + if ((amcg1idr_el0_voff >> i) & 1U) { + continue; /* No virtual offset */ } + + ctx->group1_voffsets[j++] = amu_group1_voffset_read(i); } - } #endif + } + return (void *)0; } static void *amu_context_restore(const void *arg) { - struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()]; - unsigned int i; + uint64_t i, j; - if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) { - return (void *)-1; - } + unsigned int core_pos; + struct amu_ctx *ctx; -#if AMU_GROUP1_NR_COUNTERS - if (!amu_group1_supported()) { - return (void *)-1; - } + uint64_t id_aa64pfr0_el1_amu; /* AMU version */ + + uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */ + + uint64_t amcfgr_el0_ncg; /* Number of counter groups */ + uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */ + +#if ENABLE_AMU_AUXILIARY_COUNTERS + uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */ + uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */ #endif - /* Counters were disabled in `amu_context_save()` */ - assert(read_amcntenset0_el0() == 0U); -#if AMU_GROUP1_NR_COUNTERS - assert(read_amcntenset1_el0() == 0U); + id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu(); + if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) { + return (void *)0; + } + + core_pos = plat_my_core_pos(); + ctx = &amu_ctxs_[core_pos]; + + amcfgr_el0_ncg = read_amcfgr_el0_ncg(); + amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc(); + + hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ? + read_hcr_el2_amvoffen() : 0U; + +#if ENABLE_AMU_AUXILIARY_COUNTERS + amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U; + amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U; #endif - /* Restore all group 0 counters */ - for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) { + /* + * Sanity check that all counters were disabled when the context was + * previously saved. + */ + + assert(read_amcntenset0_el0_px() == 0U); + + if (amcfgr_el0_ncg > 0U) { + assert(read_amcntenset1_el0_px() == 0U); + } + + /* + * Restore the counter values from the local context. + */ + + for (i = 0U; i < amcgcr_el0_cg0nc; i++) { amu_group0_cnt_write(i, ctx->group0_cnts[i]); } - /* Restore group 0 virtual offsets if supported and enabled. */ - if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) && - ((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) { - /* Not using a loop because count is fixed and index 1 DNE. */ - amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]); - amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]); - amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]); +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U; i < amcgcr_el0_cg1nc; i++) { + amu_group1_cnt_write(i, ctx->group1_cnts[i]); } +#endif + + /* + * Restore virtual offsets for counters that offer them. 
+ */ - /* Restore group 0 counter configuration */ - write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK); + if (hcr_el2_amvoffen != 0U) { + for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) { + if (!amu_group0_voffset_supported(i)) { + continue; /* No virtual offset */ + } -#if AMU_GROUP1_NR_COUNTERS - /* Restore group 1 counters */ - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) { - amu_group1_cnt_write(i, ctx->group1_cnts[i]); + amu_group0_voffset_write(i, ctx->group0_voffsets[j++]); } - } - /* Restore group 1 virtual offsets if supported and enabled. */ - if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) && - ((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) { - u_register_t amcg1idr = read_amcg1idr_el0() >> - AMCG1IDR_VOFF_SHIFT; - amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK; - - for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) { - if (((amcg1idr >> i) & 1ULL) != 0ULL) { - amu_group1_voffset_write(i, - ctx->group1_voffsets[i]); +#if ENABLE_AMU_AUXILIARY_COUNTERS + for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) { + if ((amcg1idr_el0_voff >> i) & 1U) { + continue; /* No virtual offset */ } + + amu_group1_voffset_write(i, ctx->group1_voffsets[j++]); } +#endif + } + + /* + * Re-enable counters that were disabled during context save. + */ + + write_amcntenset0_el0_px(ctx->group0_enable); + +#if ENABLE_AMU_AUXILIARY_COUNTERS + if (amcfgr_el0_ncg > 0) { + write_amcntenset1_el0_px(ctx->group1_enable); } +#endif - /* Restore group 1 counter configuration */ - write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK); +#if ENABLE_MPMM + mpmm_enable(); #endif return (void *)0; diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S index 9989abdeb..0f6d799ea 100644 --- a/lib/extensions/amu/aarch64/amu_helpers.S +++ b/lib/extensions/amu/aarch64/amu_helpers.S @@ -83,6 +83,7 @@ func amu_group0_cnt_write_internal write AMEVCNTR03_EL0 /* index 3 */ endfunc amu_group0_cnt_write_internal +#if ENABLE_AMU_AUXILIARY_COUNTERS /* * uint64_t amu_group1_cnt_read_internal(int idx); * @@ -217,6 +218,7 @@ func amu_group1_set_evtype_internal write AMEVTYPER1E_EL0 /* index 14 */ write AMEVTYPER1F_EL0 /* index 15 */ endfunc amu_group1_set_evtype_internal +#endif /* * Accessor functions for virtual offset registers added with FEAT_AMUv1p1 @@ -297,6 +299,7 @@ func amu_group0_voffset_write_internal write AMEVCNTVOFF03_EL2 /* index 3 */ endfunc amu_group0_voffset_write_internal +#if ENABLE_AMU_AUXILIARY_COUNTERS /* * uint64_t amu_group1_voffset_read_internal(int idx); * @@ -383,3 +386,4 @@ func amu_group1_voffset_write_internal write AMEVCNTVOFF1E_EL2 /* index 14 */ write AMEVCNTVOFF1F_EL2 /* index 15 */ endfunc amu_group1_voffset_write_internal +#endif diff --git a/lib/extensions/amu/amu.mk b/lib/extensions/amu/amu.mk new file mode 100644 index 000000000..0d203cb1f --- /dev/null +++ b/lib/extensions/amu/amu.mk @@ -0,0 +1,24 @@ +# +# Copyright (c) 2021, Arm Limited. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +include lib/fconf/fconf.mk + +AMU_SOURCES := lib/extensions/amu/${ARCH}/amu.c \ + lib/extensions/amu/${ARCH}/amu_helpers.S + +ifneq (${ENABLE_AMU_AUXILIARY_COUNTERS},0) + ifeq (${ENABLE_AMU},0) + $(error AMU auxiliary counter support (`ENABLE_AMU_AUXILIARY_COUNTERS`) requires AMU support (`ENABLE_AMU`)) + endif +endif + +ifneq (${ENABLE_AMU_FCONF},0) + ifeq (${ENABLE_AMU_AUXILIARY_COUNTERS},0) + $(error AMU FCONF support (`ENABLE_AMU_FCONF`) is not necessary when auxiliary counter support (`ENABLE_AMU_AUXILIARY_COUNTERS`) is disabled) + endif + + AMU_SOURCES += ${FCONF_AMU_SOURCES} +endif diff --git a/lib/extensions/amu/amu_private.h b/lib/extensions/amu/amu_private.h new file mode 100644 index 000000000..eb7ff0e89 --- /dev/null +++ b/lib/extensions/amu/amu_private.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef AMU_PRIVATE_H +#define AMU_PRIVATE_H + +#include <stdint.h> + +#include <lib/cassert.h> +#include <lib/extensions/amu.h> +#include <lib/utils_def.h> + +#include <platform_def.h> + +#define AMU_GROUP0_MAX_COUNTERS U(16) +#define AMU_GROUP1_MAX_COUNTERS U(16) + +#define AMU_AMCGCR_CG0NC_MAX U(16) + +uint64_t amu_group0_cnt_read_internal(unsigned int idx); +void amu_group0_cnt_write_internal(unsigned int idx, uint64_t val); + +uint64_t amu_group1_cnt_read_internal(unsigned int idx); +void amu_group1_cnt_write_internal(unsigned int idx, uint64_t val); +void amu_group1_set_evtype_internal(unsigned int idx, unsigned int val); + +#if __aarch64__ +uint64_t amu_group0_voffset_read_internal(unsigned int idx); +void amu_group0_voffset_write_internal(unsigned int idx, uint64_t val); + +uint64_t amu_group1_voffset_read_internal(unsigned int idx); +void amu_group1_voffset_write_internal(unsigned int idx, uint64_t val); +#endif + +#endif /* AMU_PRIVATE_H */ diff --git a/lib/extensions/sme/sme.c b/lib/extensions/sme/sme.c new file mode 100644 index 000000000..1c2b98448 --- /dev/null +++ b/lib/extensions/sme/sme.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/extensions/sme.h> +#include <lib/extensions/sve.h> + +static bool feat_sme_supported(void) +{ + uint64_t features; + + features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT; + return (features & ID_AA64PFR1_EL1_SME_MASK) != 0U; +} + +static bool feat_sme_fa64_supported(void) +{ + uint64_t features; + + features = read_id_aa64smfr0_el1(); + return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U; +} + +void sme_enable(cpu_context_t *context) +{ + u_register_t reg; + u_register_t cptr_el3; + el3_state_t *state; + + /* Make sure SME is implemented in hardware before continuing. */ + if (!feat_sme_supported()) { + return; + } + + /* Get the context state. */ + state = get_el3state_ctx(context); + + /* Enable SME in CPTR_EL3. */ + reg = read_ctx_reg(state, CTX_CPTR_EL3); + reg |= ESM_BIT; + write_ctx_reg(state, CTX_CPTR_EL3, reg); + + /* Set the ENTP2 bit in SCR_EL3 to enable access to TPIDR2_EL0. */ + reg = read_ctx_reg(state, CTX_SCR_EL3); + reg |= SCR_ENTP2_BIT; + write_ctx_reg(state, CTX_SCR_EL3, reg); + + /* Set CPTR_EL3.ESM bit so we can write SMCR_EL3 without trapping. 
*/ + cptr_el3 = read_cptr_el3(); + write_cptr_el3(cptr_el3 | ESM_BIT); + + /* + * Set the max LEN value and FA64 bit. This register is set up globally + * to be the least restrictive, then lower ELs can restrict as needed + * using SMCR_EL2 and SMCR_EL1. + */ + reg = SMCR_ELX_LEN_MASK; + if (feat_sme_fa64_supported()) { + VERBOSE("[SME] FA64 enabled\n"); + reg |= SMCR_ELX_FA64_BIT; + } + write_smcr_el3(reg); + + /* Reset CPTR_EL3 value. */ + write_cptr_el3(cptr_el3); + + /* Enable SVE/FPU in addition to SME. */ + sve_enable(context); +} + +void sme_disable(cpu_context_t *context) +{ + u_register_t reg; + el3_state_t *state; + + /* Make sure SME is implemented in hardware before continuing. */ + if (!feat_sme_supported()) { + return; + } + + /* Get the context state. */ + state = get_el3state_ctx(context); + + /* Disable SME, SVE, and FPU since they all share registers. */ + reg = read_ctx_reg(state, CTX_CPTR_EL3); + reg &= ~ESM_BIT; /* Trap SME */ + reg &= ~CPTR_EZ_BIT; /* Trap SVE */ + reg |= TFP_BIT; /* Trap FPU/SIMD */ + write_ctx_reg(state, CTX_CPTR_EL3, reg); + + /* Disable access to TPIDR2_EL0. */ + reg = read_ctx_reg(state, CTX_SCR_EL3); + reg &= ~SCR_ENTP2_BIT; + write_ctx_reg(state, CTX_SCR_EL3, reg); +} diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c index 2702c30f3..aa8904b9b 100644 --- a/lib/extensions/sve/sve.c +++ b/lib/extensions/sve/sve.c @@ -43,3 +43,23 @@ void sve_enable(cpu_context_t *context) write_ctx_reg(get_el3state_ctx(context), CTX_ZCR_EL3, (ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(512))); } + +void sve_disable(cpu_context_t *context) +{ + u_register_t reg; + el3_state_t *state; + + /* Make sure SME is implemented in hardware before continuing. */ + if (!sve_supported()) { + return; + } + + /* Get the context state. */ + state = get_el3state_ctx(context); + + /* Disable SVE and FPU since they share registers. */ + reg = read_ctx_reg(state, CTX_CPTR_EL3); + reg &= ~CPTR_EZ_BIT; /* Trap SVE */ + reg |= TFP_BIT; /* Trap FPU/SIMD */ + write_ctx_reg(state, CTX_CPTR_EL3, reg); +} diff --git a/lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c b/lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c new file mode 100644 index 000000000..89b8029ca --- /dev/null +++ b/lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <lib/extensions/sys_reg_trace.h> + +static bool sys_reg_trace_supported(void) +{ + uint32_t features; + + features = read_id_dfr0() >> ID_DFR0_COPTRC_SHIFT; + return ((features & ID_DFR0_COPTRC_MASK) == + ID_DFR0_COPTRC_SUPPORTED); +} + +void sys_reg_trace_enable(void) +{ + uint32_t val; + + if (sys_reg_trace_supported()) { + /* + * NSACR.NSTRCDIS = b0 + * enable NS system register access to implemented trace + * registers. + */ + val = read_nsacr(); + val &= ~NSTRCDIS_BIT; + write_nsacr(val); + } +} diff --git a/lib/extensions/sys_reg_trace/aarch64/sys_reg_trace.c b/lib/extensions/sys_reg_trace/aarch64/sys_reg_trace.c new file mode 100644 index 000000000..960d69842 --- /dev/null +++ b/lib/extensions/sys_reg_trace/aarch64/sys_reg_trace.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <lib/extensions/sys_reg_trace.h> + +static bool sys_reg_trace_supported(void) +{ + uint64_t features; + + features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEVER_SHIFT; + return ((features & ID_AA64DFR0_TRACEVER_MASK) == + ID_AA64DFR0_TRACEVER_SUPPORTED); +} + +void sys_reg_trace_enable(cpu_context_t *ctx) +{ + uint64_t val; + + if (sys_reg_trace_supported()) { + /* Retrieve CPTR_EL3 value from the given context 'ctx', + * and update CPTR_EL3.TTA bit to 0. + * This function is called while switching context to NS to + * allow system trace register access to NS-EL2 and NS-EL1 + * when NS-EL2 is implemented but not used. + */ + val = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3); + val &= ~TTA_BIT; + write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, val); + } +} diff --git a/lib/extensions/trbe/trbe.c b/lib/extensions/trbe/trbe.c new file mode 100644 index 000000000..9f754d521 --- /dev/null +++ b/lib/extensions/trbe/trbe.c @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <lib/el3_runtime/pubsub.h> +#include <lib/extensions/trbe.h> + +static void tsb_csync(void) +{ + /* + * The assembler does not yet understand the tsb csync mnemonic + * so use the equivalent hint instruction. + */ + __asm__ volatile("hint #18"); +} + +static bool trbe_supported(void) +{ + uint64_t features; + + features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEBUFFER_SHIFT; + return ((features & ID_AA64DFR0_TRACEBUFFER_MASK) == + ID_AA64DFR0_TRACEBUFFER_SUPPORTED); +} + +void trbe_enable(void) +{ + uint64_t val; + + if (trbe_supported()) { + /* + * MDCR_EL3.NSTB = 0b11 + * Allow access of trace buffer control registers from NS-EL1 + * and NS-EL2, tracing is prohibited in Secure and Realm state + * (if implemented). + */ + val = read_mdcr_el3(); + val |= MDCR_NSTB(MDCR_NSTB_EL1); + write_mdcr_el3(val); + } +} + +static void *trbe_drain_trace_buffers_hook(const void *arg __unused) +{ + if (trbe_supported()) { + /* + * Before switching from normal world to secure world + * the trace buffers need to be drained out to memory. This is + * required to avoid an invalid memory access when TTBR is switched + * for entry to S-EL1. + */ + tsb_csync(); + dsbnsh(); + } + + return (void *)0; +} + +SUBSCRIBE_TO_EVENT(cm_entering_secure_world, trbe_drain_trace_buffers_hook); diff --git a/lib/extensions/trf/aarch32/trf.c b/lib/extensions/trf/aarch32/trf.c new file mode 100644 index 000000000..834092d5a --- /dev/null +++ b/lib/extensions/trf/aarch32/trf.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <lib/extensions/trf.h> + +static bool trf_supported(void) +{ + uint32_t features; + + features = read_id_dfr0() >> ID_DFR0_TRACEFILT_SHIFT; + return ((features & ID_DFR0_TRACEFILT_MASK) == + ID_DFR0_TRACEFILT_SUPPORTED); +} + +void trf_enable(void) +{ + uint32_t val; + + if (trf_supported()) { + /* + * Allow access of trace filter control registers from + * non-monitor mode + */ + val = read_sdcr(); + val &= ~SDCR_TTRF_BIT; + write_sdcr(val); + } +} diff --git a/lib/extensions/trf/aarch64/trf.c b/lib/extensions/trf/aarch64/trf.c new file mode 100644 index 000000000..1da5dcee0 --- /dev/null +++ b/lib/extensions/trf/aarch64/trf.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <lib/extensions/trf.h> + +static bool trf_supported(void) +{ + uint64_t features; + + features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT; + return ((features & ID_AA64DFR0_TRACEFILT_MASK) == + ID_AA64DFR0_TRACEFILT_SUPPORTED); +} + +void trf_enable(void) +{ + uint64_t val; + + if (trf_supported()) { + /* + * MDCR_EL3.TTRF = b0 + * Allow access of trace filter control registers from NS-EL2 + * and NS-EL1 when NS-EL2 is implemented but not used + */ + val = read_mdcr_el3(); + val &= ~MDCR_TTRF_BIT; + write_mdcr_el3(val); + } +} |
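
For readers wiring this up on the platform side: with ENABLE_AMU_AUXILIARY_COUNTERS=1 and ENABLE_AMU_FCONF=0, the amu_enable() functions above obtain the per-core group 1 enable mask from plat_amu_topology(). The following is a minimal sketch of such a platform hook; it is not part of this patch. The struct amu_topology layout (an array of per-core entries carrying an enable bitmask) and the header that declares it are assumed from the diff's use of topology->cores[core_pos].enable, and the counter choices and the plat_topology_ name are purely illustrative.

#include <lib/extensions/amu.h>

/*
 * Hypothetical platform topology: enable auxiliary (group 1) counters 0 and 1
 * on core 0, and counter 0 only on core 1. Bit n of `enable` corresponds to
 * counter AMEVCNTR1<n>, i.e. bit n of the AMCNTENSET1 mask programmed by
 * amu_enable(). Cores not listed (up to PLATFORM_CORE_COUNT) are
 * zero-initialized, so no auxiliary counters are enabled on them.
 */
static const struct amu_topology plat_topology_ = {
	.cores = {
		[0] = { .enable = (1U << 0) | (1U << 1) },
		[1] = { .enable = (1U << 0) },
	},
};

const struct amu_topology *plat_amu_topology(void)
{
	return &plat_topology_;
}

When ENABLE_AMU_FCONF=1, the same information is instead taken from FCONF_GET_PROPERTY(amu, config, topology), so the platform hook is not needed in that configuration.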