From 1ae481fd68aca9f5d399529cf1a13d547516270c Mon Sep 17 00:00:00 2001
From: Raphael Herouart
Date: Wed, 29 Mar 2023 14:59:16 +0000
Subject: app/stdcalltest: smc call to clobber sve registers

In order to test tf-a-test properly, one must assert that SVE registers
are preserved across an SMC call into Trusty. No existing call clobbers
them yet.

No-Typo-Check: Typo Check gives wrong errors and blocks submit
Bug: 270942549
Change-Id: I53fa163aef4eae2599048aa1e4c181e0e70a7008
---
 app/stdcalltest/rules.mk      |   4 +
 app/stdcalltest/stdcalltest.c |  76 +++++++++++++++++
 app/stdcalltest/stdcalltest.h |  10 +++
 app/stdcalltest/sve_helper.S  | 189 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 279 insertions(+)
 create mode 100644 app/stdcalltest/sve_helper.S

diff --git a/app/stdcalltest/rules.mk b/app/stdcalltest/rules.mk
index ed66477..7b1be9b 100644
--- a/app/stdcalltest/rules.mk
+++ b/app/stdcalltest/rules.mk
@@ -28,4 +28,8 @@ MODULE := $(LOCAL_DIR)
 MODULE_SRCS += \
 	$(LOCAL_DIR)/stdcalltest.c
 
+ifeq ($(ARCH),arm64)
+MODULE_SRCS += $(LOCAL_DIR)/sve_helper.S
+endif
+
 include make/module.mk

diff --git a/app/stdcalltest/stdcalltest.c b/app/stdcalltest/stdcalltest.c
index 24b48a7..845c8f3 100644
--- a/app/stdcalltest/stdcalltest.c
+++ b/app/stdcalltest/stdcalltest.c
@@ -21,13 +21,18 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <arch/arm64.h>
+#include <arch/ops.h>
 #include <err.h>
 #include <kernel/vm.h>
 #include <lib/sm.h>
 #include <lib/sm/sm_err.h>
 #include <lib/sm/smcall.h>
+#include <limits.h>
 #include <lk/init.h>
+#include <lk/macros.h>
 #include <string.h>
+#include <sys/types.h>
 #include <trace.h>
 
 #include "stdcalltest.h"
@@ -95,6 +100,72 @@ err_map:
     return status;
 }
 
+#if ARCH_ARM64
+long clobber_sve_asm(uint32_t byte_clobber);
+long load_sve_asm(uint8_t* arr, uint64_t len);
+
+#define SVE_VEC_LEN_BITS 128
+#define SVE_NB_BYTE_VEC_LEN (SVE_VEC_LEN_BITS / 8)
+#define SVE_SVE_REGS_COUNT 32
+
+#define SMC_FC_TRNG_VERSION SMC_FASTCALL_NR(SMC_ENTITY_STD, 0x50)
+
+static uint8_t sve_regs[SMP_MAX_CPUS][SVE_SVE_REGS_COUNT * SVE_NB_BYTE_VEC_LEN]
+        __attribute__((aligned(16)));
+
+enum clobber_restore_error {
+    SVE_NO_ERROR = 0,
+    SVE_GENERIC_ERROR = 1,
+    SVE_REGISTER_NOT_RESTORED = 2,
+    SVE_ERROR_LONG_TYPE = LONG_MAX
+};
+
+long stdcalltest_clobber_sve(struct smc32_args* args) {
+    enum clobber_restore_error ret = SVE_NO_ERROR;
+    if (!arch_sve_supported()) {
+        /* test is OK: if there is no SVE there is nothing to assert, but
+         * this is not an ERROR */
+        return ret;
+    }
+
+    uint64_t v_cpacr_el1 = arch_enable_sve();
+    uint cpuid = arch_curr_cpu_num();
+    long call_nb = args->params[1];
+
+    /* The first call on each cpu needs to clobber the SVE registers */
+    if (call_nb == 1) {
+        ret = clobber_sve_asm(args->params[0]);
+        if (ret != SVE_NO_ERROR) {
+            panic("Failed to clobber ARM SVE registers: %lx\n", ret);
+            ret = SVE_GENERIC_ERROR;
+            goto end_stdcalltest_clobber_sve;
+        }
+    }
+
+    /* Make sure the registers hold the expected pattern */
+    const uint8_t EXPECTED = (uint8_t)args->params[0];
+    ret = load_sve_asm(sve_regs[cpuid], SVE_NB_BYTE_VEC_LEN);
+    if (ret != SVE_NO_ERROR) {
+        panic("Failed to load ARM SVE registers: %lx\n", ret);
+        ret = SVE_GENERIC_ERROR;
+        goto end_stdcalltest_clobber_sve;
+    }
+
+    for (size_t idx = 0; idx < countof(sve_regs[cpuid]); ++idx) {
+        uint8_t val = sve_regs[cpuid][idx];
+
+        if (val != EXPECTED) {
+            ret = SVE_REGISTER_NOT_RESTORED;
+            goto end_stdcalltest_clobber_sve;
+        }
+    }
+
+end_stdcalltest_clobber_sve:
+    ARM64_WRITE_SYSREG(cpacr_el1, v_cpacr_el1);
+    return ret;
+}
+#endif
+
 static long stdcalltest_stdcall(struct smc32_args* args) {
     switch (args->smc_nr) {
     case SMC_SC_TEST_VERSION:
@@ -102,6 +173,11 @@ static long stdcalltest_stdcall(struct smc32_args* args) {
     case SMC_SC_TEST_SHARED_MEM_RW:
         return stdcalltest_sharedmem_rw(args->client_id, args_get_id(args),
                                         args_get_sz(args));
+#if ARCH_ARM64
+    case SMC_SC_TEST_CLOBBER_SVE: {
+        return stdcalltest_clobber_sve(args);
+    }
+#endif
     default:
         return SM_ERR_UNDEFINED_SMC;
     }
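Note on the call protocol above: params[0] carries the clobber byte and
params[1] a per-cpu call number; the first call fills Trusty's SVE state
with the pattern, and every call verifies it. A minimal sketch of a
non-secure driver follows. It is illustrative only: smc_call() is a
hypothetical stand-in for the platform's SMC wrapper (tf-a-tests would go
through its own helper), and only the parameter layout comes from this
patch.

#include <stdint.h>

#include "stdcalltest.h" /* for SMC_SC_TEST_CLOBBER_SVE */

/* Hypothetical wrapper around the SMC instruction; a real test suite
 * would use its own SMC helper here. */
extern long smc_call(uint32_t smc_nr, uint32_t param0, uint32_t param1);

static long run_clobber_sve_test(uint8_t pattern) {
    /* Call 1: Trusty fills z0-z31 (and p0-p15) with the pattern and
     * immediately verifies the fill worked. */
    long ret = smc_call(SMC_SC_TEST_CLOBBER_SVE, pattern, 1);
    if (ret != 0)
        return ret; /* 1: technical error, 2: register mismatch */

    /* The caller would clobber its own SVE state here (assumption),
     * forcing TF-A to save and restore both worlds' registers. */

    /* Call 2: verification only; nonzero means Trusty's SVE state did
     * not survive the world switches. */
    return smc_call(SMC_SC_TEST_CLOBBER_SVE, pattern, 2);
}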
diff --git a/app/stdcalltest/stdcalltest.h b/app/stdcalltest/stdcalltest.h
index 5ad3008..979432e 100644
--- a/app/stdcalltest/stdcalltest.h
+++ b/app/stdcalltest/stdcalltest.h
@@ -53,4 +53,14 @@
  */
 #define SMC_SC_TEST_SHARED_MEM_RW SMC_STDCALL_NR(SMC_ENTITY_TEST, 1)
 
+/**
+ * SMC_SC_TEST_CLOBBER_SVE - Test save and restore of SVE/SIMD registers
+ * during a TFTF <-> TF-A <-> Trusty roundtrip for all participants when
+ * multiple cpus are involved.
+ *
+ * Return: 0 on success, 1 on a "technical" error, 2 if registers have not
+ * recovered their expected value.
+ */
+#define SMC_SC_TEST_CLOBBER_SVE SMC_STDCALL_NR(SMC_ENTITY_TEST, 2)
+
 #define TRUSTY_STDCALLTEST_API_VERSION 1
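For reference, SMC_STDCALL_NR composes a function ID following the ARM SMC
Calling Convention. The sketch below mirrors that bit layout; it is an
assumption for illustration (the authoritative macros live in Trusty's
lib/sm/smcall.h) and the entity and function values are kept symbolic.

#include <stdint.h>

/* Assumed SMCCC function-ID layout:
 *   bit  31    - 1 for fast calls, 0 for yielding ("standard") calls
 *   bit  30    - 1 for SMC64, 0 for SMC32
 *   bits 29:24 - owning entity
 *   bits 15:0  - function number within that entity
 */
static inline uint32_t smc_std_nr(uint32_t entity, uint32_t fn) {
    /* Bits 31 and 30 stay clear: a 32-bit yielding call. */
    return ((entity & 0x3fu) << 24) | (fn & 0xffffu);
}

/* SMC_SC_TEST_CLOBBER_SVE would then be smc_std_nr(SMC_ENTITY_TEST, 2);
 * being a yielding call is why it is dispatched through
 * stdcalltest_stdcall() rather than a fastcall handler. */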
diff --git a/app/stdcalltest/sve_helper.S b/app/stdcalltest/sve_helper.S
new file mode 100644
index 0000000..6a824c2
--- /dev/null
+++ b/app/stdcalltest/sve_helper.S
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2023, Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <arch/asm_macros.h>
+#include <lk/asm.h>
+
+.arch armv8-a+sve
+
+
+FUNCTION(clobber_sve_asm)
+    mov x9, x0
+
+    // Fill every SVE vector register with the clobber byte in w9
+    mov z0.b, w9
+    mov z1.b, w9
+    mov z2.b, w9
+    mov z3.b, w9
+    mov z4.b, w9
+    mov z5.b, w9
+    mov z6.b, w9
+    mov z7.b, w9
+    mov z8.b, w9
+    mov z9.b, w9
+    mov z10.b, w9
+    mov z11.b, w9
+    mov z12.b, w9
+    mov z13.b, w9
+    mov z14.b, w9
+    mov z15.b, w9
+    mov z16.b, w9
+    mov z17.b, w9
+    mov z18.b, w9
+    mov z19.b, w9
+    mov z20.b, w9
+    mov z21.b, w9
+    mov z22.b, w9
+    mov z23.b, w9
+    mov z24.b, w9
+    mov z25.b, w9
+    mov z26.b, w9
+    mov z27.b, w9
+    mov z28.b, w9
+    mov z29.b, w9
+    mov z30.b, w9
+    mov z31.b, w9
+
+    // Set every lane of every predicate register active
+    ptrue p0.b
+    ptrue p1.b
+    ptrue p2.b
+    ptrue p3.b
+    ptrue p4.b
+    ptrue p5.b
+    ptrue p6.b
+    ptrue p7.b
+    ptrue p8.b
+    ptrue p9.b
+    ptrue p10.b
+    ptrue p11.b
+    ptrue p12.b
+    ptrue p13.b
+    ptrue p14.b
+    ptrue p15.b
+
+    // SVE was available: return 0 (SVE_NO_ERROR)
+    mov x0, #0
+    ret
+
+FUNCTION(load_sve_asm)
+    mov x9, x0 // base address of the output array
+    mov x8, #0 // running offset into the array
+    mov x7, x1 // per-register stride (vector length in bytes)
+
+    // Store each vector register to its slot so the C caller can
+    // compare every byte against the expected pattern
+    st1b z0.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z1.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z2.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z3.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z4.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z5.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z6.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z7.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z8.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z9.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z10.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z11.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z12.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z13.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z14.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z15.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z16.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z17.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z18.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z19.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z20.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z21.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z22.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z23.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z24.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z25.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z26.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z27.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z28.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z29.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z30.b, p0, [x9, x8]
+
+    add x8, x8, x7
+    st1b z31.b, p0, [x9, x8]
+
+    mov x0, #0
+    ret
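A sanity check on the sizing used above: with SVE_VEC_LEN_BITS fixed at
128, each Z register occupies 16 bytes and each per-cpu slice of sve_regs
is 512 bytes, so the st1b stores in load_sve_asm land exactly one vector
apart. The test therefore assumes a 128-bit vector length; presumably
arch_enable_sve() constrains the vector length accordingly, since with a
larger VL each ptrue-predicated st1b would store more than 16 bytes. A
standalone C sketch of the arithmetic and of the verification loop from
stdcalltest_clobber_sve:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SVE_VEC_LEN_BITS 128
#define SVE_NB_BYTE_VEC_LEN (SVE_VEC_LEN_BITS / 8)
#define SVE_SVE_REGS_COUNT 32

/* One 128-bit vector is 16 bytes; 32 vectors fill a 512-byte slice. */
static_assert(SVE_NB_BYTE_VEC_LEN == 16, "one vector is 16 bytes");
static_assert(SVE_SVE_REGS_COUNT * SVE_NB_BYTE_VEC_LEN == 512,
              "per-cpu buffer holds all 32 Z registers");

/* Returns 0 (SVE_NO_ERROR) when every stored byte matches the clobber
 * pattern, 2 (SVE_REGISTER_NOT_RESTORED) otherwise. */
static int check_sve_dump(const uint8_t *regs, size_t len, uint8_t expected) {
    for (size_t i = 0; i < len; i++) {
        if (regs[i] != expected)
            return 2;
    }
    return 0;
}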