summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArve Hjønnevåg <arve@android.com>2013-11-18 20:46:48 -0800
committerArve Hjønnevåg <arve@android.com>2021-05-11 12:59:49 -0700
commiteee82e61630d1545f17414d9ce6e290209f7a33f (patch)
tree9e4dd0b331a4fba54f9683c5c7c177c8a0508bdf
downloadtrusty-eee82e61630d1545f17414d9ce6e290209f7a33f.tar.gz
ANDROID: trusty: Add trusty-core driver
Original commits: android-trusty-3.10: 33dbf3395558 ("trusty: Add trusty driver") e30482e7bc33 ("trusty: Add notifier before and after every smc call.") 2e55f95a657b ("trusty: Provide trusty_std_call32 and trusty_fast_call32 stubs if CONFIG_TRUSTY is not set.") 0da972cc7510 ("trusty: Add arm64 support") 6b1a2d31a568 ("trusty: Add trusty_fast_call64 api on 64 bit systems.") bd9b849d25e6 ("trusty: move probe to subsys_initcall") e909ef01e6ea ("trusty: Retry std_calls on SM_ERR_BUSY") af1ac76fcfaa ("trusty: Get version string from trusty") 489d59d67530 ("trusty: add couple non-secure memory related helper routines") 7f12378c1422 ("trusty: Select api version") 63f6b6cdcad0 ("trusty: Handle fiqs without calling notifier and enabling interrupts") 51132bfbacfc ("trusty: Add smp support") android-trusty-3.18: 4af79968ae6e ("trusty: add support for parameterized NOP ops") android-trusty-4.19: 3f1b06f500d0 ("trusty: Stop calling into trusty if trusty panicked") 29aa7dfaa7ac ("trusty: Always call back into trusty on interrupted CPUs") 8bfcfdcd6ab6 ("trusty: Don't break nop loop on failed custom nop-calls") 4009cfccc996 ("trusty: Add share memory api and call it where needed") 9a27b1c48cb0 ("trusty: Remove trusty_call32_mem_buf") 412267edb42b ("trusty: Define values passed to trusty") 2cb851ca5bd1 ("trusty: Set dma_mask") c6e617832d34 ("trusty: Extend smc function and move to arch specific asm file") android-trusty-5.4: 939619581245 ("ANDROID: trusty: Implement share-memory interface based on Arm FF-A 1.0 EAC 1_0") b364e0926539 ("ANDROID: trusty: Modularize trusty core driver") e3e543f64456 ("ANDROID: trusty: fix format specifiers") 5d6c1cceba30 ("ANDROID: trusty: fix compiler warnings for 32-bit builds") 3b4e7241359f ("ANDROID: trusty: don't use tristate for non-modules") d78b103c28b8 ("ANDROID: trusty: add missing ENDPROC statements") 1d7b4458df52 ("ANDROID: trusty: use SPDX license identifiers") a91f687b026d ("ANDROID: trusty: fix some checkpatch warnings") eb7697cc4105 
("ANDROID: trusty: Kconfig: add missing dependency on ARM || ARM64") 13614d015410 ("ANDROID: trusty: Kconfig: remove unneeded 'default n'") 33c189719c25 ("ANDROID: trusty: Kconfig: remove CONFIG_TRUSTY_SMC_ARM*") 048a0d174651 ("ANDROID: trusty: add missing license declarations") 2a2f4891ce20 ("ANDROID: trusty: add missing 'static' keywords") eaa27254294c ("ANDROID: trusty: don't use '#pragma once'") 110d72546a67 ("ANDROID: trusty: remove unneeded assignments to device_driver::owner") 19b581bd06e1 ("ANDROID: trusty-core: export trusty_fast_call64()") 5c8d24185047 ("ANDROID: trusty-core: use dev_groups for trusty_version attribute") 0f32db5ee94a ("ANDROID: trusty: remove trivial function entry logging") ac7a02aed444 ("ANDROID: trusty: use dev_*() instead of pr_*()") 0e5803182b45 ("ANDROID: trusty: avoid using BUG_ON()") 3f6ac3c63792 ("ANDROID: trusty: Kconfig: add help text") 86a6aff89332 ("ANDROID: trusty: use in-kernel integer types") e3699d9e8190 ("ANDROID: trusty: don't use weird sysv integer types") bd616a0d2b14 ("ANDROID: trusty: fix up headers shared with Trusty") Bug: 187853933 Change-Id: I9bcf4563edee478223579776f5d0dd956ad031a6 Signed-off-by: Arve Hjønnevåg <arve@android.com> Signed-off-by: Tri Vo <trong@google.com> Signed-off-by: Michael Ryleev <gmar@google.com> Signed-off-by: Eric Biggers <ebiggers@google.com>
-rw-r--r--Documentation/devicetree/bindings/trusty/trusty-smc.txt6
-rw-r--r--drivers/trusty/Kconfig22
-rw-r--r--drivers/trusty/Makefile9
-rw-r--r--drivers/trusty/trusty-mem.c139
-rw-r--r--drivers/trusty/trusty-smc-arm.S41
-rw-r--r--drivers/trusty/trusty-smc-arm64.S35
-rw-r--r--drivers/trusty/trusty-smc.h26
-rw-r--r--drivers/trusty/trusty.c956
-rw-r--r--include/linux/trusty/arm_ffa.h590
-rw-r--r--include/linux/trusty/sm_err.h28
-rw-r--r--include/linux/trusty/smcall.h109
-rw-r--r--include/linux/trusty/trusty.h90
12 files changed, 2051 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
new file mode 100644
index 0000000..1b39ad3
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
@@ -0,0 +1,6 @@
+Trusty smc interface
+
+Trusty is running in secure mode on the same (ARM) CPU(s) as the current OS.
+
+Required properties:
+- compatible: "android,trusty-smc-v1"
diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig
new file mode 100644
index 0000000..12fcb74
--- /dev/null
+++ b/drivers/trusty/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Trusty driver
+#
+
+menu "Trusty driver"
+
+config TRUSTY
+ tristate "Trusty core driver"
+ depends on ARM || ARM64
+ help
+ Trusty is a secure OS that provides a Trusted Execution Environment
+ (TEE) for Android. Trusty runs on the same processor as Linux but is
+ isolated from the rest of the system by both hardware and software.
+
+ This option enables the core part of the Linux kernel driver for
+ Trusty. This doesn't do much by itself; you'll need to enable some of
+ the sub-modules too.
+
+ If you build this as a module, it will be called trusty-core.
+
+endmenu
diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile
new file mode 100644
index 0000000..c5b68ef
--- /dev/null
+++ b/drivers/trusty/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for trusty components
+#
+
+obj-$(CONFIG_TRUSTY) += trusty-core.o
+trusty-core-objs += trusty.o trusty-mem.o
+trusty-core-$(CONFIG_ARM) += trusty-smc-arm.o
+trusty-core-$(CONFIG_ARM64) += trusty-smc-arm64.o
diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c
new file mode 100644
index 0000000..8a36029
--- /dev/null
+++ b/drivers/trusty/trusty-mem.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Google, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/trusty/arm_ffa.h>
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/smcall.h>
+
+#define MEM_ATTR_STRONGLY_ORDERED (0x00U)
+#define MEM_ATTR_DEVICE (0x04U)
+#define MEM_ATTR_NORMAL_NON_CACHEABLE (0x44U)
+#define MEM_ATTR_NORMAL_WRITE_THROUGH (0xAAU)
+#define MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE (0xEEU)
+#define MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE (0xFFU)
+
+#define ATTR_RDONLY (1U << 7)
+#define ATTR_INNER_SHAREABLE (3U << 8)
+
+static int get_mem_attr(struct page *page, pgprot_t pgprot)
+{
+#if defined(CONFIG_ARM64)
+ u64 mair;
+ unsigned int attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
+
+ asm ("mrs %0, mair_el1\n" : "=&r" (mair));
+ return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM_LPAE)
+ u32 mair;
+ unsigned int attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
+
+ if (attr_index >= 4) {
+ attr_index -= 4;
+ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
+ } else {
+ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
+ }
+ return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM)
+ /* check memory type */
+ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
+ case L_PTE_MT_WRITEALLOC:
+ return MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE;
+
+ case L_PTE_MT_BUFFERABLE:
+ return MEM_ATTR_NORMAL_NON_CACHEABLE;
+
+ case L_PTE_MT_WRITEBACK:
+ return MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE;
+
+ case L_PTE_MT_WRITETHROUGH:
+ return MEM_ATTR_NORMAL_WRITE_THROUGH;
+
+ case L_PTE_MT_UNCACHED:
+ return MEM_ATTR_STRONGLY_ORDERED;
+
+ case L_PTE_MT_DEV_SHARED:
+ case L_PTE_MT_DEV_NONSHARED:
+ return MEM_ATTR_DEVICE;
+
+ default:
+ return -EINVAL;
+ }
+#else
+ return 0;
+#endif
+}
+
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+ struct page *page, pgprot_t pgprot)
+{
+ int mem_attr;
+ u64 pte;
+ u8 ffa_mem_attr;
+ u8 ffa_mem_perm = 0;
+
+ if (!inf || !page)
+ return -EINVAL;
+
+ /* get physical address */
+ pte = (u64)page_to_phys(page);
+
+ /* get memory attributes */
+ mem_attr = get_mem_attr(page, pgprot);
+ if (mem_attr < 0)
+ return mem_attr;
+
+ switch (mem_attr) {
+ case MEM_ATTR_STRONGLY_ORDERED:
+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE;
+ break;
+
+ case MEM_ATTR_DEVICE:
+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE;
+ break;
+
+ case MEM_ATTR_NORMAL_NON_CACHEABLE:
+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED;
+ break;
+
+ case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE:
+ case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE:
+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ inf->paddr = pte;
+
+ /* add other attributes */
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
+ pte |= pgprot_val(pgprot);
+#elif defined(CONFIG_ARM)
+ if (pgprot_val(pgprot) & L_PTE_RDONLY)
+ pte |= ATTR_RDONLY;
+ if (pgprot_val(pgprot) & L_PTE_SHARED)
+ pte |= ATTR_INNER_SHAREABLE; /* inner shareable */
+#endif
+
+ if (!(pte & ATTR_RDONLY))
+ ffa_mem_perm |= FFA_MEM_PERM_RW;
+ else
+ ffa_mem_perm |= FFA_MEM_PERM_RO;
+
+ if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE)
+ ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE;
+
+ inf->ffa_mem_attr = ffa_mem_attr;
+ inf->ffa_mem_perm = ffa_mem_perm;
+ inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) |
+ ((u64)mem_attr << 48);
+ return 0;
+}
diff --git a/drivers/trusty/trusty-smc-arm.S b/drivers/trusty/trusty-smc-arm.S
new file mode 100644
index 0000000..8ff8354
--- /dev/null
+++ b/drivers/trusty/trusty-smc-arm.S
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/linkage.h>
+
+.arch_extension sec
+
+ENTRY(trusty_smc8)
+ /* Save stack location where r3-r7 smc arguments are stored */
+ mov r12, sp
+
+ /* Save original r4-r7 values as caller expects these to be preserved */
+ push {r4-r7}
+
+ /* Save return value pointer and return address */
+ push {r0, lr}
+
+ /* arm abi shifts arguments when returning a struct, shift them back */
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+
+ /* Load stack based arguments */
+ ldmia r12, {r3-r7}
+
+ smc #0
+
+ /* Restore return address and get return value pointer */
+ pop {r12, lr}
+
+ /* Copy 8-register smc return value to struct smc_ret8 return value */
+ stmia r12, {r0-r7}
+
+ /* Restore original r4-r7 values */
+ pop {r4-r7}
+
+ /* Return */
+ bx lr
+ENDPROC(trusty_smc8)
diff --git a/drivers/trusty/trusty-smc-arm64.S b/drivers/trusty/trusty-smc-arm64.S
new file mode 100644
index 0000000..14c8fed
--- /dev/null
+++ b/drivers/trusty/trusty-smc-arm64.S
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/linkage.h>
+
+.macro push ra, rb
+stp \ra, \rb, [sp,#-16]!
+.endm
+
+.macro pop ra, rb
+ldp \ra, \rb, [sp], #16
+.endm
+
+lr .req x30
+
+SYM_FUNC_START(trusty_smc8)
+ /*
+ * Save x8 (return value ptr) and lr. The SMC calling convention says el3
+ * does not need to preserve x8. The normal ABI does not require either x8
+ * or lr to be preserved.
+ */
+ push x8, lr
+ smc #0
+ pop x8, lr
+
+ /* Copy 8-register smc return value to struct smc_ret8 return value */
+ stp x0, x1, [x8], #16
+ stp x2, x3, [x8], #16
+ stp x4, x5, [x8], #16
+ stp x6, x7, [x8], #16
+
+ ret
+SYM_FUNC_END(trusty_smc8)
diff --git a/drivers/trusty/trusty-smc.h b/drivers/trusty/trusty-smc.h
new file mode 100644
index 0000000..b53e5ab
--- /dev/null
+++ b/drivers/trusty/trusty-smc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+#ifndef _TRUSTY_SMC_H
+#define _TRUSTY_SMC_H
+
+#include <linux/types.h>
+
+struct smc_ret8 {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+};
+
+struct smc_ret8 trusty_smc8(unsigned long r0, unsigned long r1,
+ unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7);
+
+#endif /* _TRUSTY_SMC_H */
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
new file mode 100644
index 0000000..9f84da1
--- /dev/null
+++ b/drivers/trusty/trusty.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Google, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/trusty/arm_ffa.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/trusty/trusty.h>
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include "trusty-smc.h"
+
+struct trusty_state;
+static struct platform_driver trusty_driver;
+
+struct trusty_work {
+ struct trusty_state *ts;
+ struct work_struct work;
+};
+
+struct trusty_state {
+ struct mutex smc_lock;
+ struct atomic_notifier_head notifier;
+ struct completion cpu_idle_completion;
+ char *version_str;
+ u32 api_version;
+ bool trusty_panicked;
+ struct device *dev;
+ struct workqueue_struct *nop_wq;
+ struct trusty_work __percpu *nop_works;
+ struct list_head nop_queue;
+ spinlock_t nop_lock; /* protects nop_queue */
+ struct device_dma_parameters dma_parms;
+ void *ffa_tx;
+ void *ffa_rx;
+ u16 ffa_local_id;
+ u16 ffa_remote_id;
+ struct mutex share_memory_msg_lock; /* protects share_memory_msg */
+};
+
+static inline unsigned long smc(unsigned long r0, unsigned long r1,
+ unsigned long r2, unsigned long r3)
+{
+ return trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0;
+}
+
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ if (WARN_ON(!s))
+ return SM_ERR_INVALID_PARAMETERS;
+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+
+ return smc(smcnr, a0, a1, a2);
+}
+EXPORT_SYMBOL(trusty_fast_call32);
+
+#ifdef CONFIG_64BIT
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ if (WARN_ON(!s))
+ return SM_ERR_INVALID_PARAMETERS;
+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+ if (WARN_ON(!SMC_IS_SMC64(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+
+ return smc(smcnr, a0, a1, a2);
+}
+EXPORT_SYMBOL(trusty_fast_call64);
+#endif
+
+static unsigned long trusty_std_call_inner(struct device *dev,
+ unsigned long smcnr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
+{
+ unsigned long ret;
+ int retry = 5;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
+ __func__, smcnr, a0, a1, a2);
+ while (true) {
+ ret = smc(smcnr, a0, a1, a2);
+ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
+ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
+ if ((int)ret != SM_ERR_BUSY || !retry)
+ break;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
+ __func__, smcnr, a0, a1, a2);
+ retry--;
+ }
+
+ return ret;
+}
+
+static unsigned long trusty_std_call_helper(struct device *dev,
+ unsigned long smcnr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
+{
+ unsigned long ret;
+ int sleep_time = 1;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ while (true) {
+ local_irq_disable();
+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
+ NULL);
+ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
+ NULL);
+ if (ret == SM_ERR_INTERRUPTED) {
+ /*
+ * Make sure this cpu will eventually re-enter trusty
+ * even if the std_call resumes on another cpu.
+ */
+ trusty_enqueue_nop(dev, NULL);
+ }
+ local_irq_enable();
+
+ if ((int)ret != SM_ERR_BUSY)
+ break;
+
+ if (sleep_time == 256)
+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
+ __func__, smcnr, a0, a1, a2);
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
+ __func__, smcnr, a0, a1, a2, sleep_time);
+
+ msleep(sleep_time);
+ if (sleep_time < 1000)
+ sleep_time <<= 1;
+
+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
+ __func__, smcnr, a0, a1, a2);
+ }
+
+ if (sleep_time > 256)
+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
+ __func__, smcnr, a0, a1, a2);
+
+ return ret;
+}
+
+static void trusty_std_call_cpu_idle(struct trusty_state *s)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
+ if (!ret) {
+ dev_warn(s->dev,
+ "%s: timed out waiting for cpu idle to clear, retry anyway\n",
+ __func__);
+ }
+}
+
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+ int ret;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ if (WARN_ON(SMC_IS_FASTCALL(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+
+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
+ return SM_ERR_INVALID_PARAMETERS;
+
+ if (s->trusty_panicked) {
+ /*
+ * Avoid calling the notifiers if trusty has panicked as they
+ * can trigger more calls.
+ */
+ return SM_ERR_PANIC;
+ }
+
+ if (smcnr != SMC_SC_NOP) {
+ mutex_lock(&s->smc_lock);
+ reinit_completion(&s->cpu_idle_completion);
+ }
+
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
+ __func__, smcnr, a0, a1, a2);
+
+ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
+ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
+ __func__, smcnr, a0, a1, a2);
+ if (ret == SM_ERR_CPU_IDLE)
+ trusty_std_call_cpu_idle(s);
+ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
+ }
+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
+ __func__, smcnr, a0, a1, a2, ret);
+
+ if (WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"))
+ s->trusty_panicked = true;
+
+ if (smcnr == SMC_SC_NOP)
+ complete(&s->cpu_idle_completion);
+ else
+ mutex_unlock(&s->smc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(trusty_std_call32);
+
+int trusty_share_memory(struct device *dev, u64 *id,
+ struct scatterlist *sglist, unsigned int nents,
+ pgprot_t pgprot)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+ int ret;
+ struct ns_mem_page_info pg_inf;
+ struct scatterlist *sg;
+ size_t count;
+ size_t i;
+ size_t len;
+ u64 ffa_handle = 0;
+ size_t total_len;
+ size_t endpoint_count = 1;
+ struct ffa_mtd *mtd = s->ffa_tx;
+ size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]);
+ struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset;
+ struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array;
+ size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx;
+ struct smc_ret8 smc_ret;
+ u32 cookie_low;
+ u32 cookie_high;
+
+ if (WARN_ON(dev->driver != &trusty_driver.driver))
+ return -EINVAL;
+
+ if (WARN_ON(nents < 1))
+ return -EINVAL;
+
+ if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+ dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+ if (count != nents) {
+ dev_err(s->dev, "failed to dma map sg_table\n");
+ return -EINVAL;
+ }
+
+ sg = sglist;
+ ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)),
+ pgprot);
+ if (ret) {
+ dev_err(s->dev, "%s: trusty_encode_page_info failed\n",
+ __func__);
+ goto err_encode_page_info;
+ }
+
+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+ *id = pg_inf.compat_attr;
+ return 0;
+ }
+
+ len = 0;
+ for_each_sg(sglist, sg, nents, i)
+ len += sg_dma_len(sg);
+
+ mutex_lock(&s->share_memory_msg_lock);
+
+ mtd->sender_id = s->ffa_local_id;
+ mtd->memory_region_attributes = pg_inf.ffa_mem_attr;
+ mtd->reserved_3 = 0;
+ mtd->flags = 0;
+ mtd->handle = 0;
+ mtd->tag = 0;
+ mtd->reserved_24_27 = 0;
+ mtd->emad_count = endpoint_count;
+ for (i = 0; i < endpoint_count; i++) {
+ struct ffa_emad *emad = &mtd->emad[i];
+ /* TODO: support stream ids */
+ emad->mapd.endpoint_id = s->ffa_remote_id;
+ emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm;
+ emad->mapd.flags = 0;
+ emad->comp_mrd_offset = comp_mrd_offset;
+ emad->reserved_8_15 = 0;
+ }
+ comp_mrd->total_page_count = len / PAGE_SIZE;
+ comp_mrd->address_range_count = nents;
+ comp_mrd->reserved_8_15 = 0;
+
+ total_len = cons_mrd_offset + nents * sizeof(*cons_mrd);
+ sg = sglist;
+ while (count) {
+ size_t lcount =
+ min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) /
+ sizeof(*cons_mrd));
+ size_t fragment_len = lcount * sizeof(*cons_mrd) +
+ cons_mrd_offset;
+
+ for (i = 0; i < lcount; i++) {
+ cons_mrd[i].address = sg_dma_address(sg);
+ cons_mrd[i].page_count = sg_dma_len(sg) / PAGE_SIZE;
+ cons_mrd[i].reserved_12_15 = 0;
+ sg = sg_next(sg);
+ }
+ count -= lcount;
+ if (cons_mrd_offset) {
+ /* First fragment */
+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_SHARE, total_len,
+ fragment_len, 0, 0, 0, 0, 0);
+ } else {
+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX,
+ cookie_low, cookie_high,
+ fragment_len, 0, 0, 0, 0);
+ }
+ if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) {
+ cookie_low = smc_ret.r1;
+ cookie_high = smc_ret.r2;
+ dev_dbg(s->dev, "cookie %x %x", cookie_low,
+ cookie_high);
+ if (!count) {
+ /*
+ * We have sent all our descriptors. Expected
+ * SMC_FC_FFA_SUCCESS, not a request to send
+ * another fragment.
+ */
+ dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n",
+ __func__, fragment_len, total_len);
+ ret = -EIO;
+ break;
+ }
+ } else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) {
+ ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32;
+ dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n",
+ __func__, fragment_len, total_len,
+ ffa_handle);
+ if (count) {
+ /*
+ * We have not sent all our descriptors.
+ * Expected SMC_FC_FFA_MEM_FRAG_RX not
+ * SMC_FC_FFA_SUCCESS.
+ */
+ dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n",
+ __func__, fragment_len, total_len,
+ count);
+ ret = -EIO;
+ break;
+ }
+ } else {
+ dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx",
+ __func__, fragment_len, total_len,
+ smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ ret = -EIO;
+ break;
+ }
+
+ cons_mrd = s->ffa_tx;
+ cons_mrd_offset = 0;
+ }
+
+ mutex_unlock(&s->share_memory_msg_lock);
+
+ if (!ret) {
+ *id = ffa_handle;
+ dev_dbg(s->dev, "%s: done\n", __func__);
+ return 0;
+ }
+
+ dev_err(s->dev, "%s: failed %d", __func__, ret);
+
+err_encode_page_info:
+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+ return ret;
+}
+EXPORT_SYMBOL(trusty_share_memory);
+
+/*
+ * trusty_share_memory_compat - trusty_share_memory wrapper for old apis
+ *
+ * Call trusty_share_memory and filter out memory attributes if trusty version
+ * is old. Used by clients that used to pass just a physical address to trusty
+ * instead of a physical address plus memory attributes value.
+ */
+int trusty_share_memory_compat(struct device *dev, u64 *id,
+ struct scatterlist *sglist, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int ret;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ ret = trusty_share_memory(dev, id, sglist, nents, pgprot);
+ if (!ret && s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ)
+ *id &= 0x0000FFFFFFFFF000ull;
+
+ return ret;
+}
+EXPORT_SYMBOL(trusty_share_memory_compat);
+
+int trusty_reclaim_memory(struct device *dev, u64 id,
+ struct scatterlist *sglist, unsigned int nents)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+ struct smc_ret8 smc_ret;
+
+ if (WARN_ON(dev->driver != &trusty_driver.driver))
+ return -EINVAL;
+
+ if (WARN_ON(nents < 1))
+ return -EINVAL;
+
+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+ if (nents != 1) {
+ dev_err(s->dev, "%s: not supported\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+ return 0;
+ }
+
+ mutex_lock(&s->share_memory_msg_lock);
+
+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0,
+ 0, 0, 0);
+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+ dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx",
+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ if (smc_ret.r0 == SMC_FC_FFA_ERROR &&
+ smc_ret.r2 == FFA_ERROR_DENIED)
+ ret = -EBUSY;
+ else
+ ret = -EIO;
+ }
+
+ mutex_unlock(&s->share_memory_msg_lock);
+
+ if (ret != 0)
+ return ret;
+
+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+ return 0;
+}
+EXPORT_SYMBOL(trusty_reclaim_memory);
+
+int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return atomic_notifier_chain_register(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_register);
+
+int trusty_call_notifier_unregister(struct device *dev,
+ struct notifier_block *n)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return atomic_notifier_chain_unregister(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_unregister);
+
+static int trusty_remove_child(struct device *dev, void *data)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static ssize_t trusty_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str ?: "unknown");
+}
+
+static DEVICE_ATTR(trusty_version, 0400, trusty_version_show, NULL);
+
+static struct attribute *trusty_attrs[] = {
+ &dev_attr_trusty_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(trusty);
+
+const char *trusty_version_str_get(struct device *dev)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return s->version_str;
+}
+EXPORT_SYMBOL(trusty_version_str_get);
+
+static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev)
+{
+ phys_addr_t tx_paddr;
+ phys_addr_t rx_paddr;
+ int ret;
+ struct smc_ret8 smc_ret;
+
+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ)
+ return 0;
+
+ /* Get supported FF-A version and check if it is compatible */
+ smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0,
+ 0, 0, 0, 0);
+ if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) {
+ dev_err(s->dev,
+ "%s: Unsupported FF-A version 0x%lx, expected 0x%x\n",
+ __func__, smc_ret.r0, FFA_CURRENT_VERSION);
+ ret = -EIO;
+ goto err_version;
+ }
+
+ /* Check that SMC_FC_FFA_MEM_SHARE is implemented */
+ smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0,
+ 0, 0, 0, 0);
+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+ dev_err(s->dev,
+ "%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n",
+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ ret = -EIO;
+ goto err_features;
+ }
+
+ /*
+ * Set FF-A endpoint IDs.
+ *
+ * Hardcode 0x8000 for the secure os.
+ * TODO: Use FF-A call or device tree to configure this dynamically
+ */
+ smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+ dev_err(s->dev,
+ "%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n",
+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ ret = -EIO;
+ goto err_id_get;
+ }
+
+ s->ffa_local_id = smc_ret.r2;
+ s->ffa_remote_id = 0x8000;
+
+ s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!s->ffa_tx) {
+ ret = -ENOMEM;
+ goto err_alloc_tx;
+ }
+ tx_paddr = virt_to_phys(s->ffa_tx);
+ if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto err_unaligned_tx_buf;
+ }
+
+ s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!s->ffa_rx) {
+ ret = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ rx_paddr = virt_to_phys(s->ffa_rx);
+ if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto err_unaligned_rx_buf;
+ }
+
+ smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr, 1, 0,
+ 0, 0, 0);
+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+ dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n",
+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ ret = -EIO;
+ goto err_rxtx_map;
+ }
+
+ return 0;
+
+err_rxtx_map:
+err_unaligned_rx_buf:
+ kfree(s->ffa_rx);
+ s->ffa_rx = NULL;
+err_alloc_rx:
+err_unaligned_tx_buf:
+ kfree(s->ffa_tx);
+ s->ffa_tx = NULL;
+err_alloc_tx:
+err_id_get:
+err_features:
+err_version:
+ return ret;
+}
+
+static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev)
+{
+ struct smc_ret8 smc_ret;
+
+ smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0);
+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+ dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n",
+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+ } else {
+ kfree(s->ffa_rx);
+ kfree(s->ffa_tx);
+ }
+}
+
+static void trusty_init_version(struct trusty_state *s, struct device *dev)
+{
+ int ret;
+ int i;
+ int version_str_len;
+
+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
+ if (ret <= 0)
+ goto err_get_size;
+
+ version_str_len = ret;
+
+ s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
+ for (i = 0; i < version_str_len; i++) {
+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
+ if (ret < 0)
+ goto err_get_char;
+ s->version_str[i] = ret;
+ }
+ s->version_str[i] = '\0';
+
+ dev_info(dev, "trusty version: %s\n", s->version_str);
+ return;
+
+err_get_char:
+ kfree(s->version_str);
+ s->version_str = NULL;
+err_get_size:
+ dev_err(dev, "failed to get version: %d\n", ret);
+}
+
+u32 trusty_get_api_version(struct device *dev)
+{
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ return s->api_version;
+}
+EXPORT_SYMBOL(trusty_get_api_version);
+
+static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
+{
+ u32 api_version;
+
+ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
+ TRUSTY_API_VERSION_CURRENT, 0, 0);
+ if (api_version == SM_ERR_UNDEFINED_SMC)
+ api_version = 0;
+
+ if (api_version > TRUSTY_API_VERSION_CURRENT) {
+ dev_err(dev, "unsupported api version %u > %u\n",
+ api_version, TRUSTY_API_VERSION_CURRENT);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "selected api version: %u (requested %u)\n",
+ api_version, TRUSTY_API_VERSION_CURRENT);
+ s->api_version = api_version;
+
+ return 0;
+}
+
+static bool dequeue_nop(struct trusty_state *s, u32 *args)
+{
+ unsigned long flags;
+ struct trusty_nop *nop = NULL;
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (!list_empty(&s->nop_queue)) {
+ nop = list_first_entry(&s->nop_queue,
+ struct trusty_nop, node);
+ list_del_init(&nop->node);
+ args[0] = nop->args[0];
+ args[1] = nop->args[1];
+ args[2] = nop->args[2];
+ } else {
+ args[0] = 0;
+ args[1] = 0;
+ args[2] = 0;
+ }
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+ return nop;
+}
+
+static void locked_nop_work_func(struct work_struct *work)
+{
+ int ret;
+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
+ struct trusty_state *s = tw->ts;
+
+ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
+ if (ret != 0)
+ dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d",
+ __func__, ret);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
+static void nop_work_func(struct work_struct *work)
+{
+ int ret;
+ bool next;
+ u32 args[3];
+ u32 last_arg0;
+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
+ struct trusty_state *s = tw->ts;
+
+ dequeue_nop(s, args);
+ do {
+ dev_dbg(s->dev, "%s: %x %x %x\n",
+ __func__, args[0], args[1], args[2]);
+
+ last_arg0 = args[0];
+ ret = trusty_std_call32(s->dev, SMC_SC_NOP,
+ args[0], args[1], args[2]);
+
+ next = dequeue_nop(s, args);
+
+ if (ret == SM_ERR_NOP_INTERRUPTED) {
+ next = true;
+ } else if (ret != SM_ERR_NOP_DONE) {
+ dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d",
+ __func__, last_arg0, ret);
+ if (last_arg0) {
+ /*
+ * Don't break out of the loop if a non-default
+ * nop-handler returns an error.
+ */
+ next = true;
+ }
+ }
+ } while (next);
+
+ dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
+{
+ unsigned long flags;
+ struct trusty_work *tw;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ preempt_disable();
+ tw = this_cpu_ptr(s->nop_works);
+ if (nop) {
+ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (list_empty(&nop->node))
+ list_add_tail(&nop->node, &s->nop_queue);
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+ }
+ queue_work(s->nop_wq, &tw->work);
+ preempt_enable();
+}
+EXPORT_SYMBOL(trusty_enqueue_nop);
+
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
+{
+ unsigned long flags;
+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+ if (WARN_ON(!nop))
+ return;
+
+ spin_lock_irqsave(&s->nop_lock, flags);
+ if (!list_empty(&nop->node))
+ list_del_init(&nop->node);
+ spin_unlock_irqrestore(&s->nop_lock, flags);
+}
+EXPORT_SYMBOL(trusty_dequeue_nop);
+
+static int trusty_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int cpu;
+ work_func_t work_func;
+ struct trusty_state *s;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!node) {
+ dev_err(&pdev->dev, "of_node required\n");
+ return -EINVAL;
+ }
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto err_allocate_state;
+ }
+
+ s->dev = &pdev->dev;
+ spin_lock_init(&s->nop_lock);
+ INIT_LIST_HEAD(&s->nop_queue);
+ mutex_init(&s->smc_lock);
+ mutex_init(&s->share_memory_msg_lock);
+ ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
+ init_completion(&s->cpu_idle_completion);
+
+ s->dev->dma_parms = &s->dma_parms;
+ dma_set_max_seg_size(s->dev, 0xfffff000); /* dma_parms limit */
+ /*
+ * Set dma mask to 48 bits. This is the current limit of
+ * trusty_encode_page_info.
+ */
+ dma_coerce_mask_and_coherent(s->dev, DMA_BIT_MASK(48));
+
+ platform_set_drvdata(pdev, s);
+
+ trusty_init_version(s, &pdev->dev);
+
+ ret = trusty_init_api_version(s, &pdev->dev);
+ if (ret < 0)
+ goto err_api_version;
+
+ ret = trusty_init_msg_buf(s, &pdev->dev);
+ if (ret < 0)
+ goto err_init_msg_buf;
+
+ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0);
+ if (!s->nop_wq) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed create trusty-nop-wq\n");
+ goto err_create_nop_wq;
+ }
+
+ s->nop_works = alloc_percpu(struct trusty_work);
+ if (!s->nop_works) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate works\n");
+ goto err_alloc_works;
+ }
+
+ if (s->api_version < TRUSTY_API_VERSION_SMP)
+ work_func = locked_nop_work_func;
+ else
+ work_func = nop_work_func;
+
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ tw->ts = s;
+ INIT_WORK(&tw->work, work_func);
+ }
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
+ goto err_add_children;
+ }
+
+ return 0;
+
+err_add_children:
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ flush_work(&tw->work);
+ }
+ free_percpu(s->nop_works);
+err_alloc_works:
+ destroy_workqueue(s->nop_wq);
+err_create_nop_wq:
+ trusty_free_msg_buf(s, &pdev->dev);
+err_init_msg_buf:
+err_api_version:
+ s->dev->dma_parms = NULL;
+ kfree(s->version_str);
+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+ mutex_destroy(&s->share_memory_msg_lock);
+ mutex_destroy(&s->smc_lock);
+ kfree(s);
+err_allocate_state:
+ return ret;
+}
+
+static int trusty_remove(struct platform_device *pdev)
+{
+ unsigned int cpu;
+ struct trusty_state *s = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+
+ for_each_possible_cpu(cpu) {
+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+ flush_work(&tw->work);
+ }
+ free_percpu(s->nop_works);
+ destroy_workqueue(s->nop_wq);
+
+ mutex_destroy(&s->share_memory_msg_lock);
+ mutex_destroy(&s->smc_lock);
+ trusty_free_msg_buf(s, &pdev->dev);
+ s->dev->dma_parms = NULL;
+ kfree(s->version_str);
+ kfree(s);
+ return 0;
+}
+
+static const struct of_device_id trusty_of_match[] = {
+ { .compatible = "android,trusty-smc-v1", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(trusty, trusty_of_match);
+
+static struct platform_driver trusty_driver = {
+ .probe = trusty_probe,
+ .remove = trusty_remove,
+ .driver = {
+ .name = "trusty",
+ .of_match_table = trusty_of_match,
+ .dev_groups = trusty_groups,
+ },
+};
+
+static int __init trusty_driver_init(void)
+{
+ return platform_driver_register(&trusty_driver);
+}
+
+static void __exit trusty_driver_exit(void)
+{
+ platform_driver_unregister(&trusty_driver);
+}
+
+subsys_initcall(trusty_driver_init);
+module_exit(trusty_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty core driver");
diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h
new file mode 100644
index 0000000..ab7b2af
--- /dev/null
+++ b/include/linux/trusty/arm_ffa.h
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_ARM_FFA_H
+#define __LINUX_TRUSTY_ARM_FFA_H
+
+/*
+ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0
+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
+ */
+
+#include "smcall.h"
+
+#ifndef STATIC_ASSERT
+#define STATIC_ASSERT(e) _Static_assert(e, #e)
+#endif
+
+#define FFA_CURRENT_VERSION_MAJOR (1U)
+#define FFA_CURRENT_VERSION_MINOR (0U)
+
+#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16)
+#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff))
+#define FFA_VERSION(major, minor) (((major) << 16) | (minor))
+#define FFA_CURRENT_VERSION \
+ FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR)
+
+#define SMC_ENTITY_SHARED_MEMORY 4
+
+#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \
+ SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \
+ SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+
+/**
+ * typedef ffa_endpoint_id16_t - Endpoint ID
+ *
+ * Current implementation only supports VMIDs. FFA spec also supports stream
+ * endpoint ids.
+ */
+typedef uint16_t ffa_endpoint_id16_t;
+
+/**
+ * struct ffa_cons_mrd - Constituent memory region descriptor
+ * @address:
+ * Start address of contiguous memory region. Must be 4K page aligned.
+ * @page_count:
+ * Number of 4K pages in region.
+ * @reserved_12_15:
+ * Reserve bytes 12-15 to pad struct size to 16 bytes.
+ */
+struct ffa_cons_mrd {
+ uint64_t address;
+ uint32_t page_count;
+ uint32_t reserved_12_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16);
+
+/**
+ * struct ffa_comp_mrd - Composite memory region descriptor
+ * @total_page_count:
+ * Number of 4k pages in memory region. Must match sum of
+ * @address_range_array[].page_count.
+ * @address_range_count:
+ * Number of entries in @address_range_array.
+ * @reserved_8_15:
+ * Reserve bytes 8-15 to pad struct size to 16 byte alignment and
+ * make @address_range_array 16 byte aligned.
+ * @address_range_array:
+ * Array of &struct ffa_cons_mrd entries.
+ */
+struct ffa_comp_mrd {
+ uint32_t total_page_count;
+ uint32_t address_range_count;
+ uint64_t reserved_8_15;
+ struct ffa_cons_mrd address_range_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16);
+
+/**
+ * typedef ffa_mem_attr8_t - Memory region attributes
+ *
+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
+ * Device-nGnRnE.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
+ * Device-nGnRE.
+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
+ * Device-nGRE.
+ * * @FFA_MEM_ATTR_DEVICE_GRE:
+ * Device-GRE.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED
+ * Normal memory. Non-cacheable.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB
+ * Normal memory. Write-back cached.
+ * * @FFA_MEM_ATTR_NON_SHAREABLE
+ * Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE
+ * Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_INNER_SHAREABLE
+ * Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ */
+typedef uint8_t ffa_mem_attr8_t;
+#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2))
+#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0)
+#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0)
+#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0)
+
+/**
+ * typedef ffa_mem_perm8_t - Memory access permissions
+ *
+ * * @FFA_MEM_ATTR_RO
+ * Request or specify read-only mapping.
+ * * @FFA_MEM_ATTR_RW
+ * Request or allow read-write mapping.
+ * * @FFA_MEM_PERM_NX
+ * Deny executable mapping.
+ * * @FFA_MEM_PERM_X
+ * Request executable mapping.
+ */
+typedef uint8_t ffa_mem_perm8_t;
+#define FFA_MEM_PERM_RO (1U << 0)
+#define FFA_MEM_PERM_RW (1U << 1)
+#define FFA_MEM_PERM_NX (1U << 2)
+#define FFA_MEM_PERM_X (1U << 3)
+
+/**
+ * typedef ffa_mem_flag8_t - Endpoint memory flags
+ *
+ * * @FFA_MEM_FLAG_OTHER
+ * Other borrower. Memory region must not be or was not retrieved on behalf
+ * of this endpoint.
+ */
+typedef uint8_t ffa_mem_flag8_t;
+#define FFA_MEM_FLAG_OTHER (1U << 0)
+
+/**
+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
+ *
+ * * @FFA_MTD_FLAG_ZERO_MEMORY
+ * Zero memory after unmapping from sender (must be 0 for share).
+ * * @FFA_MTD_FLAG_TIME_SLICING
+ * Not supported by this implementation.
+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH
+ * Zero memory after unmapping from borrowers (must be 0 for share).
+ * * @FFA_MTD_FLAG_TYPE_MASK
+ * Bit-mask to extract memory management transaction type from flags.
+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY
+ * Share memory transaction flag.
+ * Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
+ * @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that
+ * it must have come from a share operation.
+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK
+ * Not supported by this implementation.
+ */
+typedef uint32_t ffa_mtd_flag32_t;
+#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0)
+#define FFA_MTD_FLAG_TIME_SLICING (1U << 1)
+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2)
+#define FFA_MTD_FLAG_TYPE_MASK (3U << 3)
+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3)
+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5)
+
+/**
+ * struct ffa_mapd - Memory access permissions descriptor
+ * @endpoint_id:
+ * Endpoint id that @memory_access_permissions and @flags apply to.
+ * (&typedef ffa_endpoint_id16_t).
+ * @memory_access_permissions:
+ * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
+ * @flags:
+ * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
+ */
+struct ffa_mapd {
+ ffa_endpoint_id16_t endpoint_id;
+ ffa_mem_perm8_t memory_access_permissions;
+ ffa_mem_flag8_t flags;
+};
+STATIC_ASSERT(sizeof(struct ffa_mapd) == 4);
+
+/**
+ * struct ffa_emad - Endpoint memory access descriptor.
+ * @mapd: &struct ffa_mapd.
+ * @comp_mrd_offset:
+ * Offset of &struct ffa_comp_mrd from start of &struct ffa_mtd.
+ * @reserved_8_15:
+ * Reserved bytes 8-15. Must be 0.
+ */
+struct ffa_emad {
+ struct ffa_mapd mapd;
+ uint32_t comp_mrd_offset;
+ uint64_t reserved_8_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_emad) == 16);
+
+/**
+ * struct ffa_mtd - Memory transaction descriptor.
+ * @sender_id:
+ * Sender endpoint id.
+ * @memory_region_attributes:
+ * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
+ * @reserved_3:
+ * Reserved bytes 3. Must be 0.
+ * @flags:
+ * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ * Id of shared memory object. Must be 0 for MEM_SHARE.
+ * @tag: Client allocated tag. Must match original value.
+ * @reserved_24_27:
+ * Reserved bytes 24-27. Must be 0.
+ * @emad_count:
+ * Number of entries in @emad. Must be 1 in current implementation.
+ * FFA spec allows more entries.
+ * @emad:
+ * Endpoint memory access descriptor array (see @struct ffa_emad).
+ */
+struct ffa_mtd {
+ ffa_endpoint_id16_t sender_id;
+ ffa_mem_attr8_t memory_region_attributes;
+ uint8_t reserved_3;
+ ffa_mtd_flag32_t flags;
+ uint64_t handle;
+ uint64_t tag;
+ uint32_t reserved_24_27;
+ uint32_t emad_count;
+ struct ffa_emad emad[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mtd) == 32);
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ * Id of shared memory object to relinquish.
+ * @flags:
+ * If bit 0 is set clear memory after unmapping from borrower. Must be 0
+ * for share. Bit[1]: Time slicing. Not supported, must be 0. All other
+ * bits are reserved 0.
+ * @endpoint_count:
+ * Number of entries in @endpoint_array.
+ * @endpoint_array:
+ * Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+ uint64_t handle;
+ uint32_t flags;
+ uint32_t endpoint_count;
+ ffa_endpoint_id16_t endpoint_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16);
+
+/**
+ * enum ffa_error - FF-A error code
+ * @FFA_ERROR_NOT_SUPPORTED:
+ * Operation contained possibly valid parameters not supported by the
+ * current implementation. Does not match FF-A 1.0 EAC 1_0 definition.
+ * @FFA_ERROR_INVALID_PARAMETERS:
+ * Invalid parameters. Conditions function specific.
+ * @FFA_ERROR_NO_MEMORY:
+ * Not enough memory.
+ * @FFA_ERROR_DENIED:
+ * Operation not allowed. Conditions function specific.
+ *
+ * FF-A 1.0 EAC 1_0 defines other error codes as well but the current
+ * implementation does not use them.
+ */
+enum ffa_error {
+ FFA_ERROR_NOT_SUPPORTED = -1,
+ FFA_ERROR_INVALID_PARAMETERS = -2,
+ FFA_ERROR_NO_MEMORY = -3,
+ FFA_ERROR_DENIED = -6,
+};
+
+/**
+ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC_FFA_ERROR - SMC error return opcode
+ *
+ * Register arguments:
+ *
+ * * w1: VMID in [31:16], vCPU in [15:0]
+ * * w2: Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC_FFA_SUCCESS - 32 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1: VMID in [31:16], vCPU in [15:0]
+ * * w2-w7: Function specific
+ */
+#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1: VMID in [31:16], vCPU in [15:0]
+ * * w2/x2-w7/x7: Function specific
+ */
+#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version
+ *
+ * Register arguments:
+ *
+ * * w1: Major version bit[30:16] and minor version in bit[15:0] supported
+ * by caller. Bit[31] must be 0.
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ * * w2: Major version bit[30:16], minor version in bit[15:0], bit[31] must
+ * be 0.
+ *
+ * or
+ *
+ * * w0: SMC_FC_FFA_ERROR
+ * * w2: FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the
+ * minimum major version supported.
+ */
+#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63)
+
+/**
+ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support
+ *
+ * Register arguments:
+ *
+ * * w1: FF-A function ID
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ * * w2: Bit[0]: Supports custom buffers for memory transactions.
+ * Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary.
+ * Other bits must be 0.
+ * * w3: For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can
+ * retrieve each memory region before relinquishing it specified as
+ * ((1U << (value + 1)) - 1) (i.e. value = bits in reference count - 1).
+ * For all other bits and commands: must be 0.
+ * or
+ *
+ * * w0: SMC_FC_FFA_ERROR
+ * * w2: FFA_ERROR_NOT_SUPPORTED if function is not implemented, or
+ * FFA_ERROR_INVALID_PARAMETERS if function id is not valid.
+ */
+#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64)
+
+/**
+ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * w1: TX address
+ * * w2: RX address
+ * * w3: RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66)
+
+/**
+ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * x1: TX address
+ * * x2: RX address
+ * * x3: RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66)
+#ifdef CONFIG_64BIT
+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP
+#else
+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP
+#endif
+
+/**
+ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers
+ *
+ * Register arguments:
+ *
+ * * w1: ID in [31:16]
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67)
+
+/**
+ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ * * w2: ID in bit[15:0], bit[31:16] must be 0.
+ */
+#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69)
+
+/**
+ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory
+ *
+ * Not supported.
+ */
+#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71)
+
+/**
+ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory
+ *
+ * Not currently supported.
+ */
+#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72)
+
+/**
+ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1: Total length
+ * * w2: Fragment length
+ * * w3: Address
+ * * w4: Page count
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ * * w2/w3: Handle
+ *
+ * or
+ *
+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0: SMC_FC_FFA_ERROR
+ * * w2: Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1: Total length
+ * * w2: Fragment length
+ * * x3: Address
+ * * w4: Page count
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ * * w2/w3: Handle
+ *
+ * or
+ *
+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0: SMC_FC_FFA_ERROR
+ * * w2: Error code (&enum ffa_error)
+ */
+#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1: Total length
+ * * w2: Fragment length
+ * * w3: Address
+ * * w4: Page count
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1: Total length
+ * * w2: Fragment length
+ * * x3: Address
+ * * w4: Page count
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 bit SMC return opcode
+ *
+ * Register arguments:
+ *
+ * * w1: Total length
+ * * w2: Fragment length
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75)
+
+/**
+ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory
+ *
+ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer.
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76)
+
+/**
+ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory
+ *
+ * Register arguments:
+ *
+ * * w1/w2: Handle
+ * * w3: Flags
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment.
+ *
+ * Register arguments:
+ *
+ * * w1/w2: Cookie
+ * * w3: Fragment offset.
+ * * w4: Endpoint ID in [31:16], if client is hypervisor.
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_MEM_FRAG_TX
+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_FRAG_TX
+ */
+#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment
+ *
+ * Register arguments:
+ *
+ * * w1/w2: Cookie
+ * * w3: Fragment length.
+ * * w4: Sender endpoint ID in [31:16], if client is hypervisor.
+ *
+ * Return:
+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS.
+ * * w1/x1-w5/x5: See opcode in w0.
+ */
+#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B)
+
+#endif /* __LINUX_TRUSTY_ARM_FFA_H */
diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h
new file mode 100644
index 0000000..f650444
--- /dev/null
+++ b/include/linux/trusty/sm_err.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_SM_ERR_H
+#define __LINUX_TRUSTY_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0) */
+#define SM_ERR_INVALID_PARAMETERS -2
+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART -4 /* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
+#define SM_ERR_NOT_SUPPORTED -8
+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT -10
+#define SM_ERR_PANIC -11 /* Secure OS crashed */
+#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */
+#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */
+#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */
+#define SM_ERR_NOP_DONE -15 /* Cpu idle after SMC_SC_NOP (not an error) */
+
+#endif
diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h
new file mode 100644
index 0000000..2f69eb3
--- /dev/null
+++ b/include/linux/trusty/smcall.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2013-2014 Google Inc. All rights reserved
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_SMCALL_H
+#define __LINUX_TRUSTY_SMCALL_H
+
+#define SMC_NUM_ENTITIES 64
+#define SMC_NUM_ARGS 4
+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1U) << 31) | \
+ (((smc64) & 0x1U) << 30) | \
+ (((entity) & 0x3FU) << 24) | \
+ ((fn) & 0xFFFFU) \
+ )
+
+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0)
+#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0)
+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1)
+#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1)
+
+#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */
+#define SMC_ENTITY_CPU 1 /* CPU Service calls */
+#define SMC_ENTITY_SIP 2 /* SIP Service calls */
+#define SMC_ENTITY_OEM 3 /* OEM Service calls */
+#define SMC_ENTITY_STD 4 /* Standard Service calls */
+#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */
+#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */
+#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */
+#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, SC = Standard call */
+#define SMC_SC_RESTART_LAST SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
+
+/**
+ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq
+ *
+ * No arguments, no return value.
+ *
+ * Re-enter trusty after returning to ns to process an fiq. Must be called iff
+ * trusty returns SM_ERR_FIQ_INTERRUPTED.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later.
+ */
+#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
+
+/**
+ * SMC_SC_NOP - Enter trusty to run pending work.
+ *
+ * No arguments.
+ *
+ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE.
+ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later.
+ */
+#define SMC_SC_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_SC_NS_RETURN SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
+
+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
+
+/**
+ * SMC_FC_API_VERSION - Find and select supported API version.
+ *
+ * @r1: Version supported by client.
+ *
+ * Returns version supported by trusty.
+ *
+ * If multiple versions are supported, the client should start by calling
+ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then
+ * return a version it supports. If the client does not support the version
+ * returned by trusty and the version returned is less than the version
+ * requested, repeat the call with the largest supported version less than the
+ * last returned version.
+ *
+ * This call must be made before any calls that are affected by the api version.
+ */
+#define TRUSTY_API_VERSION_RESTART_FIQ (1)
+#define TRUSTY_API_VERSION_SMP (2)
+#define TRUSTY_API_VERSION_SMP_NOP (3)
+#define TRUSTY_API_VERSION_PHYS_MEM_OBJ (4)
+#define TRUSTY_API_VERSION_MEM_OBJ (5)
+#define TRUSTY_API_VERSION_CURRENT (5)
+#define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11)
+
+#endif /* __LINUX_TRUSTY_SMCALL_H */
diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h
new file mode 100644
index 0000000..4cfd873
--- /dev/null
+++ b/include/linux/trusty/trusty.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Google, Inc.
+ */
+#ifndef __LINUX_TRUSTY_TRUSTY_H
+#define __LINUX_TRUSTY_TRUSTY_H
+
+#include <linux/kernel.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/pagemap.h>
+
+
+#if IS_ENABLED(CONFIG_TRUSTY)
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+#ifdef CONFIG_64BIT
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2);
+#endif
+#else
+static inline s32 trusty_std_call32(struct device *dev, u32 smcnr,
+ u32 a0, u32 a1, u32 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr,
+ u32 a0, u32 a1, u32 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+#ifdef CONFIG_64BIT
+static inline s64 trusty_fast_call64(struct device *dev,
+ u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+ return SM_ERR_UNDEFINED_SMC;
+}
+#endif
+#endif
+
+struct notifier_block;
+enum {
+ TRUSTY_CALL_PREPARE,
+ TRUSTY_CALL_RETURNED,
+};
+int trusty_call_notifier_register(struct device *dev,
+ struct notifier_block *n);
+int trusty_call_notifier_unregister(struct device *dev,
+ struct notifier_block *n);
+const char *trusty_version_str_get(struct device *dev);
+u32 trusty_get_api_version(struct device *dev);
+
+struct ns_mem_page_info {
+ u64 paddr;
+ u8 ffa_mem_attr;
+ u8 ffa_mem_perm;
+ u64 compat_attr;
+};
+
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+ struct page *page, pgprot_t pgprot);
+
+struct scatterlist;
+typedef u64 trusty_shared_mem_id_t;
+int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id,
+ struct scatterlist *sglist, unsigned int nents,
+ pgprot_t pgprot);
+int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id,
+ struct scatterlist *sglist, unsigned int nents,
+ pgprot_t pgprot);
+int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id,
+ struct scatterlist *sglist, unsigned int nents);
+
+struct trusty_nop {
+ struct list_head node;
+ u32 args[3];
+};
+
+static inline void trusty_nop_init(struct trusty_nop *nop,
+ u32 arg0, u32 arg1, u32 arg2) {
+ INIT_LIST_HEAD(&nop->node);
+ nop->args[0] = arg0;
+ nop->args[1] = arg1;
+ nop->args[2] = arg2;
+}
+
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop);
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop);
+
+#endif