author    Robin Peng <robinpeng@google.com>    2022-12-16 05:58:24 +0000
committer Robin Peng <robinpeng@google.com>    2022-12-16 05:58:24 +0000
commit    14a3d3ce77e6dd25c7ef3c5cebb271c88c90ec3f (patch)
tree      f467dc1f10b892c54b67faee6da2f180793f0072
parent    45c6a20ada39db4a105af9e3a8d6217d83096e44 (diff)
parent    1f3037b5692db5ded25a061fd5599f60145f76bc (diff)
download  zuma-14a3d3ce77e6dd25c7ef3c5cebb271c88c90ec3f.tar.gz
Merge android13-gs-pixel-5.15 into android14-gs-pixel-5.15
Bug: 260174400
Change-Id: Iedecbf4448908a34b0bd2ec88d1f25a280d786f6
Signed-off-by: Robin Peng <robinpeng@google.com>
-rw-r--r--  Makefile                                              3
-rw-r--r--  callisto-platform.c                                 340
-rw-r--r--  callisto-platform.h                                  22
-rw-r--r--  callisto/config.h                                     1
-rw-r--r--  callisto/csrs.h                                       6
-rw-r--r--  callisto/mailbox-regs.h                              30
-rw-r--r--  gcip-kernel-driver/drivers/gcip/Makefile              4
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c  101
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-domain-pool.h   49
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h            9
-rw-r--r--  gsx01-mailbox-driver.c                               70
-rw-r--r--  gxp-client.c                                         18
-rw-r--r--  gxp-common-platform.c                                40
-rw-r--r--  gxp-core-telemetry.c                                 83
-rw-r--r--  gxp-core-telemetry.h                                 27
-rw-r--r--  gxp-domain-pool.c                                   129
-rw-r--r--  gxp-domain-pool.h                                    27
-rw-r--r--  gxp-internal.h                                        5
-rw-r--r--  gxp-kci.c                                            52
-rw-r--r--  gxp-kci.h                                            85
-rw-r--r--  gxp-mailbox-driver.c                                 83
-rw-r--r--  gxp-mailbox-regs.h                                   29
-rw-r--r--  gxp-mcu-platform.c                                   87
-rw-r--r--  gxp-mcu-platform.h                                   53
-rw-r--r--  gxp.h                                                30
25 files changed, 910 insertions(+), 473 deletions(-)
diff --git a/Makefile b/Makefile
index f69cdab..b5d4438 100644
--- a/Makefile
+++ b/Makefile
@@ -23,7 +23,6 @@ gxp-objs += \
gxp-firmware-data.o \
gxp-firmware.o \
gxp-lpm.o \
- gxp-mailbox-driver.o \
gxp-mailbox-manager.o \
gxp-mailbox.o \
gxp-mapping.o \
@@ -42,10 +41,12 @@ USE_GCIP := TRUE
gxp-objs += \
callisto-platform.o \
+ gsx01-mailbox-driver.o \
gxp-dci.o \
gxp-kci.o \
gxp-mcu-firmware.o \
gxp-mcu-fs.o \
+ gxp-mcu-platform.o \
gxp-mcu-telemetry.o \
gxp-mcu.o \
gxp-uci.o \
diff --git a/callisto-platform.c b/callisto-platform.c
index bc274a3..8600be5 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -11,29 +11,38 @@
#include <linux/platform_device.h>
#include "callisto-platform.h"
-
-#include "gxp-common-platform.c"
#include "gxp-kci.h"
#include "gxp-mcu-fs.h"
#include "gxp-uci.h"
-#include "gxp-usage-stats.h"
-#if IS_ENABLED(CONFIG_GXP_TEST)
-char *callisto_work_mode_name = "direct";
-#else
-static char *callisto_work_mode_name = "direct";
-#endif
+#include "gxp-common-platform.c"
+
+static int gxp_mmu_set_shareability(struct device *dev, u32 reg_base)
+{
+ void __iomem *addr = ioremap(reg_base, PAGE_SIZE);
+
+ if (!addr) {
+ dev_err(dev, "sysreg ioremap failed\n");
+ return -ENOMEM;
+ }
-module_param_named(work_mode, callisto_work_mode_name, charp, 0660);
+ writel_relaxed(SHAREABLE_WRITE | SHAREABLE_READ | INNER_SHAREABLE,
+ addr + GXP_SYSREG_AUR0_SHAREABILITY);
+ writel_relaxed(SHAREABLE_WRITE | SHAREABLE_READ | INNER_SHAREABLE,
+ addr + GXP_SYSREG_AUR1_SHAREABILITY);
+ iounmap(addr);
-static char *zuma_revision = "a0";
-module_param_named(chip_rev, zuma_revision, charp, 0660);
+ return 0;
+}
static int callisto_platform_parse_dt(struct platform_device *pdev,
struct gxp_dev *gxp)
{
struct resource *r;
void *addr;
+ int ret;
+ u32 reg;
+ struct device *dev = gxp->dev;
/*
* Setting BAAW is required for having correct base for CSR accesses.
@@ -43,7 +52,7 @@ static int callisto_platform_parse_dt(struct platform_device *pdev,
*/
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "baaw");
if (!IS_ERR_OR_NULL(r)) {
- addr = devm_ioremap_resource(gxp->dev, r);
+ addr = devm_ioremap_resource(dev, r);
/* start address */
writel(0x0, addr + 0x0);
/* Window - size */
@@ -53,36 +62,28 @@ static int callisto_platform_parse_dt(struct platform_device *pdev,
/* Window - enable */
writel(0x80000003, addr + 0xc);
}
- return 0;
-}
-
-static int callisto_platform_after_probe(struct gxp_dev *gxp)
-{
- struct callisto_dev *callisto = to_callisto_dev(gxp);
-
- if (gxp_is_direct_mode(gxp))
- return 0;
-
- gxp_usage_stats_init(gxp);
- return gxp_mcu_init(gxp, &callisto->mcu);
-}
-
-static void callisto_platform_before_remove(struct gxp_dev *gxp)
-{
- struct callisto_dev *callisto = to_callisto_dev(gxp);
- if (gxp_is_direct_mode(gxp))
- return;
+ if (!of_find_property(dev->of_node, "gxp,shareability", NULL)) {
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = of_property_read_u32_index(dev->of_node,
+ "gxp,shareability", 0, &reg);
+ if (ret)
+ goto err;
+ ret = gxp_mmu_set_shareability(dev, reg);
+err:
+ if (ret)
+ dev_warn(dev, "Failed to enable shareability: %d\n", ret);
- gxp_mcu_exit(&callisto->mcu);
- gxp_usage_stats_exit(gxp);
+ return 0;
}
static int callisto_request_power_states(struct gxp_client *client,
struct gxp_power_states power_states)
{
struct gxp_dev *gxp = client->gxp;
- struct callisto_dev *callisto = to_callisto_dev(gxp);
+ struct gxp_mcu *mcu = gxp_mcu_of(gxp);
struct gxp_uci_command cmd;
int ret;
@@ -97,7 +98,7 @@ static int callisto_request_power_states(struct gxp_client *client,
cmd.client_id = client->vd->client_id;
ret = gxp_uci_send_command(
- &callisto->mcu.uci, client->vd, &cmd,
+ &mcu->uci, client->vd, &cmd,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
@@ -105,24 +106,15 @@ static int callisto_request_power_states(struct gxp_client *client,
return ret;
}
-static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd)
+static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
- struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
+ struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
int pasid, ret;
- u8 operation = KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX;
-
- if (gxp_is_direct_mode(gxp))
- return 0;
-
- if (vd->tpu_client_id >= 0)
- operation |= KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX;
pasid = gxp_iommu_aux_get_pasid(gxp, vd->domain);
/* TODO(b/255706432): Adopt vd->slice_index after the firmware supports this. */
ret = gxp_kci_allocate_vmbox(kci, pasid, vd->num_cores,
- /*slice_index=*/0, vd->tpu_client_id,
- operation);
+ /*slice_index=*/0);
if (ret) {
if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
dev_err(gxp->dev,
@@ -145,16 +137,11 @@ static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp,
return 0;
}
-static void
-callisto_platform_before_vd_block_unready(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd)
+static void release_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
- struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
+ struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
int ret;
- if (gxp_is_direct_mode(gxp))
- return;
-
if (vd->client_id < 0)
return;
@@ -177,6 +164,103 @@ callisto_platform_before_vd_block_unready(struct gxp_dev *gxp,
vd->client_id = -1;
}
+static int link_offload_vmbox(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ u32 offload_client_id, u8 offload_chip_type)
+{
+ struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
+ int ret;
+
+ ret = gxp_kci_link_unlink_offload_vmbox(
+ kci, vd->client_id, offload_client_id, offload_chip_type, true);
+ if (ret) {
+ if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
+ dev_err(gxp->dev,
+ "Failed to link offload VMBox for client %d, offload client %u, offload chip type %d: %d",
+ vd->client_id, offload_client_id,
+ offload_chip_type, ret);
+ return ret;
+ }
+
+ /*
+ * TODO(241057541): Remove this conditional branch after the firmware side
+ * implements handling link_offload_vmbox command.
+ */
+ dev_info(
+ gxp->dev,
+ "Linking offload VMBox is not implemented from the firmware side");
+ }
+
+ return 0;
+}
+
+static void unlink_offload_vmbox(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ u32 offload_client_id, u8 offload_chip_type)
+{
+ struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
+ int ret;
+
+ ret = gxp_kci_link_unlink_offload_vmbox(kci, vd->client_id,
+ offload_client_id,
+ offload_chip_type, false);
+ if (ret) {
+ /*
+ * TODO(241057541): Remove this conditional branch after the firmware side
+ * implements handling allocate_vmbox command.
+ */
+ if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED)
+ dev_info(
+ gxp->dev,
+ "Unlinking offload VMBox is not implemented from the firmware side");
+ else
+ dev_err(gxp->dev,
+ "Failed to unlink offload VMBox for client %d, offload client %u, offload chip type %d: %d",
+ vd->client_id, offload_client_id,
+ offload_chip_type, ret);
+ }
+}
+
+static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ int ret;
+
+ if (gxp_is_direct_mode(gxp))
+ return 0;
+
+ ret = allocate_vmbox(gxp, vd);
+ if (ret)
+ return ret;
+
+ if (vd->tpu_client_id >= 0) {
+ ret = link_offload_vmbox(gxp, vd, vd->tpu_client_id,
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU);
+ if (ret)
+ goto err_release_vmbox;
+ }
+
+ return 0;
+
+err_release_vmbox:
+ release_vmbox(gxp, vd);
+ return ret;
+}
+
+static void
+callisto_platform_before_vd_block_unready(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ if (gxp_is_direct_mode(gxp))
+ return;
+ if (vd->client_id < 0)
+ return;
+ if (vd->tpu_client_id >= 0)
+ unlink_offload_vmbox(gxp, vd, vd->tpu_client_id,
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU);
+ release_vmbox(gxp, vd);
+}
+
static int callisto_wakelock_after_blk_on(struct gxp_dev *gxp)
{
struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
@@ -195,43 +279,49 @@ static void callisto_wakelock_before_blk_off(struct gxp_dev *gxp)
gxp_mcu_firmware_stop(mcu_fw);
}
+#ifdef HAS_TPU_EXT
+
+static int get_tpu_client_id(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_offload_info offload_info;
+ struct edgetpu_ext_client_info tpu_info;
+ int ret;
+
+ tpu_info.tpu_file = client->tpu_file;
+ ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ START_OFFLOAD, &tpu_info, &offload_info);
+ if (ret)
+ return ret;
+
+ return offload_info.client_id;
+}
+
static int callisto_after_map_tpu_mbx_queue(struct gxp_dev *gxp,
struct gxp_client *client)
{
- struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
+ struct gxp_virtual_device *vd = client->vd;
int tpu_client_id = -1, ret;
- /*
- * TODO(b/247923533): Get a client ID from the TPU kernel driver and remove this workaround
- * condition.
- */
- if (tpu_client_id < 0)
+ if (gxp_is_direct_mode(gxp))
return 0;
- if (client->vd->client_id >= 0) {
- ret = gxp_kci_allocate_vmbox(
- kci, client->vd->client_id, 0, 0, tpu_client_id,
- KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX);
- if (ret) {
- if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
- dev_err(gxp->dev,
- "Failed to link TPU VMbox client %d, TPU client %d: %d",
- client->vd->client_id, tpu_client_id,
- ret);
- return ret;
- }
-
- /*
- * TODO(241057541): Remove this conditional branch after the firmware side
- * implements handling allocate_vmbox command.
- */
- dev_info(
- gxp->dev,
- "Linking TPU VNMBox is not implemented from the firmware side");
- }
+ tpu_client_id = get_tpu_client_id(client);
+ if (tpu_client_id < 0) {
+ dev_err(gxp->dev, "Failed to get a TPU client ID: %d",
+ tpu_client_id);
+ return tpu_client_id;
}
- client->vd->tpu_client_id = tpu_client_id;
+ if (vd->client_id >= 0) {
+ ret = link_offload_vmbox(gxp, vd, tpu_client_id,
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU);
+ if (ret)
+ return ret;
+ }
+
+ vd->tpu_client_id = tpu_client_id;
return 0;
}
@@ -239,47 +329,42 @@ static int callisto_after_map_tpu_mbx_queue(struct gxp_dev *gxp,
static void callisto_before_unmap_tpu_mbx_queue(struct gxp_dev *gxp,
struct gxp_client *client)
{
- /*
- * We don't have to care about the case that the client releases a TPU vmbox which is
- * linked to the DSP client without notifying the DSP MCU firmware because the client will
- * always release the DSP vmbox earlier than the TPU vmbox. (i.e, the `release_vmbox` KCI
- * command will be always sent to the DSP MCU firmware to release the DSP vmbox before
- * releasing the TPU vmbox and the firmware will stop TPU offloading.) Also, from Callisto,
- * we don't have to care about mapping/unmapping the TPU mailbox buffer here neither.
- * Therefore, just unset the TPU client ID here.
- */
- client->vd->tpu_client_id = -1;
+ struct gxp_virtual_device *vd = client->vd;
+
+ if (vd->client_id >= 0 && vd->tpu_client_id >= 0)
+ unlink_offload_vmbox(gxp, vd, vd->tpu_client_id,
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU);
+ vd->tpu_client_id = -1;
}
+#endif /* HAS_TPU_EXT */
+
static int gxp_platform_probe(struct platform_device *pdev)
{
struct callisto_dev *callisto =
devm_kzalloc(&pdev->dev, sizeof(*callisto), GFP_KERNEL);
+ struct gxp_mcu_dev *mcu_dev = &callisto->mcu_dev;
+ struct gxp_dev *gxp;
if (!callisto)
return -ENOMEM;
- callisto->mode = callisto_dev_parse_work_mode(callisto_work_mode_name);
+ gxp_mcu_dev_init(mcu_dev);
- callisto->gxp.parse_dt = callisto_platform_parse_dt;
- callisto->gxp.after_probe = callisto_platform_after_probe;
- callisto->gxp.before_remove = callisto_platform_before_remove;
- callisto->gxp.handle_ioctl = gxp_mcu_ioctl;
- callisto->gxp.handle_mmap = gxp_mcu_mmap;
- callisto->gxp.after_vd_block_ready =
- callisto_platform_after_vd_block_ready;
- callisto->gxp.before_vd_block_unready =
+ gxp = &mcu_dev->gxp;
+ gxp->parse_dt = callisto_platform_parse_dt;
+ gxp->after_vd_block_ready = callisto_platform_after_vd_block_ready;
+ gxp->before_vd_block_unready =
callisto_platform_before_vd_block_unready;
- callisto->gxp.request_power_states = callisto_request_power_states;
- callisto->gxp.wakelock_after_blk_on = callisto_wakelock_after_blk_on;
- callisto->gxp.wakelock_before_blk_off =
- callisto_wakelock_before_blk_off;
- callisto->gxp.after_map_tpu_mbx_queue =
- callisto_after_map_tpu_mbx_queue;
- callisto->gxp.before_unmap_tpu_mbx_queue =
- callisto_before_unmap_tpu_mbx_queue;
-
- return gxp_common_platform_probe(pdev, &callisto->gxp);
+ gxp->request_power_states = callisto_request_power_states;
+ gxp->wakelock_after_blk_on = callisto_wakelock_after_blk_on;
+ gxp->wakelock_before_blk_off = callisto_wakelock_before_blk_off;
+#ifdef HAS_TPU_EXT
+ gxp->after_map_tpu_mbx_queue = callisto_after_map_tpu_mbx_queue;
+ gxp->before_unmap_tpu_mbx_queue = callisto_before_unmap_tpu_mbx_queue;
+#endif
+
+ return gxp_common_platform_probe(pdev, gxp);
}
static int gxp_platform_remove(struct platform_device *pdev)
@@ -318,39 +403,6 @@ static void __exit gxp_platform_exit(void)
gxp_common_platform_unreg_sscd();
}
-struct gxp_mcu *gxp_mcu_of(struct gxp_dev *gxp)
-{
- return &(to_callisto_dev(gxp)->mcu);
-}
-
-struct gxp_mcu_firmware *gxp_mcu_firmware_of(struct gxp_dev *gxp)
-{
- return &(to_callisto_dev(gxp)->mcu.fw);
-}
-
-bool gxp_is_direct_mode(struct gxp_dev *gxp)
-{
- struct callisto_dev *callisto = to_callisto_dev(gxp);
-
- return callisto->mode == DIRECT;
-}
-
-enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
-{
- if (!strcmp(zuma_revision, "a0"))
- return GXP_CHIP_A0;
- if (!strcmp(zuma_revision, "b0"))
- return GXP_CHIP_B0;
- return GXP_CHIP_ANY;
-}
-
-enum callisto_work_mode callisto_dev_parse_work_mode(const char *work_mode)
-{
- if (!strcmp(work_mode, "mcu"))
- return MCU;
- return DIRECT;
-}
-
MODULE_DESCRIPTION("Google GXP platform driver");
MODULE_LICENSE("GPL v2");
MODULE_INFO(gitinfo, GIT_REPO_TAG);
diff --git a/callisto-platform.h b/callisto-platform.h
index 95aa534..e7d017e 100644
--- a/callisto-platform.h
+++ b/callisto-platform.h
@@ -8,27 +8,13 @@
#ifndef __CALLISTO_PLATFORM_H__
#define __CALLISTO_PLATFORM_H__
-#include "gxp-internal.h"
-#include "gxp-mcu.h"
+#include "gxp-mcu-platform.h"
-#define to_callisto_dev(gxp) container_of(gxp, struct callisto_dev, gxp)
-
-#if IS_ENABLED(CONFIG_GXP_TEST)
-/* expose this variable to have unit tests set it dynamically */
-extern char *callisto_work_mode_name;
-#endif
-
-enum callisto_work_mode {
- MCU = 0,
- DIRECT = 1,
-};
+#define to_callisto_dev(gxp) \
+ container_of(to_mcu_dev(gxp), struct callisto_dev, mcu_dev)
struct callisto_dev {
- struct gxp_dev gxp;
- struct gxp_mcu mcu;
- enum callisto_work_mode mode;
+ struct gxp_mcu_dev mcu_dev;
};
-enum callisto_work_mode callisto_dev_parse_work_mode(const char *work_mode);
-
#endif /* __CALLISTO_PLATFORM_H__ */
diff --git a/callisto/config.h b/callisto/config.h
index 253e266..7a62b47 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -44,5 +44,6 @@
#include "csrs.h"
#include "iova.h"
#include "lpm.h"
+#include "mailbox-regs.h"
#endif /* __CALLISTO_CONFIG_H__ */
diff --git a/callisto/csrs.h b/callisto/csrs.h
index 7a4c9dd..f1bfcd1 100644
--- a/callisto/csrs.h
+++ b/callisto/csrs.h
@@ -92,4 +92,10 @@ enum gxp_csrs {
#define PLL_CON0_PLL_AUR 0x100
#define PLL_CON0_NOC_USER 0x610
+#define GXP_SYSREG_AUR0_SHAREABILITY 0x0
+#define GXP_SYSREG_AUR1_SHAREABILITY 0x4
+#define SHAREABLE_WRITE (1 << 13)
+#define SHAREABLE_READ (1 << 12)
+#define INNER_SHAREABLE 1
+
#endif /* __CALLISTO_CSRS_H__ */
diff --git a/callisto/mailbox-regs.h b/callisto/mailbox-regs.h
new file mode 100644
index 0000000..c90bafa
--- /dev/null
+++ b/callisto/mailbox-regs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP mailbox registers.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __CALLISTO_MAILBOX_REGS_H__
+#define __CALLISTO_MAILBOX_REGS_H__
+
+/* Mailbox CSRs */
+#define MBOX_MCUCTLR_OFFSET 0x0000
+
+#define MBOX_INTGR0_OFFSET 0x0020
+#define MBOX_INTMSR0_OFFSET 0x0030
+
+#define MBOX_INTCR1_OFFSET 0x0044
+#define MBOX_INTMR1_OFFSET 0x0048
+#define MBOX_INTSR1_OFFSET 0x004C
+#define MBOX_INTMSR1_OFFSET 0x0050
+
+/* Mailbox Shared Data Registers */
+#define MBOX_DATA_REG_BASE 0x0080
+
+#define MBOX_DATA_STATUS_OFFSET 0x00
+#define MBOX_DATA_DESCRIPTOR_ADDR_OFFSET 0x04
+#define MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET 0x08
+#define MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
+
+#endif /* __CALLISTO_MAILBOX_REGS_H__ */
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
index 7f6d2f0..2f34448 100644
--- a/gcip-kernel-driver/drivers/gcip/Makefile
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -6,8 +6,8 @@
CONFIG_GCIP ?= m
obj-$(CONFIG_GCIP) += gcip.o
-gcip-objs := gcip-firmware.o gcip-image-config.o gcip-kci.o gcip-mailbox.o \
- gcip-mem-pool.o gcip-telemetry.o
+gcip-objs := gcip-domain-pool.o gcip-firmware.o gcip-image-config.o gcip-kci.o \
+ gcip-mailbox.o gcip-mem-pool.o gcip-telemetry.o
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
new file mode 100644
index 0000000..2341b52
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-domain-pool.h>
+
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size)
+{
+ unsigned int i;
+ struct iommu_domain *domain;
+
+ pool->size = size;
+ pool->dev = dev;
+
+ if (!size)
+ return 0;
+
+ dev_dbg(pool->dev, "Initializing domain pool with %u domains\n", size);
+
+ ida_init(&pool->idp);
+ pool->array = vzalloc(sizeof(*pool->array) * size);
+ if (!pool->array) {
+ ida_destroy(&pool->idp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i++) {
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(pool->dev, "Failed to allocate iommu domain %d of %u\n", i + 1,
+ size);
+ gcip_domain_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ pool->array[i] = domain;
+ }
+ return 0;
+}
+
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
+{
+ int id;
+
+ if (!pool->size)
+ return iommu_domain_alloc(pool->dev->bus);
+
+ id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+
+ if (id < 0) {
+ dev_err(pool->dev, "No more domains available from pool of size %u\n", pool->size);
+ return NULL;
+ }
+
+ dev_dbg(pool->dev, "Allocated domain from pool with id = %d\n", id);
+
+ return pool->array[id];
+}
+
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain)
+{
+ int id;
+
+ if (!pool->size) {
+ iommu_domain_free(domain);
+ return;
+ }
+ for (id = 0; id < pool->size; id++) {
+ if (pool->array[id] == domain) {
+ dev_dbg(pool->dev, "Released domain from pool with id = %d\n", id);
+ ida_free(&pool->idp, id);
+ return;
+ }
+ }
+ dev_err(pool->dev, "Domain not found in pool\n");
+}
+
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool)
+{
+ int i;
+
+ if (!pool->size)
+ return;
+
+ dev_dbg(pool->dev, "Destroying domain pool with %u domains\n", pool->size);
+
+ for (i = 0; i < pool->size; i++) {
+ if (pool->array[i])
+ iommu_domain_free(pool->array[i]);
+ }
+
+ ida_destroy(&pool->idp);
+ vfree(pool->array);
+}
diff --git a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
new file mode 100644
index 0000000..b740bf9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_DOMAIN_POOL_H__
+#define __GCIP_DOMAIN_POOL_H__
+
+#include <linux/idr.h>
+#include <linux/iommu.h>
+
+struct gcip_domain_pool {
+ struct ida idp; /* ID allocator to keep track of used domains. */
+ /*
+ * Size of the pool. Can be set to 0, in which case the implementation will fall back to
+ * dynamic domain allocation using the IOMMU API directly.
+ */
+ unsigned int size;
+ struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
+ struct device *dev; /* The device used for logging warnings/errors. */
+};
+
+/*
+ * Initializes a domain pool.
+ *
+ * @dev: pointer to device structure.
+ * @pool: caller-allocated pool structure.
+ * @size: size of the pre-allocated domains pool.
+ * Set to zero to fall back to dynamically allocated domains.
+ *
+ * returns 0 on success or negative error value.
+ */
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size);
+
+/*
+ * Allocates a domain from the pool
+ * returns NULL on error.
+ */
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool);
+
+/* Releases a domain from the pool. */
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain);
+
+/* Cleans up all resources used by the domain pool. */
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool);
+
+#endif /* __GCIP_DOMAIN_POOL_H__ */
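How the new GCIP domain pool API is meant to be used — a minimal sketch, not part of this change, assuming a probed `struct device *dev` and illustrative names:

	#include <gcip/gcip-domain-pool.h>

	/* Sketch: pre-allocate 4 IOMMU domains at probe time. */
	static int example_pool_setup(struct device *dev)
	{
		struct gcip_domain_pool pool;
		struct iommu_domain *domain;
		int ret;

		/* size = 0 would skip pre-allocation and fall back to the IOMMU API. */
		ret = gcip_domain_pool_init(dev, &pool, 4);
		if (ret)
			return ret;

		domain = gcip_domain_pool_alloc(&pool);	/* take one pre-allocated domain */
		if (!domain) {
			gcip_domain_pool_destroy(&pool);
			return -ENOMEM;
		}

		/* ... attach the domain and use it ... */

		gcip_domain_pool_free(&pool, domain);	/* return it to the pool */
		gcip_domain_pool_destroy(&pool);	/* free all pre-allocated domains */
		return 0;
	}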
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
index 74e44ae..bda1b40 100644
--- a/gcip-kernel-driver/include/gcip/gcip-kci.h
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -93,10 +93,10 @@ enum gcip_kci_code {
GCIP_KCI_CODE_GET_USAGE = 12,
GCIP_KCI_CODE_NOTIFY_THROTTLING = 13,
GCIP_KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
- /* TODO(b/237955391): Update this code after decided. */
GCIP_KCI_CODE_ALLOCATE_VMBOX = 15,
- /* TODO(b/237955391): Update this code after decided. */
GCIP_KCI_CODE_RELEASE_VMBOX = 16,
+ GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17,
+ GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18,
GCIP_KCI_CODE_RKCI_ACK = 256,
};
@@ -140,6 +140,11 @@ enum gcip_kci_error {
GCIP_KCI_ERROR_UNAUTHENTICATED = 16,
};
+/* Type of the chip of the offload vmbox to be linked. */
+enum gcip_kci_offload_chip_type {
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU = 0,
+};
+
/*
* Reason for triggering the CMD doorbell.
* The CMD doorbell is triggered either when a CMD is pushed or the RESP that might blocks the FW is
diff --git a/gsx01-mailbox-driver.c b/gsx01-mailbox-driver.c
new file mode 100644
index 0000000..f0090f4
--- /dev/null
+++ b/gsx01-mailbox-driver.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP hardware-based mailbox csr driver implementation for GSX01.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
+
+#include "gxp-mailbox-driver.c"
+
+static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->csr_reg_base + reg_offset);
+}
+
+static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->csr_reg_base + reg_offset);
+}
+
+void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
+{
+ csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
+}
+
+void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
+ u32 int_mask)
+{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling to the device to read from them. This avoids the scenario
+ * where the interrupt trigger write gets delivered to the MBX HW before
+ * the DRAM transactions made it to DRAM since they're Normal
+ * transactions and can be re-ordered and backed off behind other
+ * transfers.
+ */
+ wmb();
+
+ csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
+}
+
+void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
+}
+
+void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
+}
diff --git a/gxp-client.c b/gxp-client.c
index a08547d..03446ab 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -30,7 +30,6 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->has_vd_wakelock = false;
client->requested_states = off_states;
client->vd = NULL;
- client->mbx_desc.mapped = false;
return client;
}
@@ -54,13 +53,22 @@ void gxp_client_destroy(struct gxp_client *client)
gxp_eventfd_put(client->mb_eventfds[core]);
}
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
if (client->tpu_file) {
+ if (client->vd) {
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+ /*
+ * TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop
+ */
+ if (gxp_is_direct_mode(gxp) || 1)
+ gxp_dma_unmap_tpu_buffer(gxp,
+ client->vd->domain,
+ client->mbx_desc);
+ }
fput(client->tpu_file);
client->tpu_file = NULL;
- if (client->vd)
- gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain,
- client->mbx_desc);
}
#endif
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 5128b20..c4e2102 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -518,11 +518,27 @@ out:
static int gxp_get_specs(struct gxp_client *client,
struct gxp_specs_ioctl __user *argp)
{
+ struct buffer_data *logging_buff_data;
+ struct gxp_dev *gxp = client->gxp;
struct gxp_specs_ioctl ibuf = {
.core_count = GXP_NUM_CORES,
+ .features = !gxp_is_direct_mode(client->gxp),
+ .telemetry_buffer_size = 0,
+ .secure_telemetry_buffer_size =
+ (u8)(SECURE_CORE_TELEMETRY_BUFFER_SIZE /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE),
.memory_per_core = client->gxp->memory_per_core,
};
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr)) {
+ logging_buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ if (!IS_ERR_OR_NULL(logging_buff_data)) {
+ ibuf.telemetry_buffer_size =
+ (u8)(logging_buff_data->size /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ }
+ }
+
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
@@ -855,11 +871,6 @@ static int map_tpu_mbx_queue(struct gxp_client *client,
u32 core_count;
int ret = 0;
- if (client->mbx_desc.mapped) {
- dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
- return -EBUSY;
- }
-
down_read(&gxp->vd_semaphore);
core_count = client->vd->num_cores;
@@ -906,7 +917,6 @@ static int map_tpu_mbx_queue(struct gxp_client *client,
client->mbx_desc.phys_core_list = phys_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
- client->mbx_desc.mapped = true;
goto out_free;
@@ -937,7 +947,6 @@ static void unmap_tpu_mbx_queue(struct gxp_client *client,
edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
- client->mbx_desc.mapped = false;
}
static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
@@ -963,6 +972,12 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
goto out_unlock_client_semaphore;
}
+ if (client->tpu_file) {
+ dev_err(gxp->dev, "Mapping/linking TPU mailbox information already exists");
+ ret = -EBUSY;
+ goto out_unlock_client_semaphore;
+ }
+
/*
* If someone is attacking us through this interface -
* it's possible that ibuf.tpu_fd here is already a different file from the one passed to
@@ -983,7 +998,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (gxp_is_direct_mode(gxp) || 1) {
ret = map_tpu_mbx_queue(client, &ibuf);
if (ret)
- goto out_unlock_client_semaphore;
+ goto err_fput_tpu_file;
}
if (gxp->after_map_tpu_mbx_queue) {
@@ -996,6 +1011,9 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
err_unmap_tpu_mbx_queue:
unmap_tpu_mbx_queue(client, &ibuf);
+err_fput_tpu_file:
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
out_unlock_client_semaphore:
up_write(&client->semaphore);
@@ -1619,6 +1637,12 @@ static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
}
switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
case GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY:
return gxp_core_telemetry_mmap_buffers_legacy(
client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
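For reference, a hedged user-space sketch of reaching the new non-legacy telemetry mmap path through the offsets added above; error handling is trimmed and the helper name is illustrative:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "gxp.h"

	/* Map all cores' logging buffers through the new fixed offset. */
	static void *map_core_logs(int fd)
	{
		struct gxp_specs_ioctl specs = {0};
		size_t per_core, total;

		if (ioctl(fd, GXP_GET_SPECS, &specs))
			return MAP_FAILED;
		/* Sizes are reported in units of GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE. */
		per_core = (size_t)specs.telemetry_buffer_size *
			   GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE;
		total = per_core * specs.core_count;
		return mmap(NULL, total, PROT_READ, MAP_SHARED, fd,
			    GXP_MMAP_CORE_LOG_BUFFER_OFFSET);
	}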
diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c
index 9b8cf85..eafa4b7 100644
--- a/gxp-core-telemetry.c
+++ b/gxp-core-telemetry.c
@@ -86,7 +86,7 @@ int gxp_core_telemetry_init(struct gxp_dev *gxp)
gxp->core_telemetry_mgr = mgr;
gxp_core_telemetry_buffer_size = ALIGN(gxp_core_telemetry_buffer_size,
- CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
if ((gxp_core_telemetry_buffer_size < CORE_TELEMETRY_DEFAULT_BUFFER_SIZE) ||
(gxp_core_telemetry_buffer_size > CORE_TELEMETRY_MAX_BUFFER_SIZE)) {
dev_warn(gxp->dev,
@@ -136,10 +136,18 @@ struct telemetry_vma_data {
static void telemetry_vma_open(struct vm_area_struct *vma)
{
+ struct gxp_dev *gxp;
struct telemetry_vma_data *vma_data =
(struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
+ /*
+ * vma_ops are required only for legacy telemetry flow
+ * to keep track of buffer allocation during mmap and
+ * buffer free during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+ gxp = vma_data->gxp;
mutex_lock(&gxp->core_telemetry_mgr->lock);
refcount_inc(&vma_data->ref_count);
@@ -149,11 +157,22 @@ static void telemetry_vma_open(struct vm_area_struct *vma)
static void telemetry_vma_close(struct vm_area_struct *vma)
{
+ struct gxp_dev *gxp;
+ struct buffer_data *buff_data;
+ u8 type;
struct telemetry_vma_data *vma_data =
(struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
- struct buffer_data *buff_data = vma_data->buff_data;
- u8 type = vma_data->type;
+ /*
+ * vma_ops are required only for legacy telemetry flow
+ * to keep track of buffer allocation during mmap and
+ * buffer free during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+
+ gxp = vma_data->gxp;
+ buff_data = vma_data->buff_data;
+ type = vma_data->type;
mutex_lock(&gxp->core_telemetry_mgr->lock);
@@ -192,6 +211,7 @@ out:
mutex_unlock(&gxp->core_telemetry_mgr->lock);
}
+/* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
static const struct vm_operations_struct telemetry_vma_ops = {
.open = telemetry_vma_open,
.close = telemetry_vma_close,
@@ -361,11 +381,64 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
out:
vma->vm_pgoff = orig_pgoff;
+ /* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
vma->vm_ops = &telemetry_vma_ops;
return ret;
}
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct buffer_data *buff_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+
+ if (!gxp->core_telemetry_mgr)
+ return -ENODEV;
+
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ else if (type == GXP_TELEMETRY_TYPE_TRACING)
+ buff_data = gxp->core_telemetry_mgr->tracing_buff_data;
+ else
+ return -EINVAL;
+ /*
+ * Total size must divide evenly into a GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
+ * aligned buffer per core.
+ */
+ if (!total_size ||
+ total_size % (GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE * GXP_NUM_CORES)) {
+ dev_warn(
+ gxp->dev,
+ "Invalid vma size(%lu bytes) requested for telemetry\n",
+ total_size);
+ return -EINVAL;
+ }
+ /*
+ * Per core log buffer size should be equal to pre allocated
+ * aligned buffer per core.
+ */
+ if (size != buff_data->size) {
+ dev_warn(
+ gxp->dev,
+ "Invalid per core requested telemetry buffer size(%lu bytes)\n",
+ size);
+ return -EINVAL;
+ }
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
+ if (ret)
+ goto err;
+ vma->vm_private_data = NULL;
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return 0;
+err:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
struct vm_area_struct *vma)
{
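The two -EINVAL checks in gxp_core_telemetry_mmap_buffers() above reduce to simple arithmetic; a standalone mirror of the logic, with illustrative numbers in the comment:

	/* e.g. unit = 64 KiB, num_cores = 4: total must be a multiple of 256 KiB,
	 * and total / 4 must equal the pre-allocated per-core buffer size.
	 */
	static bool telemetry_vma_size_ok(size_t total, size_t per_core_buf,
					  unsigned int num_cores, size_t unit)
	{
		if (!total || total % (unit * num_cores))
			return false;
		return total / num_cores == per_core_buf;
	}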
diff --git a/gxp-core-telemetry.h b/gxp-core-telemetry.h
index 4568d5c..9a89c0e 100644
--- a/gxp-core-telemetry.h
+++ b/gxp-core-telemetry.h
@@ -16,15 +16,16 @@
#include "gxp-internal.h"
#include "gxp.h"
-/* Core telemetry buffer size is a multiple of 64 kB */
-#define CORE_TELEMETRY_BUFFER_UNIT_SIZE SZ_64K
-#define CORE_TELEMETRY_DEFAULT_BUFFER_SIZE CORE_TELEMETRY_BUFFER_UNIT_SIZE
+/* Default telemetry buffer size per core */
+#define CORE_TELEMETRY_DEFAULT_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
/**
* Maximum core telemetry buffer size that can be represented by GXP_GET_SPECS
* ioctl. 8 bits are reserved to represent telemetry buffer size in GXP_GET_SPECS
- * ioctl and the size is represented in unit of CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ * ioctl and the size is represented in unit of GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
*/
-#define CORE_TELEMETRY_MAX_BUFFER_SIZE (U8_MAX * CORE_TELEMETRY_BUFFER_UNIT_SIZE)
+#define CORE_TELEMETRY_MAX_BUFFER_SIZE (U8_MAX * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE)
+/* Secure telemetry buffer size per core */
+#define SECURE_CORE_TELEMETRY_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
struct gxp_core_telemetry_work {
struct work_struct work;
@@ -60,6 +61,22 @@ struct gxp_core_telemetry_manager {
int gxp_core_telemetry_init(struct gxp_dev *gxp);
/**
+ * gxp_core_telemetry_mmap_buffers() - Maps the preallocated telemetry
+ * buffers to the user-space vma.
+ * @gxp: The GXP device to create the buffers for.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ * @vma: The vma from user-space which all cores' buffers will be mapped into.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
+ * check this, since this function is called based on user-input.
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid.
+ */
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
* gxp_core_telemetry_mmap_buffers_legacy() - Allocate a telemetry buffer for
* each core and map them to their
* core and the user-space vma
diff --git a/gxp-domain-pool.c b/gxp-domain-pool.c
index ada1775..a0f9ead 100644
--- a/gxp-domain-pool.c
+++ b/gxp-domain-pool.c
@@ -5,146 +5,75 @@
* Copyright (C) 2022 Google LLC
*/
-#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/slab.h>
+#include <gcip/gcip-domain-pool.h>
+
#include "gxp-dma.h"
#include "gxp-domain-pool.h"
-#include "gxp-internal.h"
-
-static struct gxp_iommu_domain *gxp_domain_alloc(struct gxp_dev *gxp)
-{
- struct iommu_domain *domain;
- struct gxp_iommu_domain *gdomain;
-
- gdomain = kmalloc(sizeof(*gdomain), GFP_KERNEL);
- if (!gdomain)
- return ERR_PTR(-ENOMEM);
-
- domain = iommu_domain_alloc(gxp->dev->bus);
- if (!domain) {
- kfree(gdomain);
- return ERR_PTR(-ENOMEM);
- }
- gdomain->domain = domain;
-
- return gdomain;
-}
-
-static void gxp_domain_free(struct gxp_iommu_domain *gdomain)
-{
- iommu_domain_free(gdomain->domain);
- kfree(gdomain);
-}
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size)
{
- unsigned int i;
- struct gxp_iommu_domain *gdomain;
- int __maybe_unused ret;
+ int ret = gcip_domain_pool_init(gxp->dev, pool, size);
+ __maybe_unused int i;
- pool->size = size;
- pool->gxp = gxp;
+ if (ret)
+ return ret;
- if (!size)
- return 0;
-
- dev_dbg(pool->gxp->dev, "Initializing domain pool with %u domains\n", size);
-
- ida_init(&pool->idp);
- pool->array = vzalloc(sizeof(*pool->array) * size);
- if (!pool->array) {
- dev_err(gxp->dev, "Failed to allocate memory for domain pool array\n");
- return -ENOMEM;
- }
- for (i = 0; i < size; i++) {
- gdomain = gxp_domain_alloc(pool->gxp);
- if (IS_ERR(gdomain)) {
- dev_err(pool->gxp->dev,
- "Failed to allocate gxp iommu domain %d of %u\n",
- i + 1, size);
- gxp_domain_pool_destroy(pool);
- return -ENOMEM;
- }
#if IS_ENABLED(CONFIG_GXP_GEM5)
+ for (i = 0; i < size; i++) {
+ struct iommu_domain *domain = pool->array[i];
+
/*
* Gem5 uses arm-smmu-v3 which requires domain finalization to do iommu map. Calling
* iommu_aux_attach_device to finalize the allocated domain and detach the device
* right after that.
*/
- ret = iommu_aux_attach_device(gdomain->domain, pool->gxp->dev);
+ ret = iommu_aux_attach_device(domain, gxp->dev);
if (ret) {
dev_err(gxp->dev,
"Failed to attach device to iommu domain %d of %u, ret=%d\n",
i + 1, size, ret);
- gxp_domain_free(gdomain);
gxp_domain_pool_destroy(pool);
return ret;
}
- iommu_aux_detach_device(gdomain->domain, pool->gxp->dev);
+ iommu_aux_detach_device(domain, gxp->dev);
+ }
#endif /* CONFIG_GXP_GEM5 */
- pool->array[i] = gdomain;
- }
return 0;
}
-struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool)
{
- int id;
-
- if (!pool->size)
- return gxp_domain_alloc(pool->gxp);
+ struct iommu_domain *domain = gcip_domain_pool_alloc(pool);
+ struct gxp_iommu_domain *gdomain;
- id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+ if (!domain)
+ return NULL;
- if (id < 0) {
- dev_err(pool->gxp->dev,
- "No more domains available from pool of size %u\n",
- pool->size);
+ gdomain = kmalloc(sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain) {
+ gcip_domain_pool_free(pool, domain);
return NULL;
}
- dev_dbg(pool->gxp->dev, "Allocated domain from pool with id = %d\n", id);
+ gdomain->domain = domain;
- return pool->array[id];
+ return gdomain;
}
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct gxp_iommu_domain *gdomain)
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain)
{
- int id;
-
- if (!pool->size) {
- gxp_domain_free(gdomain);
- return;
- }
- for (id = 0; id < pool->size; id++) {
- if (pool->array[id] == gdomain) {
- dev_dbg(pool->gxp->dev, "Released domain from pool with id = %d\n", id);
- ida_free(&pool->idp, id);
- return;
- }
- }
- dev_err(pool->gxp->dev, "%s: domain not found in pool", __func__);
+ gcip_domain_pool_free(pool, gdomain->domain);
+ kfree(gdomain);
}
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool)
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool)
{
- int i;
-
- if (!pool->size)
- return;
-
- dev_dbg(pool->gxp->dev, "Destroying domain pool with %u domains\n", pool->size);
-
- for (i = 0; i < pool->size; i++) {
- if (pool->array[i])
- gxp_domain_free(pool->array[i]);
- }
-
- ida_destroy(&pool->idp);
- vfree(pool->array);
+ gcip_domain_pool_destroy(pool);
}
diff --git a/gxp-domain-pool.h b/gxp-domain-pool.h
index 2a262ff..ad2d38a 100644
--- a/gxp-domain-pool.h
+++ b/gxp-domain-pool.h
@@ -8,22 +8,9 @@
#ifndef __GXP_DOMAIN_POOL_H__
#define __GXP_DOMAIN_POOL_H__
-#include <linux/idr.h>
-#include <linux/iommu.h>
-
-#include "gxp-internal.h"
-
-struct gxp_domain_pool {
- struct ida idp; /* ID allocator to keep track of used domains. */
- /*
- * Size of the pool. Can be set to 0, in which case the implementation will fall back to
- * dynamic domain allocation using the IOMMU API directly.
- */
- unsigned int size;
- struct gxp_iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
- struct gxp_dev *gxp; /* The gxp device used for logging warnings/errors. */
-};
+#include <gcip/gcip-domain-pool.h>
+#include "gxp-dma.h"
/*
* Initializes a domain pool.
@@ -35,19 +22,19 @@ struct gxp_domain_pool {
*
* returns 0 on success or negative error value.
*/
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size);
/*
* Allocates a domain from the pool
* returns NULL on error.
*/
-struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool);
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool);
/* Releases a domain from the pool. */
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct gxp_iommu_domain *domain);
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain);
/* Cleans up all resources used by the domain pool. */
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool);
-
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool);
#endif /* __GXP_DOMAIN_POOL_H__ */
diff --git a/gxp-internal.h b/gxp-internal.h
index 91c78a0..0415163 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -37,7 +37,6 @@ enum gxp_chip_revision {
struct gxp_tpu_mbx_desc {
uint phys_core_list;
size_t cmdq_size, respq_size;
- bool mapped;
};
/* ioremapped resource */
@@ -55,10 +54,10 @@ struct gxp_tpu_dev {
};
/* Forward declarations from submodules */
+struct gcip_domain_pool;
struct gxp_client;
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
-struct gxp_domain_pool;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
@@ -116,7 +115,7 @@ struct gxp_dev {
*/
struct device *gsa_dev;
u32 memory_per_core;
- struct gxp_domain_pool *domain_pool;
+ struct gcip_domain_pool *domain_pool;
struct list_head client_list;
struct mutex client_list_lock;
/*
diff --git a/gxp-kci.c b/gxp-kci.c
index 0f93c33..8c76190 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -517,8 +517,8 @@ int gxp_kci_shutdown(struct gxp_kci *gkci)
return gxp_kci_send_cmd(gkci->mbx, &cmd);
}
-int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
- u8 slice_index, u8 tpu_client_id, u8 operation)
+int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u32 client_id, u8 num_cores,
+ u8 slice_index)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_ALLOCATE_VMBOX,
@@ -535,18 +535,9 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
return -ENOMEM;
detail = buf.vaddr;
- detail->operation = operation;
detail->client_id = client_id;
-
- if (detail->operation & KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX) {
- detail->num_cores = num_cores;
- detail->slice_index = slice_index;
- }
-
- if (detail->operation & KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX) {
- detail->offload_client_id = tpu_client_id;
- detail->offload_type = KCI_ALLOCATE_VMBOX_OFFLOAD_TYPE_TPU;
- }
+ detail->num_cores = num_cores;
+ detail->slice_index = slice_index;
cmd.dma.address = buf.daddr;
cmd.dma.size = sizeof(*detail);
@@ -557,7 +548,7 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
return ret;
}
-int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id)
+int gxp_kci_release_vmbox(struct gxp_kci *gkci, u32 client_id)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_RELEASE_VMBOX,
@@ -585,6 +576,39 @@ int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id)
return ret;
}
+int gxp_kci_link_unlink_offload_vmbox(
+ struct gxp_kci *gkci, u32 client_id, u32 offload_client_id,
+ enum gcip_kci_offload_chip_type offload_chip_type, bool link)
+{
+ struct gcip_kci_command_element cmd = {
+ .code = link ? GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX :
+ GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX,
+ };
+ struct gxp_kci_link_unlink_offload_vmbox_detail *detail;
+ struct gxp_mapped_resource buf;
+ int ret;
+
+ if (!gkci || !gkci->mbx)
+ return -ENODEV;
+
+ ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf, sizeof(*detail));
+ if (ret)
+ return -ENOMEM;
+
+ detail = buf.vaddr;
+ detail->client_id = client_id;
+ detail->offload_client_id = offload_client_id;
+ detail->offload_chip_type = offload_chip_type;
+
+ cmd.dma.address = buf.daddr;
+ cmd.dma.size = sizeof(*detail);
+
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
+ gxp_mcu_mem_free_data(gkci->mcu, &buf);
+
+ return ret;
+}
+
int gxp_kci_notify_throttling(struct gxp_kci *gkci, u32 rate)
{
struct gcip_kci_command_element cmd = {
diff --git a/gxp-kci.h b/gxp-kci.h
index 85669e2..99a2c81 100644
--- a/gxp-kci.h
+++ b/gxp-kci.h
@@ -68,27 +68,8 @@ struct gxp_kci {
/* Used when sending the details about allocate_vmbox KCI command. */
struct gxp_kci_allocate_vmbox_detail {
- /*
- * Operations of command.
- * The operations below can be sent in one command, but also separately according to how
- * the bits of this field are set.
- *
- * Bitfields:
- * [0:0] - Virtual mailbox allocation.
- * 0 = Ignore.
- * 1 = Allocate a virtual mailbox.
- * @client_id, @num_cores and @slice_index are mandatory.
- * [1:1] - Offload virtual mailbox linkage.
- * 0 = Ignore.
- * 1 = Link an offload virtual mailbox.
- * This operation cannot be called before allocating the virtual mailbox
- * for both DSP and offload chip.
- * @client_id, @offload_client_id and @offload_type are mandatory.
- * [7:2] - RESERVED
- */
- u8 operation;
/* Client ID. */
- u8 client_id;
+ u32 client_id;
/* The number of required cores. */
u8 num_cores;
/*
@@ -96,13 +77,6 @@ struct gxp_kci_allocate_vmbox_detail {
* used for MCU<->core mailbox.
*/
u8 slice_index;
- /* Client ID of offload chip. */
- u8 offload_client_id;
- /*
- * Type of offload chip.
- * 0: TPU
- */
- u8 offload_type;
/* Reserved */
u8 reserved[58];
} __packed;
@@ -110,9 +84,24 @@ struct gxp_kci_allocate_vmbox_detail {
/* Used when sending the details about release_vmbox KCI command. */
struct gxp_kci_release_vmbox_detail {
/* Client ID. */
- u8 client_id;
+ u32 client_id;
+ /* Reserved */
+ u8 reserved[60];
+} __packed;
+
+/* Used when sending the details about {link,unlink}_offload_vmbox KCI command. */
+struct gxp_kci_link_unlink_offload_vmbox_detail {
+ /* DSP Client ID. */
+ u32 client_id;
+ /* Client ID of offload mailbox. */
+ u32 offload_client_id;
+ /*
+ * Chip type of offload mailbox.
+ * See enum gcip_kci_offload_chip_type.
+ */
+ u8 offload_chip_type;
/* Reserved */
- u8 reserved[63];
+ u8 reserved[55];
} __packed;
/*
@@ -199,29 +188,16 @@ int gxp_kci_shutdown(struct gxp_kci *gkci);
int gxp_kci_notify_throttling(struct gxp_kci *gkci, u32 rate);
/*
- * Allocates a virtual mailbox to communicate with MCU firmware. According to @operation, it links
- * the TPU virtual mailbox of @tpu_client_id to the DSP client of @client_id to offload TPU
- * commands from the firmware side.
+ * Allocates a virtual mailbox to communicate with MCU firmware.
*
* A new client wants to run a workload on DSP, it needs to allocate a virtual mailbox. Creating
* mailbox will be initiated from the application by calling GXP_ALLOCATE_VIRTUAL_DEVICE ioctl.
- * Allocated virtual mailbox should be released by calling `gxp_kci_release_vmbox`. To allocate a
- * virtual mailbox, @client_id, @num_cores and @slice_index must be passed and @operation must be
- * masked with `KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX`.
- *
- * To offload TPU commands, the virtual mailbox which is allocated from the TPU side should be
- * linked to the DSP client. Therefore, by passing @client_id which is a client ID of DSP,
- * @tpu_client_id which can be fetched from the TPU driver to this function and masking
- * @operation with `KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX`, the TPU virtual mailbox will be
- * linked to the DSP client.
- *
- * Allocating a virtual mailbox and linking a TPU virtual mailbox can be done with the same
- * function call, but also can be done separately. It depends on how @operation is set.
+ * Allocated virtual mailbox should be released by calling `gxp_kci_release_vmbox`.
*
* Returns the code of response, or a negative errno on error.
*/
-int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
- u8 slice_index, u8 tpu_client_id, u8 operation);
+int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u32 client_id, u8 num_cores,
+ u8 slice_index);
/*
* Releases a virtual mailbox which is allocated by `gxp_kci_allocate_vmbox`.
@@ -229,7 +205,22 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
*
* Returns the code of response, or a negative errno on error.
*/
-int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id);
+int gxp_kci_release_vmbox(struct gxp_kci *gkci, u32 client_id);
+
+/*
+ * Links or unlinks @client_id (DSP client ID) and @offload_client_id (Client ID of offloading
+ * chip). It will link them if @link is true. Otherwise, it will unlink them.
+ *
+ * Link: Should be called before sending offload commands from DSP to the target chip.
+ * Unlink: Should be called after offloading is completed.
+ *
+ * The type of the target chip should be passed to the @offload_chip_type.
+ *
+ * Returns the code of response, or a negative errno on error.
+ */
+int gxp_kci_link_unlink_offload_vmbox(
+ struct gxp_kci *gkci, u32 client_id, u32 offload_client_id,
+ enum gcip_kci_offload_chip_type offload_chip_type, bool link);
/*
* Send an ack to the FW after handling a reverse KCI request.
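The expected call order around an offload session, sketched with illustrative variables (kci, dsp_client_id, and tpu_client_id assumed in scope), mirroring what callisto-platform.c does above:

	ret = gxp_kci_link_unlink_offload_vmbox(kci, dsp_client_id, tpu_client_id,
						GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU,
						/*link=*/true);
	if (ret)
		return ret;

	/* ... DSP may now offload commands to the TPU ... */

	gxp_kci_link_unlink_offload_vmbox(kci, dsp_client_id, tpu_client_id,
					  GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU,
					  /*link=*/false);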
diff --git a/gxp-mailbox-driver.c b/gxp-mailbox-driver.c
index 23788f7..40fdba1 100644
--- a/gxp-mailbox-driver.c
+++ b/gxp-mailbox-driver.c
@@ -17,16 +17,6 @@
#include "gxp-mailbox-regs.h"
#include "gxp-mailbox.h"
-static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- return readl(mailbox->csr_reg_base + reg_offset);
-}
-
-static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
-{
- writel(value, mailbox->csr_reg_base + reg_offset);
-}
-
static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
{
return readl(mailbox->data_reg_base + reg_offset);
@@ -159,63 +149,20 @@ void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
{
- return gxp->mbx[index].vaddr + 0x80;
-}
-
-/* gxp-mailbox-driver.h: CSR-based calls */
-
-void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
-{
- csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
-}
-
-void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
- u32 int_mask)
-{
- /*
- * Ensure all memory writes have been committed to memory before
- * signalling to the device to read from them. This avoids the scenario
- * where the interrupt trigger write gets delivered to the MBX HW before
- * the DRAM transactions made it to DRAM since they're Normal
- * transactions and can be re-ordered and backed off behind other
- * transfers.
- */
- wmb();
-
- csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
-}
-
-void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
-}
-
-void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
+ return gxp->mbx[index].vaddr + MBOX_DATA_REG_BASE;
}
/* gxp-mailbox-driver.h: Data register-based calls */
void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
{
- data_write(mailbox, MBOX_STATUS_OFFSET, status);
+ data_write(mailbox, MBOX_DATA_STATUS_OFFSET, status);
}
void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
dma_addr_t descriptor_addr)
{
- data_write(mailbox, MBOX_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
+ data_write(mailbox, MBOX_DATA_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
}
void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
@@ -226,10 +173,10 @@ void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
- current_resp_head = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
+ current_resp_head = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
RESP_HEAD_MASK;
new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
new_cmd_tail | current_resp_head);
spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
@@ -243,10 +190,10 @@ void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
- current_cmd_tail = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
+ current_cmd_tail = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
CMD_TAIL_MASK;
new_resp_head = (u32)val << RESP_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
current_cmd_tail | new_resp_head);
spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
@@ -259,7 +206,7 @@ u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
@@ -273,7 +220,7 @@ u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
@@ -288,10 +235,10 @@ void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
- current_resp_tail = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
+ current_resp_tail = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
RESP_TAIL_MASK;
new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
new_cmd_head | current_resp_tail);
spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
@@ -305,10 +252,10 @@ void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
- current_cmd_head = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
+ current_cmd_head = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
CMD_HEAD_MASK;
new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
current_cmd_head | new_resp_tail);
spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
@@ -321,7 +268,7 @@ u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
@@ -335,7 +282,7 @@ u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
diff --git a/gxp-mailbox-regs.h b/gxp-mailbox-regs.h
index 5d83b5e..05fb414 100644
--- a/gxp-mailbox-regs.h
+++ b/gxp-mailbox-regs.h
@@ -7,34 +7,9 @@
#ifndef __GXP_MAILBOX_REGS_H__
#define __GXP_MAILBOX_REGS_H__
-/* Mailbox CSRs */
-#define MBOX_MCUCTLR_OFFSET 0x0000
-
-#define MBOX_INTGR0_OFFSET 0x0020
-#define MBOX_INTCR0_OFFSET 0x0024
-#define MBOX_INTMR0_OFFSET 0x0028
-#define MBOX_INTSR0_OFFSET 0x002C
-#define MBOX_INTMSR0_OFFSET 0x0030
-
-#define MBOX_INTGR1_OFFSET 0x0040
-#define MBOX_INTCR1_OFFSET 0x0044
-#define MBOX_INTMR1_OFFSET 0x0048
-#define MBOX_INTSR1_OFFSET 0x004C
-#define MBOX_INTMSR1_OFFSET 0x0050
-
-/* Mailbox Shared Data Registers */
-#define MBOX_DATA_REG_BASE 0x0080
-
-#define MBOX_STATUS_OFFSET 0x00
-#define MBOX_DESCRIPTOR_ADDR_OFFSET 0x04
-#define MBOX_CMD_TAIL_RESP_HEAD_OFFSET 0x08
-#define MBOX_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
-
-#define MBOX_REGS_SIZE 0x180
-
/*
* Macros for separating out the command queue tail and response queue head in
- * the `MBOX_CMD_TAIL_RESP_HEAD_OFFSET` register.
+ * the `MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET` register.
*/
#define CMD_TAIL_SHIFT 16
#define RESP_HEAD_SHIFT 0
@@ -43,7 +18,7 @@
/*
* Macros for separating out the command queue head and response queue tail in
- * the `MBOX_CMD_HEAD_RESP_TAIL_OFFSET` register.
+ * the `MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET` register.
*/
#define CMD_HEAD_SHIFT 16
#define RESP_TAIL_SHIFT 0
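These shifts pair with CMD_TAIL_MASK/RESP_HEAD_MASK (and the HEAD/TAIL counterparts defined alongside them) to pack two 16-bit queue indices into one 32-bit register; a minimal round-trip sketch:

	u32 reg = ((u32)cmd_tail << CMD_TAIL_SHIFT) |	/* bits [31:16] */
		  ((u32)resp_head << RESP_HEAD_SHIFT);	/* bits [15:0] */

	u16 tail = (reg & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT;	/* unpack */
	u16 head = (reg & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT;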
diff --git a/gxp-mcu-platform.c b/gxp-mcu-platform.c
new file mode 100644
index 0000000..852596b
--- /dev/null
+++ b/gxp-mcu-platform.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform device driver for devices with MCU support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/moduleparam.h>
+
+#include "gxp-internal.h"
+#include "gxp-mcu-fs.h"
+#include "gxp-mcu-platform.h"
+#include "gxp-mcu.h"
+#include "gxp-usage-stats.h"
+
+#if IS_ENABLED(CONFIG_GXP_TEST)
+char *gxp_work_mode_name = "direct";
+#else
+static char *gxp_work_mode_name = "direct";
+#endif
+
+module_param_named(work_mode, gxp_work_mode_name, charp, 0660);
+
+static char *chip_rev = "a0";
+module_param(chip_rev, charp, 0660);
+
+struct gxp_mcu *gxp_mcu_of(struct gxp_dev *gxp)
+{
+ return &(to_mcu_dev(gxp)->mcu);
+}
+
+struct gxp_mcu_firmware *gxp_mcu_firmware_of(struct gxp_dev *gxp)
+{
+ return &(gxp_mcu_of(gxp)->fw);
+}
+
+bool gxp_is_direct_mode(struct gxp_dev *gxp)
+{
+ struct gxp_mcu_dev *mcu_dev = to_mcu_dev(gxp);
+
+ return mcu_dev->mode == DIRECT;
+}
+
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
+{
+ if (!strcmp(chip_rev, "a0"))
+ return GXP_CHIP_A0;
+ if (!strcmp(chip_rev, "b0"))
+ return GXP_CHIP_B0;
+ return GXP_CHIP_ANY;
+}
+
+int gxp_mcu_platform_after_probe(struct gxp_dev *gxp)
+{
+ if (gxp_is_direct_mode(gxp))
+ return 0;
+
+ gxp_usage_stats_init(gxp);
+ return gxp_mcu_init(gxp, gxp_mcu_of(gxp));
+}
+
+void gxp_mcu_platform_before_remove(struct gxp_dev *gxp)
+{
+ if (gxp_is_direct_mode(gxp))
+ return;
+
+ gxp_mcu_exit(gxp_mcu_of(gxp));
+ gxp_usage_stats_exit(gxp);
+}
+
+void gxp_mcu_dev_init(struct gxp_mcu_dev *mcu_dev)
+{
+ struct gxp_dev *gxp = &mcu_dev->gxp;
+
+ mcu_dev->mode = gxp_dev_parse_work_mode(gxp_work_mode_name);
+ gxp->after_probe = gxp_mcu_platform_after_probe;
+ gxp->before_remove = gxp_mcu_platform_before_remove;
+ gxp->handle_ioctl = gxp_mcu_ioctl;
+ gxp->handle_mmap = gxp_mcu_mmap;
+}
+
+enum gxp_work_mode gxp_dev_parse_work_mode(const char *work_mode)
+{
+ if (!strcmp(work_mode, "mcu"))
+ return MCU;
+ return DIRECT;
+}
diff --git a/gxp-mcu-platform.h b/gxp-mcu-platform.h
new file mode 100644
index 0000000..6ae923a
--- /dev/null
+++ b/gxp-mcu-platform.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Platform device driver for devices with MCU support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MCU_PLATFORM_H__
+#define __GXP_MCU_PLATFORM_H__
+
+#include "gxp-internal.h"
+#include "gxp-mcu.h"
+
+#define to_mcu_dev(gxp) container_of(gxp, struct gxp_mcu_dev, gxp)
+
+#if IS_ENABLED(CONFIG_GXP_TEST)
+/* expose this variable to have unit tests set it dynamically */
+extern char *gxp_work_mode_name;
+#endif
+
+enum gxp_work_mode {
+ MCU = 0,
+ DIRECT = 1,
+};
+
+/* GXP device with MCU support. */
+struct gxp_mcu_dev {
+ struct gxp_dev gxp;
+ struct gxp_mcu mcu;
+ enum gxp_work_mode mode;
+};
+
+/*
+ * Initializes MCU structures.
+ * @gxp must be the field embedded in gxp_mcu_dev.
+ * It's expected to be called from the common driver probe function.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+int gxp_mcu_platform_after_probe(struct gxp_dev *gxp);
+/* Reverts gxp_mcu_platform_after_probe. */
+void gxp_mcu_platform_before_remove(struct gxp_dev *gxp);
+
+/*
+ * Initializes @mcu_dev.
+ *
+ * It's expected to be called after allocation, before the common platform probe.
+ */
+void gxp_mcu_dev_init(struct gxp_mcu_dev *mcu_dev);
+
+enum gxp_work_mode gxp_dev_parse_work_mode(const char *work_mode);
+
+#endif /* __GXP_MCU_PLATFORM_H__ */
diff --git a/gxp.h b/gxp.h
index d4fb160..c6a05ea 100644
--- a/gxp.h
+++ b/gxp.h
@@ -45,6 +45,12 @@
/* Create coherent mappings of the buffer. */
#define GXP_MAP_COHERENT (1 << 2)
+/* To check whether the driver is working in MCU mode. */
+#define GXP_SPEC_FEATURE_MODE_MCU (1 << 0)
+
+/* Core telemetry buffer size is a multiple of 64 kB */
+#define GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE 0x10000u
+
struct gxp_map_ioctl {
/*
* Deprecated. All virtual cores will be mapped.
@@ -184,11 +190,27 @@ struct gxp_mailbox_response_ioctl {
struct gxp_specs_ioctl {
/* Maximum number of cores that can be allocated to a virtual device */
__u8 core_count;
+ /*
+ * A field to indicate the features or modes the device supports.
+ * Bitfields:
+ * [0:0] - Mode:
+ * 0 = direct mode
+ * 1 = MCU mode
+ * [7:1] - RESERVED
+ */
+ __u8 features;
+ /*
+ * Size of per core allocated telemetry buffer represented in units
+ * of GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ */
+ __u8 telemetry_buffer_size;
+ /*
+ * Size of per core reserved secure telemetry buffer represented in
+ * units of GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ */
+ __u8 secure_telemetry_buffer_size;
/* Deprecated fields that should be ignored */
- __u16 reserved_0;
- __u16 reserved_1;
- __u16 reserved_2;
- __u8 reserved_3;
+ __u8 reserved[8];
/*
* Amount of "tightly-coupled memory" or TCM available to each core.
* The value returned will be in kB, or 0 if the value was not
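A hedged sketch of decoding the new features bitfield from user space after a successful GXP_GET_SPECS call; fd and use_mcu_path() are hypothetical:

	struct gxp_specs_ioctl specs = {0};

	if (ioctl(fd, GXP_GET_SPECS, &specs) == 0 &&
	    (specs.features & GXP_SPEC_FEATURE_MODE_MCU))
		/* Driver is running in MCU mode rather than direct mode. */
		use_mcu_path();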