summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAurora zuma automerger <aurora-zuma-automerger@google.com>2023-01-03 05:58:02 +0000
committerCopybara-Service <copybara-worker@google.com>2023-01-02 23:01:14 -0800
commitf9810bce749c240728b36e833e58aea43291512e (patch)
tree9539365033e83cbf53b147d46280166306b1b1b0
parentb730638009731c92fbf98db250aa1a43a4305b99 (diff)
downloadzuma-f9810bce749c240728b36e833e58aea43291512e.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: remove debugfs earlier than before_remove Bug: 263830035 gxp: adopt updated gxp_client_allocate_virtual_device gxp: debugfs utilizes UCI in the MCU mode Bug: 263830035 (repeat) gxp: call gxp_client_* funcs from firmware_run debugfs Bug: 263830035 (repeat) gxp: apply clang-format to gxp-debugfs.c gxp: implement execute_cmd callback of UCI Bug: 263830035 (repeat) gxp: update the interface of execute_cmd callback Bug: 263830035 (repeat) gxp: power off core on mbox allocation failure Bug: 264184974 gxp: Set special client ID to secure VD Bug: 263685745 gxp: Allocate secure VD Bug: 263836991 gxp: Add a secure VD field to struct gxp_dev Bug: 263685535 gxp: Add 'flags' to struct gxp_virtual_device_ioctl Bug: 263836981 gxp: Remove warnings during compilation Bug: 264010198 gxp: cleanup common platform probe Bug: 263844135 gxp: amalthea use GCIP Bug: 263918299 gxp: update Makefile for kleaf support gxp: callisto add BUILD.bazel Bug: 263851072 gxp: increase opaque size of UCI structs Bug: 261667704 gxp: use PMU register Bug: 263830026 gcip: implement noncontiguous alloc Bug: 262684159 GCIP_MAIN_REV_ID: bbc5d495ddf833d94516c84963265f76eb500d51 gcip: add gcip-alloc-helper.h Bug: 262684159 (repeat) gcip: Update the comments in gcip-image-config for new encoding Bug: 257300340 GCIP_HEADERS_REV_ID: 37a282fd7aad536dc4521a908468bc9557911a19 gxp: map private firmware data region Bug: 261797596 gxp: introduce gxp_dma_map_iova_sgt Bug: 262825536 gxp: allocate per VD firmware data Bug: 261797596 (repeat) gxp: Correct the lpm offsets calculations Bug: 263239197 gxp: Move shareability config with block power configuration Bug: 263310466 gcip: Change hard-coded magic numbers to MACROs Bug: 257300340 (repeat) GCIP_MAIN_REV_ID: 52b24e3fd921807bed5b6f4db209066432b50776 gxp: Enable new telemetry Bug: 247955426 gxp: enable dynamic slice index Bug: 255706432 gxp: Add support for core_boot insmod parameter Bug: 251612313 gxp: Add first_open support to VMBox Bug: 263296400 gxp: add new UCI 
ioctl handlers Bug: 261667704 (repeat) gxp: replace retval with opaque Bug: 261667704 (repeat) gxp: rename UCI struct fields Bug: 261667704 (repeat) gcip: Update the size encoding of image config Bug: 257300340 (repeat) GCIP_MAIN_REV_ID: 5329d7fa9d3555c6339a31e654bd6a481647ccbf gxp: new UCI ioctl interfaces Bug: 261667704 (repeat) gxp: remove unused virt_core in vd_suspend gxp: callisto update INT/MIF frequencies Bug: 262964769 gxp: move system memory rates to config files Bug: 262964769 (repeat) gxp: change shared slice size to 32K Bug: 262686729 gcip: add reference count to the awaiter Bug: 261822585 gcip: introduce gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat) GCIP_HEADERS_REV_ID: a44390f341770fd598141aa3079b73dd4a67af2b gcip: unittests: implement timeout race trigger Bug: 261822585 (repeat) gcip: implement reference count to the awaiter Bug: 261822585 (repeat) gcip: implement gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat) GCIP_MAIN_REV_ID: a65dcc3b7e612ba2d89b509eb0b6c316159bbee8 gxp: increase shared buffer size from 192K to 512K Bug: 262686729 (repeat) gxp: unittests: test vd releases without waiting resps Bug: 261822585 (repeat) gxp: clean up waiting queue when releasing vd Bug: 261822585 (repeat) gxp: pop UCI async_resps from the wait queue Bug: 261822585 (repeat) gxp: push UCI async_resps into the wait queue Bug: 261822585 (repeat) gxp: introduce wait_queue for waiting responses Bug: 261822585 (repeat) gxp: rename list_entry to dest_list_entry Bug: 261822585 (repeat) gxp: rename queue to dest_queue Bug: 261822585 (repeat) GitOrigin-RevId: d6516a95b7cda7fb8f3f7d1ceef904920bb401be Change-Id: I07e6d97b392a1584dd6af68fc71782f89450c8c3
-rw-r--r--Makefile10
-rw-r--r--callisto-platform.c41
-rw-r--r--callisto/config-pwr-state.h18
-rw-r--r--callisto/iova.h5
-rw-r--r--callisto/lpm.h11
-rw-r--r--gcip-kernel-driver/drivers/gcip/Makefile5
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c92
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-image-config.c36
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-mailbox.c80
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-alloc-helper.h50
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-image-config.h5
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-mailbox.h39
-rw-r--r--gxp-client.c36
-rw-r--r--gxp-client.h4
-rw-r--r--gxp-common-platform.c234
-rw-r--r--gxp-core-telemetry.c69
-rw-r--r--gxp-dci.c22
-rw-r--r--gxp-debug-dump.c97
-rw-r--r--gxp-debugfs.c142
-rw-r--r--gxp-dma-iommu.c37
-rw-r--r--gxp-dma.h31
-rw-r--r--gxp-firmware.c101
-rw-r--r--gxp-firmware.h4
-rw-r--r--gxp-internal.h5
-rw-r--r--gxp-kci.c3
-rw-r--r--gxp-kci.h6
-rw-r--r--gxp-lpm.h6
-rw-r--r--gxp-mailbox-manager.h16
-rw-r--r--gxp-mcu-fs.c122
-rw-r--r--gxp-mcu.c2
-rw-r--r--gxp-pm.c79
-rw-r--r--gxp-pm.h11
-rw-r--r--gxp-uci.c196
-rw-r--r--gxp-uci.h39
-rw-r--r--gxp-vd.c49
-rw-r--r--gxp-vd.h20
-rw-r--r--gxp.h81
37 files changed, 1374 insertions, 430 deletions
diff --git a/Makefile b/Makefile
index 59a2c47..be9dd50 100644
--- a/Makefile
+++ b/Makefile
@@ -37,8 +37,6 @@ gxp-objs += \
ifeq ($(GXP_CHIP),CALLISTO)
-USE_GCIP := TRUE
-
gxp-objs += \
callisto-platform.o \
gsx01-mailbox-driver.o \
@@ -58,12 +56,10 @@ EDGETPU_CHIP := rio
endif
ifeq ($(CONFIG_$(GXP_CHIP)),m)
-ifeq ($(USE_GCIP),TRUE)
gxp-objs += $(GCIP_DIR)/gcip.o
endif
-endif
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
include $(KERNEL_SRC)/../private/google-modules/soc/gs/Makefile.include
@@ -100,11 +96,6 @@ KBUILD_OPTIONS += GXP_CHIP=$(GXP_CHIP) GXP_PLATFORM=$(GXP_PLATFORM)
# Access TPU driver's exported symbols.
EXTRA_SYMBOLS += $(OUT_DIR)/../private/google-modules/edgetpu/$(EDGETPU_CHIP)/drivers/edgetpu/Module.symvers
-ifneq ($(USE_GCIP),TRUE)
-modules modules_install clean:
- $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) \
- EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" $(@)
-else
modules modules_install:
$(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) gcip.o
$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) \
@@ -112,4 +103,3 @@ modules modules_install:
clean:
$(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) $(@)
$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
-endif
diff --git a/callisto-platform.c b/callisto-platform.c
index 8600be5..46a2e29 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -17,22 +17,17 @@
#include "gxp-common-platform.c"
-static int gxp_mmu_set_shareability(struct device *dev, u32 reg_base)
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp)
{
- void __iomem *addr = ioremap(reg_base, PAGE_SIZE);
+ void __iomem *addr = gxp->sysreg_shareability;
- if (!addr) {
- dev_err(dev, "sysreg ioremap failed\n");
- return -ENOMEM;
- }
+ if (IS_ERR_OR_NULL(addr))
+ return;
writel_relaxed(SHAREABLE_WRITE | SHAREABLE_READ | INNER_SHAREABLE,
addr + GXP_SYSREG_AUR0_SHAREABILITY);
writel_relaxed(SHAREABLE_WRITE | SHAREABLE_READ | INNER_SHAREABLE,
addr + GXP_SYSREG_AUR1_SHAREABILITY);
- iounmap(addr);
-
- return 0;
}
static int callisto_platform_parse_dt(struct platform_device *pdev,
@@ -40,9 +35,9 @@ static int callisto_platform_parse_dt(struct platform_device *pdev,
{
struct resource *r;
void *addr;
+ struct device *dev = gxp->dev;
int ret;
u32 reg;
- struct device *dev = gxp->dev;
/*
* Setting BAAW is required for having correct base for CSR accesses.
@@ -71,7 +66,9 @@ static int callisto_platform_parse_dt(struct platform_device *pdev,
"gxp,shareability", 0, &reg);
if (ret)
goto err;
- ret = gxp_mmu_set_shareability(dev, reg);
+ gxp->sysreg_shareability = devm_ioremap(dev, reg, PAGE_SIZE);
+ if (!gxp->sysreg_shareability)
+ ret = -ENOMEM;
err:
if (ret)
dev_warn(dev, "Failed to enable shareability: %d\n", ret);
@@ -94,12 +91,12 @@ static int callisto_request_power_states(struct gxp_client *client,
cmd.wakelock_command_params.dsp_operating_point = power_states.power + 1;
cmd.wakelock_command_params.memory_operating_point = power_states.memory;
cmd.type = WAKELOCK_COMMAND;
- cmd.priority = 0; /* currently unused */
cmd.client_id = client->vd->client_id;
ret = gxp_uci_send_command(
&mcu->uci, client->vd, &cmd,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
client->mb_eventfds[UCI_RESOURCE_ID]);
@@ -109,17 +106,20 @@ static int callisto_request_power_states(struct gxp_client *client,
static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
- int pasid, ret;
+ int client_id, ret;
+
+ if (vd->is_secure)
+ client_id = SECURE_CLIENT_ID;
+ else
+ client_id = gxp_iommu_aux_get_pasid(gxp, vd->domain);
- pasid = gxp_iommu_aux_get_pasid(gxp, vd->domain);
- /* TODO(b/255706432): Adopt vd->slice_index after the firmware supports this. */
- ret = gxp_kci_allocate_vmbox(kci, pasid, vd->num_cores,
- /*slice_index=*/0);
+ ret = gxp_kci_allocate_vmbox(kci, client_id, vd->num_cores,
+ vd->slice_index, vd->first_open);
if (ret) {
if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
dev_err(gxp->dev,
"Failed to allocate VMBox for client %d, TPU client %d: %d",
- pasid, vd->tpu_client_id, ret);
+ client_id, vd->tpu_client_id, ret);
return ret;
}
@@ -132,7 +132,8 @@ static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
"Allocating VMBox is not implemented from the firmware side");
}
- vd->client_id = pasid;
+ vd->client_id = client_id;
+ vd->first_open = false;
return 0;
}
diff --git a/callisto/config-pwr-state.h b/callisto/config-pwr-state.h
index 1a51874..5558765 100644
--- a/callisto/config-pwr-state.h
+++ b/callisto/config-pwr-state.h
@@ -20,4 +20,22 @@ enum aur_power_rate {
AUR_UD_PLUS_RATE = 861000,
};
+enum aur_mem_int_rate {
+ AUR_MEM_INT_MIN = 0,
+ AUR_MEM_INT_VERY_LOW = 0,
+ AUR_MEM_INT_LOW = 356000,
+ AUR_MEM_INT_HIGH = 664000,
+ AUR_MEM_INT_VERY_HIGH = 799000,
+ AUR_MEM_INT_MAX = 1066000,
+};
+
+enum aur_mem_mif_rate {
+ AUR_MEM_MIF_MIN = 0,
+ AUR_MEM_MIF_VERY_LOW = 0,
+ AUR_MEM_MIF_LOW = 1014000,
+ AUR_MEM_MIF_HIGH = 1352000,
+ AUR_MEM_MIF_VERY_HIGH = 2028000,
+ AUR_MEM_MIF_MAX = 3744000,
+};
+
#endif /* __CALLISTO_CONFIG_PWR_STATE_H__ */
diff --git a/callisto/iova.h b/callisto/iova.h
index 95f71e6..496d863 100644
--- a/callisto/iova.h
+++ b/callisto/iova.h
@@ -13,9 +13,10 @@
#define GXP_IOVA_EXT_TPU_MBX (0x1A050000)
#define GXP_IOVA_FIRMWARE(_x_) (0xFA000000 + (_x_) * 0x00100000)
#define GXP_IOVA_SHARED_BUFFER (0xFA3A8000)
-#define GXP_SHARED_BUFFER_SIZE (0x00010000) /* 64K, per core */
-#define GXP_SHARED_SLICE_SIZE (0x00001000) /* 4K, per core */
+#define GXP_SHARED_BUFFER_SIZE (0x00080000) /* 512K */
+#define GXP_SHARED_SLICE_SIZE (0x00008000) /* 32K, per VD */
#define GXP_IOVA_FW_DATA (0xFA400000)
+#define GXP_IOVA_PRIV_FW_DATA (0xFA500000)
#define GXP_IOVA_TPU_MBX_BUFFER(_x_) (0xFE100000 + (_x_) * 0x00040000)
/* IOVAs for MCU firmware */
diff --git a/callisto/lpm.h b/callisto/lpm.h
index 6c1bde9..a2716db 100644
--- a/callisto/lpm.h
+++ b/callisto/lpm.h
@@ -28,12 +28,13 @@ enum lpm_psm_csrs {
LPM_REG_ENABLE_STATE_3 = 0x380,
};
+/* offset from GXP_LPM_BASE */
enum lpm_psm_base {
- GXP_REG_LPM_PSM_0 = 0x41000,
- GXP_REG_LPM_PSM_1 = 0x42000,
- GXP_REG_LPM_PSM_2 = 0x43000,
- GXP_REG_LPM_PSM_3 = 0x44000,
- GXP_REG_LPM_PSM_4 = 0x45000,
+ GXP_REG_LPM_PSM_0 = 0x1000,
+ GXP_REG_LPM_PSM_1 = 0x2000,
+ GXP_REG_LPM_PSM_2 = 0x3000,
+ GXP_REG_LPM_PSM_3 = 0x4000,
+ GXP_REG_LPM_PSM_4 = 0x5000,
};
#define PSM_STATE_TABLE_SZ (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
index 2f34448..c3424ee 100644
--- a/gcip-kernel-driver/drivers/gcip/Makefile
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -6,8 +6,9 @@
CONFIG_GCIP ?= m
obj-$(CONFIG_GCIP) += gcip.o
-gcip-objs := gcip-domain-pool.o gcip-firmware.o gcip-image-config.o gcip-kci.o \
- gcip-mailbox.o gcip-mem-pool.o gcip-telemetry.o
+gcip-objs := gcip-alloc-helper.o gcip-domain-pool.o gcip-firmware.o \
+ gcip-image-config.o gcip-kci.o gcip-mailbox.o gcip-mem-pool.o \
+ gcip-telemetry.o
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
new file mode 100644
index 0000000..f79149f
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP helpers for allocating memories.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-alloc-helper.h>
+
+/*
+ * Set @pages to the pages @mem represents.
+ * @mem must be a pointer returned by vmalloc.
+ *
+ * Returns 0 on success, -ENOMEM when any page is NULL.
+ */
+static int gcip_vmalloc_to_pages(void *mem, size_t count, struct page **pages)
+{
+ size_t i = 0;
+
+ while (count--) {
+ pages[i++] = vmalloc_to_page(mem);
+ if (!pages[i - 1])
+ return -ENOMEM;
+ mem += PAGE_SIZE;
+ }
+ return 0;
+}
+
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct gcip_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
+ void *mem;
+ struct page **pages;
+ size_t count;
+ int ret;
+
+ if (!sh)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+ mem = vzalloc_node(size, dev_to_node(dev));
+ if (!mem) {
+ dev_err(dev, "GCIP noncontiguous alloc size=%#zx failed", size);
+ goto err_free_sh;
+ }
+
+ pages = kmalloc_array(count, sizeof(*pages), gfp);
+ if (!pages) {
+ dev_err(dev, "GCIP alloc pages array count=%zu failed", count);
+ goto err_free_mem;
+ }
+
+ if (gcip_vmalloc_to_pages(mem, count, pages)) {
+ dev_err(dev, "convert memory to pages failed");
+ goto err_free_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(&sh->sgt, pages, count, 0, size, gfp);
+ if (ret) {
+ dev_err(dev, "alloc SG table with size=%#zx failed: %d", size, ret);
+ goto err_free_pages;
+ }
+
+ kfree(pages);
+ sh->mem = mem;
+ return &sh->sgt;
+
+err_free_pages:
+ kfree(pages);
+err_free_mem:
+ vfree(mem);
+err_free_sh:
+ kfree(sh);
+ return NULL;
+}
+
+void gcip_free_noncontiguous(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ sg_free_table(&sh->sgt);
+ vfree(sh->mem);
+ kfree(sh);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
index 47c7de0..5fed69c 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -12,13 +12,27 @@
#include <gcip/gcip-image-config.h>
-#define SIZE_MASK 0xfff
+#define ADDR_SHIFT 12
+#define SIZE_MODE_BIT BIT(ADDR_SHIFT - 1)
+#define SECURE_SIZE_MASK (SIZE_MODE_BIT - 1u)
+#define NS_SIZE_MASK (BIT(ADDR_SHIFT) - 1u)
+#define ADDR_MASK ~(BIT(ADDR_SHIFT) - 1u)
+
+/* used by ns_iommu_mappings */
+#define CONFIG_TO_MBSIZE(a) (((a)&NS_SIZE_MASK) << 20)
/* used by iommu_mappings */
-#define CONFIG_TO_SIZE(a) ((1U << ((a) & SIZE_MASK)) << 12)
+static inline __u32 config_to_size(__u32 cfg)
+{
+ __u32 page_size;
-/* used by ns_iommu_mappings */
-#define CONFIG_TO_MBSIZE(a) (((a) & SIZE_MASK) << 20)
+ if (cfg & SIZE_MODE_BIT)
+ page_size = cfg & SECURE_SIZE_MASK;
+ else
+ page_size = BIT(cfg & SECURE_SIZE_MASK);
+
+ return page_size << PAGE_SHIFT;
+}
static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
struct gcip_image_config *config)
@@ -35,8 +49,8 @@ static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
ret = -EIO;
goto err;
}
- size = CONFIG_TO_SIZE(config->iommu_mappings[i].image_config_value);
- paddr = config->iommu_mappings[i].image_config_value & ~SIZE_MASK;
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ paddr = config->iommu_mappings[i].image_config_value & ADDR_MASK;
dev_dbg(parser->dev, "Image config adding IOMMU mapping: %pad -> %pap", &daddr,
&paddr);
@@ -60,7 +74,7 @@ static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
err:
while (i--) {
daddr = config->iommu_mappings[i].virt_address;
- size = CONFIG_TO_SIZE(config->iommu_mappings[i].image_config_value);
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
}
return ret;
@@ -75,7 +89,7 @@ static void clear_iommu_mappings(struct gcip_image_config_parser *parser,
for (i = config->num_iommu_mappings - 1; i >= 0; i--) {
daddr = config->iommu_mappings[i].virt_address;
- size = CONFIG_TO_SIZE(config->iommu_mappings[i].image_config_value);
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
dev_dbg(parser->dev, "Image config removing IOMMU mapping: %pad size=%#lx", &daddr,
size);
parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
@@ -91,7 +105,7 @@ static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
phys_addr_t paddr = 0;
for (i = 0; i < config->num_ns_iommu_mappings; i++) {
- daddr = config->ns_iommu_mappings[i] & ~SIZE_MASK;
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
if (unlikely(!daddr)) {
dev_warn(parser->dev, "Invalid config, device address is zero");
ret = -EIO;
@@ -115,7 +129,7 @@ static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
err:
while (i--) {
size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
- daddr = config->ns_iommu_mappings[i] & ~SIZE_MASK;
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
parser->ops->unmap(parser->data, daddr, size, 0);
}
return ret;
@@ -130,7 +144,7 @@ static void clear_ns_iommu_mappings(struct gcip_image_config_parser *parser,
for (i = config->num_ns_iommu_mappings - 1; i >= 0; i--) {
size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
- daddr = config->ns_iommu_mappings[i] & ~SIZE_MASK;
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
dev_dbg(parser->dev, "Image config removing NS IOMMU mapping: %pad size=%#lx",
&daddr, size);
parser->ops->unmap(parser->data, daddr, size, 0);
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index fc89d4d..cbb3c80 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -13,6 +13,14 @@
#include <gcip/gcip-mailbox.h>
+#if IS_ENABLED(CONFIG_GCIP_TEST)
+#include "unittests/helper/gcip-mailbox-controller.h"
+
+#define TEST_TRIGGER_TIMEOUT_RACE(awaiter) gcip_mailbox_controller_trigger_timeout_race(awaiter)
+#else
+#define TEST_TRIGGER_TIMEOUT_RACE(...)
+#endif
+
#define GET_CMD_QUEUE_HEAD() mailbox->ops->get_cmd_queue_head(mailbox)
#define GET_CMD_QUEUE_TAIL() mailbox->ops->get_cmd_queue_tail(mailbox)
#define INC_CMD_QUEUE_TAIL(inc) mailbox->ops->inc_cmd_queue_tail(mailbox, inc)
@@ -46,6 +54,19 @@ struct gcip_mailbox_wait_list_elem {
struct gcip_mailbox_resp_awaiter *awaiter;
};
+static void gcip_mailbox_awaiter_release(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
+}
+
+static void gcip_mailbox_awaiter_dec_refs(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (refcount_dec_and_test(&awaiter->refs))
+ gcip_mailbox_awaiter_release(awaiter);
+}
+
/*
* Removes the response previously pushed with gcip_mailbox_push_wait_resp().
*
@@ -65,6 +86,10 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
break;
if (cur_seq == seq) {
list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
kfree(cur);
break;
}
@@ -90,6 +115,10 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
if (!entry)
return -ENOMEM;
+ /* Increase a reference of arrived handler. */
+ if (awaiter)
+ refcount_inc(&awaiter->refs);
+
entry->resp = resp;
entry->awaiter = awaiter;
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
@@ -221,7 +250,23 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
list_del(&cur->list);
if (cur->awaiter) {
awaiter = cur->awaiter;
- cancel_delayed_work(&awaiter->timeout_work);
+
+ /*
+ * The timedout handler will be fired, but pended by waiting for
+ * acquiring the wait_list_lock.
+ */
+ TEST_TRIGGER_TIMEOUT_RACE(awaiter);
+
+ /*
+ * If canceling timeout_work succeeded, we have to decrease the
+ * reference count here because the timeout handler will not be
+ * called. Otherwise, the timeout handler is already canceled or
+ * pending by race. If it is canceled, the count must be decreased
+ * already, and if it is pending, the timeout handler will decrease
+ * the awaiter reference.
+ */
+ if (cancel_delayed_work(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
/*
* If `handle_awaiter_arrived` callback is defined, @awaiter
* will be released from the implementation side. Otherwise, it
@@ -229,8 +274,7 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
*/
if (mailbox->ops->handle_awaiter_arrived)
mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
- else
- gcip_mailbox_release_awaiter(cur->awaiter);
+ gcip_mailbox_awaiter_dec_refs(awaiter);
}
kfree(cur);
break;
@@ -238,6 +282,10 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
if (!mailbox->ignore_seq_order && cur_seq < seq) {
SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
kfree(cur);
}
}
@@ -379,8 +427,9 @@ static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
*/
if (mailbox->ops->handle_awaiter_timedout)
mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
- else
- gcip_mailbox_release_awaiter(awaiter);
+
+ /* Remove the reference of the timedout handler. */
+ gcip_mailbox_awaiter_dec_refs(awaiter);
}
/* Cleans up all the asynchronous responses which are not responded yet. */
@@ -414,6 +463,8 @@ static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
awaiter = cur->awaiter;
if (mailbox->ops->flush_awaiter)
mailbox->ops->flush_awaiter(mailbox, awaiter);
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
} else {
dev_warn(mailbox->dev,
"Unexpected synchronous command pending on mailbox release\n");
@@ -429,8 +480,10 @@ static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
list_del(&cur->list);
awaiter = cur->awaiter;
+ /* Cancel the timeout work and remove the reference of the timedout handler. */
gcip_mailbox_cancel_awaiter_timeout(awaiter);
- gcip_mailbox_release_awaiter(awaiter);
+ /* Remove the reference of the caller. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
kfree(cur);
}
}
@@ -576,6 +629,8 @@ struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mail
awaiter->mailbox = mailbox;
awaiter->data = data;
awaiter->release_data = mailbox->ops->release_awaiter_data;
+ /* 2 refs: caller (vd) and timedout handler. */
+ refcount_set(&awaiter->refs, 2);
INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
@@ -592,16 +647,21 @@ err_free_resp:
return ERR_PTR(ret);
}
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_del_wait_resp(awaiter->mailbox, awaiter->resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+}
+
void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
{
- cancel_delayed_work_sync(&awaiter->timeout_work);
+ if (cancel_delayed_work_sync(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
}
void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
{
- if (awaiter->release_data)
- awaiter->release_data(awaiter->data);
- kfree(awaiter);
+ gcip_mailbox_awaiter_dec_refs(awaiter);
}
void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
diff --git a/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
new file mode 100644
index 0000000..3d2c110
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP helpers for allocating memories.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_ALLOC_HELPER_H__
+#define __GCIP_ALLOC_HELPER_H__
+
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+/*
+ * The actual return value from the alloc_noncontiguous function.
+ * The user should only care about @sgt. @mem is used internally for freeing memory.
+ */
+struct gcip_sgt_handle {
+ struct sg_table sgt;
+ void *mem;
+};
+
+/*
+ * Allocates non-contiguous memory with size @size bytes.
+ *
+ * @dev: pointer to device structure. Used for logging and for selecting the NUMA node for page allocation.
+ * @size: Total size in bytes. Will be page aligned.
+ * @gfp: The GFP flag for malloc internal structures.
+ *
+ * Returns the SG table represents the non-contiguous region.
+ * Returns NULL on any error.
+ */
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp);
+/* Frees the memory allocated by gcip_alloc_noncontiguous. */
+void gcip_free_noncontiguous(struct sg_table *sgt);
+
+/*
+ * Returns the virtual memory that was used to allocate @sgt.
+ *
+ * @sgt must be the return pointer of gcip_alloc_noncontiguous.
+ */
+static inline void *gcip_noncontiguous_sgt_to_mem(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ return sh->mem;
+}
+
+#endif /* __GCIP_ALLOC_HELPER_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
index a3539a0..a995188 100644
--- a/gcip-kernel-driver/include/gcip/gcip-image-config.h
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -35,8 +35,9 @@ struct gcip_image_config {
/* Device virtual address */
__u32 virt_address;
/*
- * contains a 12-bit aligned address and a page-order size into a
- * 32-bit value i.e. a physical address and size in page order.
+ * Encodes a 12-bit aligned address and the corresponding size
+ * into a 32-bit value.
+ * Detailed encoding method is defined in gcip-image-config.c.
*/
__u32 image_config_value;
} iommu_mappings[GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS];
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 9ea7876..e81cfb9 100644
--- a/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -100,6 +101,8 @@ struct gcip_mailbox_resp_awaiter {
struct gcip_mailbox *mailbox;
/* User-defined data. */
void *data;
+ /* Reference count. */
+ refcount_t refs;
/*
* The callback for releasing the @data.
* It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
@@ -401,7 +404,7 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
* Executes @cmd command asynchronously. This function returns an instance of
* `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
* The implementation side can cancel the asynchronous response by calling the
- * `gcip_mailbox_cancel_awaiter_timeout` function with it.
+ * `gcip_mailbox_cancel_awaiter` or `gcip_mailbox_cancel_awaiter_timeout` function with it.
*
* Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
* asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
@@ -425,11 +428,38 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
*
* Note: the asynchronous responses fetched from @resp_queue should be released by calling the
* `gcip_mailbox_release_awaiter` function.
+ *
+ * Note: if the life cycle of the mailbox is longer than the caller part, you should make sure
+ * that the callbacks don't access the variables of caller part after the release of it.
+ *
+ * Note: if you don't need the result of the response (e.g., if you pass @resp as NULL), you
+ * can release the returned awaiter right away by calling the `gcip_mailbox_release_awaiter`
+ * function.
*/
struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
void *resp, void *data);
/*
+ * Cancels awaiting the asynchronous response.
+ * This function will remove @awaiter from the waiting list to make it not to be handled by the
+ * arrived callback. Also, it will cancel the timeout work of @awaiter synchronously. Therefore,
+ * AFTER the return of this function, you can guarantee that arrived or timedout callback will
+ * not be called for @awaiter.
+ *
+ * However, by the race condition, you must note that arrived or timedout callback can be executed
+ * BEFORE this function returns. (i.e, this function and arrived/timedout callback is called at the
+ * same time but the callback acquired the lock earlier.)
+ *
+ * Note: this function will cancel or wait for the completion of arrived or timedout callbacks
+ * synchronously. Therefore, make sure that the caller side doesn't hold any locks which can be
+ * acquired by the arrived or timedout callbacks.
+ *
+ * If you already got a response of @awaiter and want to ensure that timedout handler is finished,
+ * you can use the `gcip_mailbox_cancel_awaiter_timeout` function instead.
+ */
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
* Cancels the timeout work of the asynchronous response. In normally, the response arrives and
* the timeout is canceled, or the response timed out and the timeout handler executes. However,
* rarely, the response handler cancels the timeout handler while it has been already in progress.
@@ -437,7 +467,12 @@ struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mail
* recommended to call this function after fetching the asynchronous response even though the
* response arrived successfully.
*
- * Note: this function will cancel the timeout work synchronously.
+ * Note: this function will cancel or wait for the completion of timedout callbacks synchronously.
+ * Therefore, make sure that the caller side doesn't hold any locks which can be acquired by the
+ * timedout callbacks.
+ *
+ * If you haven't gotten a response of @awaiter yet and want to make it not to be processed by
+ * arrived and timedout callbacks, use the `gcip_mailbox_cancel_awaiter` function.
*/
void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
diff --git a/gxp-client.c b/gxp-client.c
index 03446ab..f96a100 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -87,8 +87,27 @@ void gxp_client_destroy(struct gxp_client *client)
kfree(client);
}
+static int gxp_set_secure_vd(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+
+ if (gxp_is_direct_mode(gxp))
+ return 0;
+
+ mutex_lock(&gxp->secure_vd_lock);
+ if (gxp->secure_vd) {
+ mutex_unlock(&gxp->secure_vd_lock);
+ return -EEXIST;
+ }
+ vd->is_secure = true;
+ gxp->secure_vd = vd;
+ mutex_unlock(&gxp->secure_vd_lock);
+
+ return 0;
+}
+
int gxp_client_allocate_virtual_device(struct gxp_client *client,
- uint core_count)
+ uint core_count, u8 flags)
{
struct gxp_dev *gxp = client->gxp;
struct gxp_virtual_device *vd;
@@ -110,18 +129,23 @@ int gxp_client_allocate_virtual_device(struct gxp_client *client,
ret);
goto error;
}
-
+ if (flags & GXP_ALLOCATE_VD_SECURE) {
+ ret = gxp_set_secure_vd(vd);
+ if (ret)
+ goto error_vd_release;
+ }
if (client->has_block_wakelock) {
ret = gxp_vd_block_ready(vd);
- if (ret) {
- gxp_vd_release(vd);
- goto error;
- }
+ if (ret)
+ goto error_vd_release;
}
up_write(&gxp->vd_semaphore);
client->vd = vd;
return 0;
+
+error_vd_release:
+ gxp_vd_release(vd);
error:
up_write(&gxp->vd_semaphore);
return ret;
diff --git a/gxp-client.h b/gxp-client.h
index 56b50e8..01d0b2c 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -70,6 +70,7 @@ void gxp_client_destroy(struct gxp_client *client);
*
* @client: The client to allocate a virtual device
* @core_count: The requested core count of the virtual device.
+ * @flags: The flags passed from the runtime's request.
*
* The caller must have locked client->semaphore.
*
@@ -78,7 +79,8 @@ void gxp_client_destroy(struct gxp_client *client);
* * -EINVAL - A virtual device of the client has been allocated
* * Otherwise - Errno returned by virtual device allocation
*/
-int gxp_client_allocate_virtual_device(struct gxp_client *client, uint core_count);
+int gxp_client_allocate_virtual_device(struct gxp_client *client,
+ uint core_count, u8 flags);
/**
* gxp_client_acquire_block_wakelock() - Acquires a block wakelock.
*
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index c4e2102..d9ac532 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -567,7 +567,8 @@ static int gxp_allocate_vd(struct gxp_client *client,
}
down_write(&client->semaphore);
- ret = gxp_client_allocate_virtual_device(client, ibuf.core_count);
+ ret = gxp_client_allocate_virtual_device(client, ibuf.core_count,
+ ibuf.flags);
up_write(&client->semaphore);
return ret;
@@ -1663,30 +1664,11 @@ static const struct file_operations gxp_fops = {
.unlocked_ioctl = gxp_ioctl,
};
-static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_dev *gxp)
+static int gxp_set_reg_resources(struct platform_device *pdev, struct gxp_dev *gxp)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = gxp->dev;
struct resource *r;
- phys_addr_t offset, base_addr;
- struct device_node *np;
- struct platform_device *tpu_pdev;
- struct platform_device *gsa_pdev;
- int ret;
int i;
- bool tpu_found;
- u64 prop;
-
- dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
-
- platform_set_drvdata(pdev, gxp);
- gxp->dev = dev;
- if (gxp->parse_dt) {
- ret = gxp->parse_dt(pdev, gxp);
- if (ret)
- return ret;
- }
-
- gxp_wakelock_init(gxp);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (IS_ERR_OR_NULL(r)) {
@@ -1716,7 +1698,8 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
gxp->cmu.size = GXP_CMU_SIZE;
- gxp->cmu.vaddr = devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
+ gxp->cmu.vaddr =
+ devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
dev_warn(dev, "Failed to map CMU registers\n");
}
@@ -1740,64 +1723,143 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
gxp->lpm_regs.size = gxp->regs.size;
gxp->lpm_regs.paddr = gxp->regs.paddr;
#endif
- ret = gxp_pm_init(gxp);
- if (ret) {
- dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
- return ret;
- }
for (i = 0; i < GXP_NUM_MAILBOXES; i++) {
r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "Failed to get mailbox%d resource\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
+ dev_err(dev, "Failed to get mailbox%d resource", i);
+ return -ENODEV;
}
gxp->mbx[i].paddr = r->start;
gxp->mbx[i].size = resource_size(r);
gxp->mbx[i].vaddr = devm_ioremap_resource(dev, r);
if (IS_ERR_OR_NULL(gxp->mbx[i].vaddr)) {
- dev_err(dev, "Failed to map mailbox%d registers\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
+ dev_err(dev, "Failed to map mailbox%d's register", i);
+ return -ENODEV;
}
}
- tpu_found = true;
+ return 0;
+}
+
+/*
+ * Get TPU device from the device tree. Warnings are shown when any expected
+ * device tree entry is missing.
+ */
+static void gxp_get_tpu_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct platform_device *tpu_pdev;
+ struct device_node *np;
+ phys_addr_t offset, base_addr;
+ int ret;
+
/* Get TPU device from device tree */
np = of_parse_phandle(dev->of_node, "tpu-device", 0);
if (IS_ERR_OR_NULL(np)) {
dev_warn(dev, "No tpu-device in device tree\n");
- tpu_found = false;
+ goto out_not_found;
}
tpu_pdev = of_find_device_by_node(np);
if (!tpu_pdev) {
dev_err(dev, "TPU device not found\n");
- tpu_found = false;
+ goto out_not_found;
}
/* get tpu mailbox register base */
ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
of_node_put(np);
if (ret) {
dev_warn(dev, "Unable to get tpu-device base address\n");
- tpu_found = false;
+ goto out_not_found;
}
/* get gxp-tpu mailbox register offset */
- ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset",
- &offset);
+ ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset", &offset);
if (ret) {
dev_warn(dev, "Unable to get tpu-device mailbox offset\n");
- tpu_found = false;
+ goto out_not_found;
}
- if (tpu_found) {
- gxp->tpu_dev.dev = &tpu_pdev->dev;
- get_device(gxp->tpu_dev.dev);
- gxp->tpu_dev.mbx_paddr = base_addr + offset;
- } else {
- dev_warn(dev, "TPU will not be available for interop\n");
- gxp->tpu_dev.mbx_paddr = 0;
+ gxp->tpu_dev.dev = get_device(&tpu_pdev->dev);
+ gxp->tpu_dev.mbx_paddr = base_addr + offset;
+ return;
+
+out_not_found:
+ dev_warn(dev, "TPU will not be available for interop\n");
+ gxp->tpu_dev.dev = NULL;
+ gxp->tpu_dev.mbx_paddr = 0;
+}
+
+static void gxp_put_tpu_dev(struct gxp_dev *gxp)
+{
+ /* put_device is no-op on !dev */
+ put_device(gxp->tpu_dev.dev);
+}
+
+/* Get GSA device from device tree. */
+static void gxp_get_gsa_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct device_node *np;
+ struct platform_device *gsa_pdev;
+
+ gxp->gsa_dev = NULL;
+ np = of_parse_phandle(dev->of_node, "gsa-device", 0);
+ if (!np) {
+ dev_warn(
+ dev,
+ "No gsa-device in device tree. Firmware authentication not available\n");
+ return;
+ }
+ gsa_pdev = of_find_device_by_node(np);
+ if (!gsa_pdev) {
+ dev_err(dev, "GSA device not found\n");
+ of_node_put(np);
+ return;
}
+ gxp->gsa_dev = get_device(&gsa_pdev->dev);
+ of_node_put(np);
+ dev_info(dev, "GSA device found, Firmware authentication available\n");
+}
+
+static void gxp_put_gsa_dev(struct gxp_dev *gxp)
+{
+ put_device(gxp->gsa_dev);
+}
+
+static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_dev *gxp)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ u64 prop;
+
+ dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+
+ platform_set_drvdata(pdev, gxp);
+ gxp->dev = dev;
+ if (gxp->parse_dt) {
+ ret = gxp->parse_dt(pdev, gxp);
+ if (ret)
+ return ret;
+ }
+
+ ret = gxp_set_reg_resources(pdev, gxp);
+ if (ret)
+ return ret;
+
+ ret = gxp_wakelock_init(gxp);
+ if (ret) {
+ dev_err(dev, "failed to init wakelock: %d", ret);
+ return ret;
+ }
+
+ ret = gxp_pm_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
+ goto err_wakelock_destroy;
+ }
+
+ gxp_get_gsa_dev(gxp);
+ gxp_get_tpu_dev(gxp);
ret = gxp_dma_init(gxp);
if (ret) {
@@ -1806,12 +1868,11 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
}
gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_MAILBOXES);
- if (IS_ERR_OR_NULL(gxp->mailbox_mgr)) {
- dev_err(dev, "Failed to create mailbox manager\n");
- ret = -ENOMEM;
+ if (IS_ERR(gxp->mailbox_mgr)) {
+ ret = PTR_ERR(gxp->mailbox_mgr);
+ dev_err(dev, "Failed to create mailbox manager: %d\n", ret);
goto err_dma_exit;
}
-
if (gxp_is_direct_mode(gxp)) {
#if GXP_USE_LEGACY_MAILBOX
gxp_mailbox_init(gxp->mailbox_mgr);
@@ -1825,12 +1886,11 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
#else
ret = gxp_debug_dump_init(gxp, NULL, NULL);
#endif // !CONFIG_SUBSYSTEM_COREDUMP
- if (ret) {
- dev_err(dev, "Failed to initialize debug dump\n");
- gxp_debug_dump_exit(gxp);
- }
+ if (ret)
+ dev_warn(dev, "Failed to initialize debug dump\n");
mutex_init(&gxp->pin_user_pages_lock);
+ mutex_init(&gxp->secure_vd_lock);
gxp->domain_pool = kmalloc(sizeof(*gxp->domain_pool), GFP_KERNEL);
if (!gxp->domain_pool) {
@@ -1845,6 +1905,7 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
ret);
goto err_free_domain_pool;
}
+
ret = gxp_fw_init(gxp);
if (ret) {
dev_err(dev,
@@ -1852,29 +1913,9 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
ret);
goto err_domain_pool_destroy;
}
- gxp_vd_init(gxp);
- gxp_dma_init_default_resources(gxp);
- /* Get GSA device from device tree */
- np = of_parse_phandle(dev->of_node, "gsa-device", 0);
- if (!np) {
- dev_warn(
- dev,
- "No gsa-device in device tree. Firmware authentication not available\n");
- } else {
- gsa_pdev = of_find_device_by_node(np);
- if (!gsa_pdev) {
- dev_err(dev, "GSA device not found\n");
- of_node_put(np);
- ret = -ENODEV;
- goto err_vd_destroy;
- }
- gxp->gsa_dev = get_device(&gsa_pdev->dev);
- of_node_put(np);
- dev_info(
- dev,
- "GSA device found, Firmware authentication available\n");
- }
+ gxp_dma_init_default_resources(gxp);
+ gxp_vd_init(gxp);
ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
&prop);
@@ -1885,23 +1926,29 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
gxp->memory_per_core = (u32)prop;
}
- gxp_fw_data_init(gxp);
+ ret = gxp_fw_data_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize firmware data: %d\n", ret);
+ goto err_vd_destroy;
+ }
+
ret = gxp_core_telemetry_init(gxp);
if (ret) {
dev_err(dev, "Failed to initialize core telemetry (ret=%d)", ret);
goto err_fw_data_destroy;
}
- gxp_create_debugfs(gxp);
gxp->thermal_mgr = gxp_thermal_init(gxp);
- if (!gxp->thermal_mgr)
- dev_err(dev, "Failed to init thermal driver\n");
+ if (IS_ERR(gxp->thermal_mgr)) {
+ ret = PTR_ERR(gxp->thermal_mgr);
+ dev_warn(dev, "Failed to init thermal driver: %d\n", ret);
+ }
INIT_LIST_HEAD(&gxp->client_list);
mutex_init(&gxp->client_list_lock);
if (gxp->after_probe) {
ret = gxp->after_probe(gxp);
if (ret)
- goto err_vd_destroy;
+ goto err_thermal_destroy;
}
gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
@@ -1913,6 +1960,7 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
goto err_before_remove;
}
+ gxp_create_debugfs(gxp);
gxp_debug_pointer = gxp;
dev_info(dev, "Probe finished");
@@ -1921,12 +1969,12 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
err_before_remove:
if (gxp->before_remove)
gxp->before_remove(gxp);
-err_vd_destroy:
- gxp_remove_debugfs(gxp);
+err_thermal_destroy:
+ /* thermal init doesn't need revert */
gxp_core_telemetry_exit(gxp);
err_fw_data_destroy:
gxp_fw_data_destroy(gxp);
- put_device(gxp->gsa_dev);
+err_vd_destroy:
gxp_vd_destroy(gxp);
gxp_fw_destroy(gxp);
err_domain_pool_destroy:
@@ -1935,12 +1983,15 @@ err_free_domain_pool:
kfree(gxp->domain_pool);
err_debug_dump_exit:
gxp_debug_dump_exit(gxp);
+ /* mailbox manager init doesn't need revert */
err_dma_exit:
gxp_dma_exit(gxp);
err_put_tpu_dev:
- put_device(gxp->tpu_dev.dev);
-err_pm_destroy:
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
gxp_pm_destroy(gxp);
+err_wakelock_destroy:
+ /* wakelock init doesn't need revert */
return ret;
}
@@ -1948,21 +1999,20 @@ static int gxp_common_platform_remove(struct platform_device *pdev)
{
struct gxp_dev *gxp = platform_get_drvdata(pdev);
+ gxp_remove_debugfs(gxp);
misc_deregister(&gxp->misc_dev);
if (gxp->before_remove)
gxp->before_remove(gxp);
- gxp_remove_debugfs(gxp);
gxp_core_telemetry_exit(gxp);
gxp_fw_data_destroy(gxp);
- if (gxp->gsa_dev)
- put_device(gxp->gsa_dev);
gxp_vd_destroy(gxp);
gxp_fw_destroy(gxp);
gxp_domain_pool_destroy(gxp->domain_pool);
kfree(gxp->domain_pool);
gxp_debug_dump_exit(gxp);
gxp_dma_exit(gxp);
- put_device(gxp->tpu_dev.dev);
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
gxp_pm_destroy(gxp);
gxp_debug_pointer = NULL;
diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c
index eafa4b7..bce27c6 100644
--- a/gxp-core-telemetry.c
+++ b/gxp-core-telemetry.c
@@ -64,11 +64,55 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
size_t size);
static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
+/**
+ * enable_telemetry_buffers() - enable the telemetry buffers from host.
+ *
+ * @gxp: The GXP device the buffers were allocated for.
+ * @data: The data describing a set of core telemetry buffers to be enabled.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `gxp_fw_data_set_core_telemetry_descriptors()`
+ */
+static int enable_telemetry_buffers(struct gxp_dev *gxp,
+ struct buffer_data *data, u8 type)
+{
+ int i, ret;
+
+ /* Initialize the per core telemetry buffers header with magic code. */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * First 64 bytes of per core telemetry buffers are reserved
+ * for buffer metadata header. We don't need to explicitly
+ * reset the header fields as during buffer allocation the
+ * entire buffer is zeroed out. First 4 bytes of buffer
+ * metadata header are reserved for valid_magic field.
+ */
+ *((uint *)data->buffers[i].vaddr) =
+ GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE;
+ }
+
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ ret = gxp_fw_data_set_core_telemetry_descriptors(
+ gxp, type, data->host_status, data->buffers, data->size);
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "setting telemetry buffers in scratchpad region failed (ret=%d).",
+ ret);
+ return ret;
+ }
+
+ data->is_enabled = true;
+ return 0;
+}
+
int gxp_core_telemetry_init(struct gxp_dev *gxp)
{
struct gxp_core_telemetry_manager *mgr;
struct buffer_data *log_buff_data, *trace_buff_data;
- uint i;
+ int i, ret;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -102,6 +146,7 @@ int gxp_core_telemetry_init(struct gxp_dev *gxp)
dev_warn(gxp->dev,
"Failed to allocate per core log buffer of %u bytes\n",
gxp_core_telemetry_buffer_size);
+ ret = -ENOMEM;
goto err_free_buffers;
}
@@ -111,19 +156,39 @@ int gxp_core_telemetry_init(struct gxp_dev *gxp)
"Failed to allocate per core trace buffer of %u bytes\n",
gxp_core_telemetry_buffer_size);
free_telemetry_buffers(gxp, log_buff_data);
+ ret = -ENOMEM;
goto err_free_buffers;
}
+
+ ret = enable_telemetry_buffers(gxp, log_buff_data,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+ ret = enable_telemetry_buffers(gxp, trace_buff_data,
+ GXP_TELEMETRY_TYPE_TRACING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+
gxp->core_telemetry_mgr->logging_buff_data = log_buff_data;
gxp->core_telemetry_mgr->tracing_buff_data = trace_buff_data;
mutex_unlock(&mgr->lock);
return 0;
+err_free:
+ free_telemetry_buffers(gxp, log_buff_data);
+ free_telemetry_buffers(gxp, trace_buff_data);
err_free_buffers:
mutex_unlock(&mgr->lock);
mutex_destroy(&mgr->lock);
devm_kfree(gxp->dev, mgr);
gxp->core_telemetry_mgr = NULL;
- return -ENOMEM;
+ return ret;
}
/* Wrapper struct to be used by the core telemetry vma_ops. */
diff --git a/gxp-dci.c b/gxp-dci.c
index b742a6e..9fdf25c 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -20,11 +20,11 @@
#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
-static int gxp_dci_mailbox_manager_execute_cmd(struct gxp_mailbox *mailbox,
- u16 cmd_code, u8 cmd_priority,
- u64 cmd_daddr, u32 cmd_size,
- u32 cmd_flags, u64 *resp_seq,
- u16 *resp_status)
+static int gxp_dci_mailbox_manager_execute_cmd(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, u8 num_cores, struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status)
{
struct gxp_dci_command cmd;
struct gxp_dci_response resp;
@@ -72,7 +72,7 @@ static int gxp_dci_mailbox_manager_execute_cmd_async(
cmd.priority = cmd_priority; /* currently unused */
cmd.buffer_descriptor = buffer;
- ret = gxp_dci_execute_cmd_async(mailbox, &cmd, &resp_queue->queue,
+ ret = gxp_dci_execute_cmd_async(mailbox, &cmd, &resp_queue->dest_queue,
&resp_queue->lock, &resp_queue->waitq,
requested_states, eventfd);
@@ -102,14 +102,14 @@ static int gxp_dci_mailbox_manager_wait_async_resp(struct gxp_client *client,
* proceed per wake event.
*/
timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
- resp_queue->waitq, !list_empty(&resp_queue->queue),
+ resp_queue->waitq, !list_empty(&resp_queue->dest_queue),
resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
if (timeout <= 0) {
spin_unlock_irq(&resp_queue->lock);
/* unusual case - this only happens when there is no command pushed */
return timeout ? -ETIMEDOUT : timeout;
}
- resp_ptr = list_first_entry(&resp_queue->queue,
+ resp_ptr = list_first_entry(&resp_queue->dest_queue,
struct gxp_dci_async_response, list_entry);
/* Pop the front of the response list */
@@ -177,9 +177,9 @@ static void gxp_dci_mailbox_manager_release_unconsumed_async_resps(
* Do it anyway for consistency.
*/
spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
- list_for_each_entry_safe (cur, nxt,
- &vd->mailbox_resp_queues[i].queue,
- list_entry) {
+ list_for_each_entry_safe (
+ cur, nxt, &vd->mailbox_resp_queues[i].dest_queue,
+ list_entry) {
list_del(&cur->list_entry);
gcip_mailbox_release_awaiter(cur->awaiter);
}
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 646804e..a29d6af 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -32,16 +32,13 @@
#define SSCD_MSG_LENGTH 64
-#define SYNC_BARRIER_BLOCK 0x00100000
-#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
+#define SYNC_BARRIER_BLOCK 0x00100000
+#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
#define DEBUG_DUMP_MEMORY_SIZE 0x400000 /* size in bytes */
/* Enum indicating the debug dump request reason. */
-enum gxp_debug_dump_init_type {
- DEBUG_DUMP_FW_INIT,
- DEBUG_DUMP_KERNEL_INIT
-};
+enum gxp_debug_dump_init_type { DEBUG_DUMP_FW_INIT, DEBUG_DUMP_KERNEL_INIT };
enum gxp_common_segments_idx {
GXP_COMMON_REGISTERS_IDX,
@@ -85,9 +82,9 @@ static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
return gxp_read_32(gxp, barrier_reg_offset);
}
-static void
-gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_common_registers *common_regs)
+static void gxp_get_common_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_common_registers *common_regs)
{
int i;
u32 addr;
@@ -149,7 +146,13 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
{
struct gxp_lpm_state_table_registers *state_table_regs;
int i, j;
- uint offset;
+ uint offset, lpm_psm_offset;
+
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ lpm_psm_offset = 0;
+#else
+ lpm_psm_offset = GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm);
+#endif
/* Get State Table registers */
for (i = 0; i < PSM_STATE_TABLE_COUNT; i++) {
@@ -157,57 +160,56 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
/* Get Trans registers */
for (j = 0; j < PSM_TRANS_COUNT; j++) {
- offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j);
- state_table_regs->trans[j].next_state =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_NEXT_STATE_OFFSET);
+ offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j) +
+ lpm_psm_offset;
+ state_table_regs->trans[j].next_state = lpm_read_32(
+ gxp, offset + PSM_NEXT_STATE_OFFSET);
state_table_regs->trans[j].seq_addr =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_SEQ_ADDR_OFFSET);
+ lpm_read_32(gxp, offset + PSM_SEQ_ADDR_OFFSET);
state_table_regs->trans[j].timer_val =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_VAL_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_VAL_OFFSET);
state_table_regs->trans[j].timer_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_EN_OFFSET);
- state_table_regs->trans[j].trigger_num =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_NUM_OFFSET);
- state_table_regs->trans[j].trigger_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_EN_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_EN_OFFSET);
+ state_table_regs->trans[j].trigger_num = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_NUM_OFFSET);
+ state_table_regs->trans[j].trigger_en = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_EN_OFFSET);
}
- state_table_regs->enable_state =
- lpm_read_32_psm(gxp, psm, PSM_STATE_TABLE_BASE(i) +
- PSM_ENABLE_STATE_OFFSET);
+ state_table_regs->enable_state = lpm_read_32(
+ gxp, lpm_psm_offset + PSM_STATE_TABLE_BASE(i) +
+ PSM_ENABLE_STATE_OFFSET);
}
/* Get DMEM registers */
for (i = 0; i < PSM_DATA_COUNT; i++) {
- offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET;
- psm_regs->data[i] = lpm_read_32_psm(gxp, psm, offset);
+ offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET + lpm_psm_offset;
+ psm_regs->data[i] = lpm_read_32(gxp, offset);
}
- psm_regs->cfg = lpm_read_32_psm(gxp, psm, PSM_CFG_OFFSET);
- psm_regs->status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ psm_regs->cfg = lpm_read_32(gxp, lpm_psm_offset + PSM_CFG_OFFSET);
+ psm_regs->status = lpm_read_32(gxp, lpm_psm_offset + PSM_STATUS_OFFSET);
/* Get Debug CSR registers */
- psm_regs->debug_cfg = lpm_read_32_psm(gxp, psm, PSM_DEBUG_CFG_OFFSET);
- psm_regs->break_addr = lpm_read_32_psm(gxp, psm, PSM_BREAK_ADDR_OFFSET);
- psm_regs->gpin_lo_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_LO_RD_OFFSET);
- psm_regs->gpin_hi_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_HI_RD_OFFSET);
+ psm_regs->debug_cfg =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_CFG_OFFSET);
+ psm_regs->break_addr =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_BREAK_ADDR_OFFSET);
+ psm_regs->gpin_lo_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_LO_RD_OFFSET);
+ psm_regs->gpin_hi_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_HI_RD_OFFSET);
psm_regs->gpout_lo_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_LO_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_LO_RD_OFFSET);
psm_regs->gpout_hi_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_HI_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_HI_RD_OFFSET);
psm_regs->debug_status =
- lpm_read_32_psm(gxp, psm, PSM_DEBUG_STATUS_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_STATUS_OFFSET);
}
-static void
-gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_lpm_registers *lpm_regs)
+static void gxp_get_lpm_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_lpm_registers *lpm_regs)
{
int i;
uint offset;
@@ -366,7 +368,8 @@ static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
*/
vd = gxp->core_to_vd[core_header->core_id];
if (!vd) {
- dev_err(gxp->dev, "Virtual device is not available for vunmap\n");
+ dev_err(gxp->dev,
+ "Virtual device is not available for vunmap\n");
return;
}
@@ -553,9 +556,9 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
seg_idx++;
- data_addr = &core_dump->dump_data[core_id *
- core_header->core_dump_size /
- sizeof(u32)];
+ data_addr =
+ &core_dump->dump_data[core_id * core_header->core_dump_size /
+ sizeof(u32)];
for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 8f118d4..a719062 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -23,9 +23,13 @@
#include "gxp-wakelock.h"
#include "gxp.h"
+#if GXP_HAS_MCU
+#include "gxp-mcu-platform.h"
+#endif
+
static int gxp_debugfs_lpm_test(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
dev_info(gxp->dev, "%llu\n", val);
@@ -36,45 +40,85 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_lpm_test_fops, NULL, gxp_debugfs_lpm_test,
static int gxp_debugfs_mailbox(void *data, u64 val)
{
- int core, retval;
+ int core = 0, retval;
u16 status;
struct gxp_dev *gxp = (struct gxp_dev *)data;
+ struct gxp_mailbox *mbx;
+ struct gxp_power_states power_states = {
+ .power = GXP_POWER_STATE_NOM,
+ .memory = MEMORY_POWER_STATE_UNDEFINED,
+ };
+ u16 cmd_code;
+ int ret;
- core = val / 1000;
- if (core >= GXP_NUM_CORES) {
- dev_notice(gxp->dev,
- "Mailbox for core %d doesn't exist.\n", core);
- return -EINVAL;
- }
+ mutex_lock(&gxp->debugfs_client_lock);
- if (gxp->mailbox_mgr->mailboxes[core] == NULL) {
- dev_notice(
- gxp->dev,
- "Unable to send mailbox command -- mailbox %d not ready\n",
- core);
- return -EINVAL;
+ if (gxp_is_direct_mode(gxp)) {
+ core = val / 1000;
+ if (core >= GXP_NUM_CORES) {
+ dev_notice(gxp->dev,
+ "Mailbox for core %d doesn't exist.\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->mailbox_mgr->mailboxes[core] == NULL) {
+ dev_notice(
+ gxp->dev,
+ "Unable to send mailbox command -- mailbox %d not ready\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mbx = gxp->mailbox_mgr->mailboxes[core];
+ cmd_code = GXP_MBOX_CODE_DISPATCH;
+#if GXP_HAS_MCU
+ } else {
+ if (!gxp->debugfs_client) {
+ dev_err(gxp->dev,
+ "You should load firmwares via gxp/firmware_run first\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ mbx = to_mcu_dev(gxp)->mcu.uci.mbx;
+ if (!mbx) {
+ dev_err(gxp->dev, "UCI is not initialized.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ cmd_code = CORE_COMMAND;
+#endif
}
down_read(&gxp->vd_semaphore);
- retval =
- gxp->mailbox_mgr->execute_cmd(gxp->mailbox_mgr->mailboxes[core],
- val, 0, 0, 0, 0, NULL, &status);
+ /* In direct mode, gxp->debugfs_client and core will be ignored. */
+ retval = gxp->mailbox_mgr->execute_cmd(gxp->debugfs_client, mbx, core,
+ cmd_code, 0, 0, 0, 0, 1,
+ power_states, NULL, &status);
up_read(&gxp->vd_semaphore);
dev_info(
gxp->dev,
- "Mailbox Command Sent: cmd.code=%d, resp.status=%d, resp.retval=%d\n",
- (u16)val, status, retval);
- return 0;
+ "Mailbox Command Sent: core=%d, resp.status=%d, resp.retval=%d\n",
+ core, status, retval);
+ ret = 0;
+out:
+ mutex_unlock(&gxp->debugfs_client_lock);
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(gxp_mailbox_fops, NULL, gxp_debugfs_mailbox, "%llu\n");
static int gxp_firmware_run_set(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
struct gxp_client *client;
int ret = 0;
uint core;
+ bool acquired_block_wakelock;
ret = gxp_firmware_request_if_needed(gxp);
if (ret) {
@@ -86,7 +130,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
if (val) {
if (gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware already running!\n");
+ dev_err(gxp->dev, "Firmware is already running!\n");
ret = -EIO;
goto out;
}
@@ -106,6 +150,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
goto out;
}
}
+ up_write(&gxp->vd_semaphore);
/*
* Cleanup any bad state or corruption the device might've
@@ -121,31 +166,31 @@ static int gxp_firmware_run_set(void *data, u64 val)
}
gxp->debugfs_client = client;
- gxp->debugfs_client->vd = gxp_vd_allocate(gxp, GXP_NUM_CORES);
- if (IS_ERR(gxp->debugfs_client->vd)) {
+ down_write(&client->semaphore);
+
+ ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES, 0);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate VD\n");
- ret = PTR_ERR(gxp->debugfs_client->vd);
- goto err_wakelock;
+ goto err_destroy_client;
}
- ret = gxp_wakelock_acquire(gxp);
+ ret = gxp_client_acquire_block_wakelock(
+ client, &acquired_block_wakelock);
if (ret) {
dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
- goto err_wakelock;
+ goto err_destroy_client;
}
- gxp->debugfs_client->has_block_wakelock = true;
- gxp_pm_update_requested_power_states(gxp, off_states, uud_states);
- ret = gxp_vd_run(gxp->debugfs_client->vd);
- up_write(&gxp->vd_semaphore);
+ ret = gxp_client_acquire_vd_wakelock(client, uud_states);
if (ret) {
- dev_err(gxp->dev, "Failed to start VD\n");
- goto err_start;
+ dev_err(gxp->dev, "Failed to acquire VD wakelock\n");
+ goto err_release_block_wakelock;
}
- gxp->debugfs_client->has_vd_wakelock = true;
+
+ up_write(&client->semaphore);
} else {
if (!gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware not running!\n");
+ dev_err(gxp->dev, "Firmware is not running!\n");
ret = -EIO;
goto out;
}
@@ -156,7 +201,6 @@ static int gxp_firmware_run_set(void *data, u64 val)
*/
gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
- gxp_pm_update_requested_power_states(gxp, uud_states, off_states);
}
out:
@@ -164,12 +208,12 @@ out:
return ret;
-err_start:
- gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_states(gxp, uud_states, off_states);
-err_wakelock:
+err_release_block_wakelock:
+ gxp_client_release_block_wakelock(client);
+err_destroy_client:
+ up_write(&client->semaphore);
/* Destroying a client cleans up any VDss or wakelocks it held. */
- gxp_client_destroy(gxp->debugfs_client);
+ gxp_client_destroy(client);
gxp->debugfs_client = NULL;
mutex_unlock(&gxp->debugfs_client_lock);
return ret;
@@ -177,7 +221,7 @@ err_wakelock:
static int gxp_firmware_run_get(void *data, u64 *val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
down_read(&gxp->vd_semaphore);
*val = gxp->firmware_mgr->firmware_running;
@@ -213,7 +257,8 @@ static int gxp_wakelock_set(void *data, u64 val)
goto out;
}
gxp->debugfs_wakelock_held = true;
- gxp_pm_update_requested_power_states(gxp, off_states, uud_states);
+ gxp_pm_update_requested_power_states(gxp, off_states,
+ uud_states);
} else {
/* Wakelock Release */
if (!gxp->debugfs_wakelock_held) {
@@ -224,7 +269,8 @@ static int gxp_wakelock_set(void *data, u64 val)
gxp_wakelock_release(gxp);
gxp->debugfs_wakelock_held = false;
- gxp_pm_update_requested_power_states(gxp, uud_states, off_states);
+ gxp_pm_update_requested_power_states(gxp, uud_states,
+ off_states);
}
out:
@@ -383,7 +429,8 @@ static int gxp_cmu_mux1_set(void *data, u64 val)
return -ENODEV;
}
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
return -EINVAL;
}
@@ -415,7 +462,8 @@ static int gxp_cmu_mux2_set(void *data, u64 val)
return -ENODEV;
}
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
return -EINVAL;
}
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 7ca60ea..1480761 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -172,7 +172,7 @@ static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
struct iommu_domain *domain,
u8 slice_index)
{
- size_t shared_size = GXP_NUM_CORES * gxp->shared_slice_size;
+ size_t shared_size = gxp->shared_slice_size;
if (!gxp->shared_buf.paddr)
return 0;
@@ -185,7 +185,7 @@ static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
struct iommu_domain *domain)
{
- size_t shared_size = GXP_NUM_CORES * gxp->shared_slice_size;
+ size_t shared_size = gxp->shared_slice_size;
if (!gxp->shared_buf.paddr)
return;
@@ -696,6 +696,39 @@ void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
}
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot)
+{
+ ssize_t size_mapped;
+
+ size_mapped = (ssize_t)iommu_map_sg(gdomain->domain, iova, sgt->sgl,
+ sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev, "map IOVA %pad to SG table failed: %d", &iova,
+ (int)size_mapped);
+ if (size_mapped == 0)
+ return -EINVAL;
+ return size_mapped;
+ }
+
+ return 0;
+}
+
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ int i;
+ size_t size = 0;
+
+ for_each_sg (sgt->sgl, s, sgt->orig_nents, i)
+ size += s->length;
+
+ if (!iommu_unmap(gdomain->domain, iova, size))
+ dev_warn(gxp->dev, "Failed to unmap sgt");
+}
+
void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
diff --git a/gxp-dma.h b/gxp-dma.h
index 7b33121..da7d433 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -258,6 +258,30 @@ void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
enum dma_data_direction direction, unsigned long attrs);
/**
+ * gxp_dma_map_iova_sgt() - Create a mapping for a scatter-gather list, with specific IOVA.
+ * @gxp: The GXP device to map the scatter-gather list for
+ * @gdomain: The IOMMU domain to be mapped
+ * @iova: The IOVA to be mapped.
+ * @sgt: The scatter-gather list table of the buffer to be mapped
+ * @prot: The protection bits to be passed to IOMMU API
+ *
+ * Return: 0 on success. Negative errno otherwise.
+ */
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot);
+/**
+ * gxp_dma_unmap_iova_sgt() - Revert gxp_dma_map_iova_sgt()
+ * @gxp: The GXP device the scatter-gather list was mapped for
+ * @gdomain: The IOMMU domain mapping was mapped on
+ * @iova: The IOVA to be un-mapped.
+ * @sgt: The scatter-gather list to unmap; The same one passed to
+ * `gxp_dma_map_iova_sgt()`
+ */
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt);
+
+/**
* gxp_dma_sync_sg_for_cpu() - Sync sg list for reading by the CPU
* @gxp: The GXP device the mapping was created for
* @sg: The mapped scatter-gather list to be synced
@@ -326,4 +350,11 @@ struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp);
uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
struct gxp_iommu_domain *gdomain);
+/**
+ * gxp_iommu_setup_shareability() - Set shareability to enable IO-Coherency.
+ * @gxp: The GXP device to set shareability for
+ */
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp);
+
+
#endif /* __GXP_DMA_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index d755f84..fcf6a6f 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -40,6 +40,9 @@
static int gxp_dsp_fw_auth_disable;
module_param_named(dsp_fw_auth_disable, gxp_dsp_fw_auth_disable, int, 0660);
+static bool gxp_core_boot = true;
+module_param_named(core_boot, gxp_core_boot, bool, 0660);
+
static int
request_dsp_firmware(struct gxp_dev *gxp, char *name_prefix,
const struct firmware *out_firmwares[GXP_NUM_CORES])
@@ -828,7 +831,9 @@ static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
}
/* Mark this as a cold boot */
- gxp_firmware_set_boot_mode(gxp, core, GXP_BOOT_MODE_REQUEST_COLD_BOOT);
+ if (gxp_core_boot)
+ gxp_firmware_set_boot_mode(gxp, core,
+ GXP_BOOT_MODE_REQUEST_COLD_BOOT);
ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
/*verbose=*/true);
@@ -862,31 +867,34 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
- int ret;
struct work_struct *work;
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ int ret = 0;
- ret = gxp_firmware_handshake(gxp, core);
- if (ret) {
- dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
- core);
- gxp_pm_core_off(gxp, core);
- goto out_firmware_unload;
- }
-
- /* Initialize mailbox */
- if (gxp->mailbox_mgr->allocate_mailbox) {
- gxp->mailbox_mgr->mailboxes[core] =
- gxp->mailbox_mgr->allocate_mailbox(gxp->mailbox_mgr, vd,
- virt_core, core);
- if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
+ if (gxp_core_boot) {
+ ret = gxp_firmware_handshake(gxp, core);
+ if (ret) {
dev_err(gxp->dev,
- "Unable to allocate mailbox (core=%u, ret=%ld)\n",
- core,
- PTR_ERR(gxp->mailbox_mgr->mailboxes[core]));
- ret = PTR_ERR(gxp->mailbox_mgr->mailboxes[core]);
- gxp->mailbox_mgr->mailboxes[core] = NULL;
- goto out_firmware_unload;
+ "Firmware handshake failed on core %u\n", core);
+ goto err_firmware_off;
+ }
+
+ /* Initialize mailbox */
+ if (gxp->mailbox_mgr->allocate_mailbox) {
+ gxp->mailbox_mgr->mailboxes[core] =
+ gxp->mailbox_mgr->allocate_mailbox(
+ gxp->mailbox_mgr, vd, virt_core, core);
+ if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
+ dev_err(gxp->dev,
+ "Unable to allocate mailbox (core=%u, ret=%ld)\n",
+ core,
+ PTR_ERR(gxp->mailbox_mgr
+ ->mailboxes[core]));
+ ret = PTR_ERR(
+ gxp->mailbox_mgr->mailboxes[core]);
+ gxp->mailbox_mgr->mailboxes[core] = NULL;
+ goto err_firmware_off;
+ }
}
}
@@ -904,7 +912,9 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
return ret;
-out_firmware_unload:
+err_firmware_off:
+ if (gxp_core_boot)
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
return ret;
}
@@ -925,21 +935,24 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
gxp_notification_unregister_handler(gxp, core,
HOST_NOTIF_CORE_TELEMETRY_STATUS);
- if (gxp->mailbox_mgr->release_mailbox) {
- gxp->mailbox_mgr->release_mailbox(
- gxp->mailbox_mgr, vd, virt_core,
- gxp->mailbox_mgr->mailboxes[core]);
- dev_notice(gxp->dev, "Mailbox %u released\n", core);
- }
+ if (gxp_core_boot) {
+ if (gxp->mailbox_mgr->release_mailbox) {
+ gxp->mailbox_mgr->release_mailbox(
+ gxp->mailbox_mgr, vd, virt_core,
+ gxp->mailbox_mgr->mailboxes[core]);
+ dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ }
- if (vd->state == GXP_VD_RUNNING) {
- /*
- * Disable interrupts to prevent cores from being woken up
- * unexpectedly.
- */
- disable_core_interrupts(gxp, core);
- gxp_pm_core_off(gxp, core);
+ if (vd->state == GXP_VD_RUNNING) {
+ /*
+ * Disable interrupts to prevent cores from being woken up
+ * unexpectedly.
+ */
+ disable_core_interrupts(gxp, core);
+ gxp_pm_core_off(gxp, core);
+ }
}
+
gxp_firmware_unload(gxp, core);
}
@@ -970,7 +983,8 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
if (!(failed_cores & BIT(core))) {
- gxp_pm_core_off(gxp, core);
+ if (gxp_core_boot)
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
}
@@ -991,8 +1005,11 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
#endif
/* Switch clock mux to the normal state to guarantee LPM works */
- gxp_pm_force_clkmux_normal(gxp);
- gxp_firmware_wakeup_cores(gxp, core_list);
+ if (gxp_core_boot) {
+ gxp_pm_force_clkmux_normal(gxp);
+ gxp_firmware_wakeup_cores(gxp, core_list);
+ }
+
virt_core = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
@@ -1021,7 +1038,8 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
}
/* Check if we need to set clock mux to low state as requested */
- gxp_pm_resume_clkmux(gxp);
+ if (gxp_core_boot)
+ gxp_pm_resume_clkmux(gxp);
return ret;
}
@@ -1030,7 +1048,8 @@ int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
bool verbose)
{
gxp_program_reset_vector(gxp, core, verbose);
- return gxp_pm_core_on(gxp, core, verbose);
+
+ return gxp_core_boot ? gxp_pm_core_on(gxp, core, verbose) : 0;
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 73a21ba..aff602a 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -8,6 +8,7 @@
#define __GXP_FIRMWARE_H__
#include <linux/bitops.h>
+#include <linux/sizes.h>
#include "gxp-config.h"
#include "gxp-internal.h"
@@ -37,6 +38,9 @@
#define SCRATCHPAD_MSG_OFFSET(_msg_) (_msg_ << 2)
+#define PRIVATE_FW_DATA_SIZE SZ_2M
+#define SHARED_FW_DATA_SIZE SZ_1M
+
struct gxp_firmware_manager {
const struct firmware *firmwares[GXP_NUM_CORES];
char *firmware_name;
diff --git a/gxp-internal.h b/gxp-internal.h
index 0415163..6988bf8 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -118,6 +118,9 @@ struct gxp_dev {
struct gcip_domain_pool *domain_pool;
struct list_head client_list;
struct mutex client_list_lock;
+ /* Pointer and mutex of secure virtual device */
+ struct gxp_virtual_device *secure_vd;
+ struct mutex secure_vd_lock;
/*
* Buffer shared across firmware.
* Its paddr is 0 if the shared buffer is not available.
@@ -138,6 +141,8 @@ struct gxp_dev {
unsigned int num_shared_slices;
struct gxp_usage_stats *usage_stats; /* Stores the usage stats */
+ void __iomem *sysreg_shareability; /* sysreg shareability csr base */
+
/* callbacks for chip-dependent implementations */
/*
diff --git a/gxp-kci.c b/gxp-kci.c
index 8c76190..72ffa4e 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -518,7 +518,7 @@ int gxp_kci_shutdown(struct gxp_kci *gkci)
}
int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u32 client_id, u8 num_cores,
- u8 slice_index)
+ u8 slice_index, bool first_open)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_ALLOCATE_VMBOX,
@@ -538,6 +538,7 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u32 client_id, u8 num_cores,
detail->client_id = client_id;
detail->num_cores = num_cores;
detail->slice_index = slice_index;
+ detail->first_open = first_open;
cmd.dma.address = buf.daddr;
cmd.dma.size = sizeof(*detail);
diff --git a/gxp-kci.h b/gxp-kci.h
index 99a2c81..ca7c143 100644
--- a/gxp-kci.h
+++ b/gxp-kci.h
@@ -77,8 +77,10 @@ struct gxp_kci_allocate_vmbox_detail {
* used for MCU<->core mailbox.
*/
u8 slice_index;
+ /* Whether it's the first time allocating a VMBox for this VD. */
+ bool first_open;
/* Reserved */
- u8 reserved[58];
+ u8 reserved[57];
} __packed;
/* Used when sending the details about release_vmbox KCI command. */
@@ -197,7 +199,7 @@ int gxp_kci_notify_throttling(struct gxp_kci *gkci, u32 rate);
* Returns the code of response, or a negative errno on error.
*/
int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u32 client_id, u8 num_cores,
- u8 slice_index);
+ u8 slice_index, bool first_open);
/*
* Releases a virtual mailbox which is allocated by `gxp_kci_allocate_vmbox`.
diff --git a/gxp-lpm.h b/gxp-lpm.h
index 22ae00f..5af1c89 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -106,11 +106,17 @@ void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
return readl(gxp->lpm_regs.vaddr + reg_offset);
}
static inline void lpm_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
writel(value, gxp->lpm_regs.vaddr + reg_offset);
}
diff --git a/gxp-mailbox-manager.h b/gxp-mailbox-manager.h
index a414080..24cd16b 100644
--- a/gxp-mailbox-manager.h
+++ b/gxp-mailbox-manager.h
@@ -54,11 +54,14 @@ typedef void (*reset_mailbox_t)(struct gxp_mailbox *mailbox);
* Returns the value `retval` of `struct gxp_response` when the request succeeds. Otherwise,
* returns a negative value as an error.
*
- * This callback is required if the device is in direct mode, otherwise it is optional.
+ * This callback is always required regardless of the mode of the device.
*/
-typedef int (*execute_cmd_t)(struct gxp_mailbox *mailbox, u16 cmd_code,
- u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
- u32 cmd_flags, u64 *resp_seq, u16 *resp_status);
+typedef int (*execute_cmd_t)(struct gxp_client *client,
+ struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
+ u32 cmd_size, u32 cmd_flags, u8 num_cores,
+ struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status);
/*
* Called when requests asynchronous commands. This callback will be called when
@@ -75,7 +78,8 @@ typedef int (*execute_cmd_async_t)(struct gxp_client *client,
struct gxp_mailbox *mailbox, int virt_core,
u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
u32 cmd_size, u32 cmd_flags,
- struct gxp_power_states power_states, u64 *cmd_seq);
+ struct gxp_power_states power_states,
+ u64 *cmd_seq);
/*
* Called when waiting for an asynchronous response which is requested by `execute_cmd_async`.
@@ -96,7 +100,7 @@ typedef int (*wait_async_resp_t)(struct gxp_client *client, int virt_core,
* Called when cleans up unconsumed async responses in the queue which arrived or timed out.
* This callback will be called when the @vd is released.
*
- * This callback is always required regardless of whether the device is in direct mode.
+ * This callback is always required regardless of the mode of the device.
*/
typedef void (*release_unconsumed_async_resps_t)(struct gxp_virtual_device *vd);
diff --git a/gxp-mcu-fs.c b/gxp-mcu-fs.c
index 8805cc6..0a5fe7d 100644
--- a/gxp-mcu-fs.c
+++ b/gxp-mcu-fs.c
@@ -19,6 +19,101 @@
#include "gxp-uci.h"
#include "gxp.h"
+static int
+gxp_ioctl_uci_command(struct gxp_client *client,
+ struct gxp_mailbox_uci_command_ioctl __user *argp)
+{
+ struct gxp_mailbox_uci_command_ioctl ibuf;
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mcu *mcu = gxp_mcu_of(gxp);
+ struct gxp_uci_command cmd = {};
+ int ret;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_UCI_COMMAND")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Caller must hold BLOCK wakelock */
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAILBOX_UCI_COMMAND requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memcpy(cmd.opaque, ibuf.opaque, sizeof(cmd.opaque));
+
+ cmd.client_id = client->vd->client_id;
+
+ ret = gxp_uci_send_command(
+ &mcu->uci, client->vd, &cmd,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
+ client->mb_eventfds[UCI_RESOURCE_ID]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to enqueue mailbox command (ret=%d)\n", ret);
+ goto out;
+ }
+ ibuf.sequence_number = cmd.seq;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+out:
+ up_read(&client->semaphore);
+ return ret;
+}
+
+static int
+gxp_ioctl_uci_response(struct gxp_client *client,
+ struct gxp_mailbox_uci_response_ioctl __user *argp)
+{
+ struct gxp_mailbox_uci_response_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_UCI_RESPONSE")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Caller must hold BLOCK wakelock */
+ if (!client->has_block_wakelock) {
+ dev_err(client->gxp->dev,
+ "GXP_MAILBOX_UCI_RESPONSE requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = gxp_uci_wait_async_response(
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID],
+ &ibuf.sequence_number, &ibuf.error_code, ibuf.opaque);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
struct gxp_mailbox_command_ioctl *ibuf)
{
@@ -74,7 +169,7 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
ret = -EINVAL;
goto out;
}
- cmd.priority = core;
+ cmd.core_id = core;
}
cmd.client_id = client->vd->client_id;
@@ -85,7 +180,8 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
*/
ret = gxp_uci_send_command(
&mcu->uci, client->vd, &cmd,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
client->mb_eventfds[ibuf->virtual_core_id]);
@@ -101,8 +197,9 @@ out:
return ret;
}
-static int gxp_ioctl_uci_command(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl __user *argp)
+static int
+gxp_ioctl_uci_command_legacy(struct gxp_client *client,
+ struct gxp_mailbox_command_ioctl __user *argp)
{
struct gxp_mailbox_command_ioctl ibuf;
int ret;
@@ -121,8 +218,8 @@ static int gxp_ioctl_uci_command(struct gxp_client *client,
}
static int
-gxp_ioctl_uci_response(struct gxp_client *client,
- struct gxp_mailbox_response_ioctl __user *argp)
+gxp_ioctl_uci_response_legacy(struct gxp_client *client,
+ struct gxp_mailbox_response_ioctl __user *argp)
{
struct gxp_mailbox_response_ioctl ibuf;
int ret = 0;
@@ -147,10 +244,11 @@ gxp_ioctl_uci_response(struct gxp_client *client,
ret = gxp_uci_wait_async_response(
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID],
- &ibuf.sequence_number, &ibuf.cmd_retval, &ibuf.error_code);
+ &ibuf.sequence_number, &ibuf.error_code, NULL);
if (ret)
goto out;
+ ibuf.cmd_retval = 0;
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
ret = -EFAULT;
@@ -206,10 +304,10 @@ long gxp_mcu_ioctl(struct file *file, uint cmd, ulong arg)
return -ENOTTY;
switch (cmd) {
case GXP_MAILBOX_COMMAND:
- ret = gxp_ioctl_uci_command(client, argp);
+ ret = gxp_ioctl_uci_command_legacy(client, argp);
break;
case GXP_MAILBOX_RESPONSE:
- ret = gxp_ioctl_uci_response(client, argp);
+ ret = gxp_ioctl_uci_response_legacy(client, argp);
break;
case GXP_REGISTER_MCU_TELEMETRY_EVENTFD:
ret = gxp_register_mcu_telemetry_eventfd(client, argp);
@@ -217,6 +315,12 @@ long gxp_mcu_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD:
ret = gxp_unregister_mcu_telemetry_eventfd(client, argp);
break;
+ case GXP_MAILBOX_UCI_COMMAND:
+ ret = gxp_ioctl_uci_command(client, argp);
+ break;
+ case GXP_MAILBOX_UCI_RESPONSE:
+ ret = gxp_ioctl_uci_response(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
diff --git a/gxp-mcu.c b/gxp-mcu.c
index e59f865..b83d2ba 100644
--- a/gxp-mcu.c
+++ b/gxp-mcu.c
@@ -21,7 +21,7 @@
/* Allocates the MCU <-> cores shared buffer region. */
static int gxp_alloc_shared_buffer(struct gxp_dev *gxp, struct gxp_mcu *mcu)
{
- const size_t size = GXP_SHARED_BUFFER_SIZE * GXP_NUM_CORES;
+ const size_t size = GXP_SHARED_BUFFER_SIZE;
phys_addr_t paddr;
struct gxp_mapped_resource *res = &mcu->gxp->shared_buf;
size_t offset;
diff --git a/gxp-pm.c b/gxp-pm.c
index 33b2834..ead9d7c 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -16,11 +16,16 @@
#include "gxp-bpm.h"
#include "gxp-client.h"
#include "gxp-config.h"
+#include "gxp-dma.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-pm.h"
+#define SHUTDOWN_DELAY_US_MIN 200
+#define SHUTDOWN_DELAY_US_MAX 400
+#define SHUTDOWN_MAX_DELAY_COUNT 20
+
/*
* The order of this array decides the voting priority, should be increasing in
* frequencies.
@@ -34,17 +39,20 @@ static const uint aur_memory_state_array[] = {
AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
};
-/*
- * TODO(b/177692488): move frequency values into chip-specific config.
- * TODO(b/221168126): survey how these value are derived from. Below
- * values are copied from the implementation in TPU firmware for PRO,
- * i.e. google3/third_party/darwinn/firmware/janeiro/power_manager.cc.
- */
-static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200000,
- 332000, 465000, 533000 };
-static const s32 aur_memory_state2mif_table[] = { 0, 0, 0,
- 1014000, 1352000, 2028000,
- 3172000 };
+static const s32 aur_memory_state2int_table[] = { 0,
+ AUR_MEM_INT_MIN,
+ AUR_MEM_INT_VERY_LOW,
+ AUR_MEM_INT_LOW,
+ AUR_MEM_INT_HIGH,
+ AUR_MEM_INT_VERY_HIGH,
+ AUR_MEM_INT_MAX };
+static const s32 aur_memory_state2mif_table[] = { 0,
+ AUR_MEM_MIF_MIN,
+ AUR_MEM_MIF_VERY_LOW,
+ AUR_MEM_MIF_LOW,
+ AUR_MEM_MIF_HIGH,
+ AUR_MEM_MIF_VERY_HIGH,
+ AUR_MEM_MIF_MAX };
static struct gxp_pm_device_ops gxp_aur_ops = {
.pre_blk_powerup = NULL,
@@ -219,15 +227,15 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
dev_info(gxp->dev, "Powering on BLK ...\n");
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_pm_blkpwr_up(gxp);
- if (!ret) {
- gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
- gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
- }
-
+ if (ret)
+ goto out;
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
+ gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
+ gxp_iommu_setup_shareability(gxp);
/* Startup TOP's PSM */
gxp_lpm_init(gxp);
gxp->power_mgr->blk_switch_count++;
-
+out:
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
@@ -260,6 +268,26 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return ret;
}
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp)
+{
+ int timeout_cnt = 0;
+ int curr_state;
+
+ if (!gxp->power_mgr->aur_status)
+ return gxp->power_mgr->curr_state == AUR_OFF;
+
+ do {
+ /* Delay 200~400us per retry until blk shutdown finishes */
+ usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
+ curr_state = readl(gxp->power_mgr->aur_status);
+ if (!curr_state)
+ return true;
+ timeout_cnt++;
+ } while (timeout_cnt < SHUTDOWN_MAX_DELAY_COUNT);
+
+ return false;
+}
+
int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp)
{
int ret;
@@ -677,6 +705,9 @@ int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val)
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+ struct platform_device *pdev =
+ container_of(gxp->dev, struct platform_device, dev);
+ struct resource *r;
uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
@@ -707,6 +738,20 @@ int gxp_pm_init(struct gxp_dev *gxp)
gxp->power_mgr->force_mux_normal_count = 0;
gxp->power_mgr->blk_switch_count = 0l;
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pmu_aur_status");
+ if (!r) {
+ dev_warn(gxp->dev, "Failed to find PMU register base\n");
+ } else {
+ gxp->power_mgr->aur_status = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(gxp->power_mgr->aur_status)) {
+ dev_err(gxp->dev,
+ "Failed to map PMU register base, ret=%ld\n",
+ PTR_ERR(gxp->power_mgr->aur_status));
+ gxp->power_mgr->aur_status = NULL;
+ }
+ }
+
pm_runtime_enable(gxp->dev);
exynos_pm_qos_add_request(&mgr->int_min, PM_QOS_DEVICE_THROUGHPUT, 0);
exynos_pm_qos_add_request(&mgr->mif_min, PM_QOS_BUS_THROUGHPUT, 0);
diff --git a/gxp-pm.h b/gxp-pm.h
index acf9205..188f449 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -138,6 +138,8 @@ struct gxp_power_manager {
/* Max frequency that the thermal driver/ACPM will allow in Hz */
unsigned long thermal_limit;
u64 blk_switch_count;
+ /* PMU AUR_STATUS base address for block status; may be NULL */
+ void __iomem *aur_status;
};
/**
@@ -163,6 +165,15 @@ int gxp_pm_blk_on(struct gxp_dev *gxp);
int gxp_pm_blk_off(struct gxp_dev *gxp);
/**
+ * gxp_pm_is_blk_down() - Check whether the blk is turned off or not.
+ * @gxp: The GXP device to check
+ *
+ * Return:
+ * * true - blk is turned off.
+ */
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp);
+
+/**
* gxp_pm_get_blk_state() - Get the blk power state
* @gxp: The GXP device to sample state
*
diff --git a/gxp-uci.c b/gxp-uci.c
index d9bf21f..83883f1 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -19,11 +19,61 @@
#include "gxp-vd.h"
#include "gxp.h"
+#if IS_ENABLED(CONFIG_GXP_TEST)
+#include "unittests/factory/fake-gxp-mcu-firmware.h"
+
+#define TEST_FLUSH_FIRMWARE_WORK() fake_gxp_mcu_firmware_flush_work_all()
+#else
+#define TEST_FLUSH_FIRMWARE_WORK()
+#endif
+
#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
+static int gxp_uci_mailbox_manager_execute_cmd(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, u8 num_cores, struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device *vd = client->vd;
+ struct gxp_uci_command cmd;
+ struct gxp_uci_response resp;
+ int ret;
+
+ if (gxp_is_direct_mode(gxp))
+ return -EOPNOTSUPP;
+
+ if (!gxp_vd_has_and_use_credit(vd))
+ return -EBUSY;
+
+ /* Pack the command structure */
+ cmd.core_command_params.address = cmd_daddr;
+ cmd.core_command_params.size = cmd_size;
+ cmd.core_command_params.num_cores = num_cores;
+ /* Plus 1 to align with power states in MCU firmware. */
+ cmd.core_command_params.dsp_operating_point = power_states.power + 1;
+ cmd.core_command_params.memory_operating_point = power_states.memory;
+ cmd.type = cmd_code;
+ cmd.core_id = 0;
+ cmd.client_id = vd->client_id;
+
+ ret = gxp_mailbox_send_cmd(mailbox, &cmd, &resp);
+
+ /* resp.seq and resp.status can be updated even though it failed to process the command */
+ if (resp_seq)
+ *resp_seq = resp.seq;
+ if (resp_status)
+ *resp_status = resp.code;
+
+ gxp_vd_release_credit(vd);
+
+ return ret;
+}
+
static void gxp_uci_mailbox_manager_release_unconsumed_async_resps(
struct gxp_virtual_device *vd)
{
@@ -31,24 +81,65 @@ static void gxp_uci_mailbox_manager_release_unconsumed_async_resps(
unsigned long flags;
/*
- * Cleanup any unconsumed responses.
- * Since VD is releasing, it is not necessary to lock here.
- * Do it anyway for consistency.
+ * We should hold a lock to prevent removing WAKELOCK responses from the arrived callback
+ * while iterating @wait_queue.
*/
spin_lock_irqsave(&vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
flags);
- list_for_each_entry_safe (
- cur, nxt, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
- list_entry) {
- list_del(&cur->list_entry);
- gcip_mailbox_release_awaiter(cur->awaiter);
+
+ /* Prevent the arrived and timedout callbacks from handling responses. */
+ list_for_each_entry (
+ cur, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ wait_list_entry) {
+ cur->wait_queue = NULL;
}
+
spin_unlock_irqrestore(&vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
flags);
+
+ /*
+ * From here it is guaranteed that @wait_queue will not be manipulated by the arrived
+ * callback.
+ */
+
+ /*
+ * Flush the work of fake firmware to simulate firing arrived or timedout callbacks in the
+ * middle of this function. If there is no work to be done, this is the same as NO-OP.
+ */
+ TEST_FLUSH_FIRMWARE_WORK();
+
+ /* Ensure no responses will be handled by the arrived or timedout callbacks. */
+ list_for_each_entry (
+ cur, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ wait_list_entry) {
+ gcip_mailbox_cancel_awaiter(cur->awaiter);
+ }
+
+ /*
+ * From here it is guaranteed that no responses will access @vd and be handled by arrived
+ * or timedout callbacks. Therefore, @dest_queue will not be changed anymore.
+ */
+
+ /* Clean up unconsumed responses in the @dest_queue. */
+ list_for_each_entry_safe (
+ cur, nxt, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue,
+ dest_list_entry) {
+ list_del(&cur->dest_list_entry);
+ }
+
+ /* Clean up @wait_queue and release awaiters. */
+ list_for_each_entry_safe (
+ cur, nxt, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
+ wait_list_entry) {
+ list_del(&cur->wait_list_entry);
+ gcip_mailbox_release_awaiter(cur->awaiter);
+ }
}
static void gxp_uci_mailbox_manager_set_ops(struct gxp_mailbox_manager *mgr)
{
+ /* This operator will be used only from the gxp-debugfs.c. */
+ mgr->execute_cmd = gxp_uci_mailbox_manager_execute_cmd;
/*
* Most mailbox manager operators are used by the `gxp-common-platform.c` when the device
* uses direct mode. The only one that should be implemented among them from the UCI is the
@@ -119,25 +210,21 @@ gxp_uci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
struct gxp_uci_async_response *async_resp = awaiter->data;
unsigned long flags;
- /*
- * If dest_queue is a null pointer, it means we don't care the response
- * of the command. Skip it.
- */
+ spin_lock_irqsave(async_resp->queue_lock, flags);
+
+ if (!async_resp->wait_queue)
+ goto out;
+
+ async_resp->wait_queue = NULL;
+ list_del(&async_resp->wait_list_entry);
+
if (!async_resp->dest_queue) {
- gcip_mailbox_release_awaiter(awaiter);
- return;
+ /* If @dest_queue is NULL, vd will not consume it. We can release it right away. */
+ gcip_mailbox_release_awaiter(async_resp->awaiter);
+ goto out;
}
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
-
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- /*
- * Marking the dest_queue as NULL indicates the
- * response was handled in case its timeout
- * handler fired between acquiring the
- * wait_list_lock and cancelling the timeout.
- */
- async_resp->dest_queue = NULL;
+ list_add_tail(&async_resp->dest_list_entry, async_resp->dest_queue);
if (async_resp->eventfd) {
gxp_eventfd_signal(async_resp->eventfd);
@@ -145,8 +232,8 @@ gxp_uci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
}
wake_up(async_resp->dest_queue_waitq);
-
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+out:
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
}
static void
@@ -164,11 +251,21 @@ gxp_uci_handle_awaiter_timedout(struct gcip_mailbox *mailbox,
* wait_list_lock. If this happens, this callback will be called with the destination queue
* of response as a NULL, otherwise as not NULL.
*/
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ spin_lock_irqsave(async_resp->queue_lock, flags);
+
+ if (!async_resp->wait_queue) {
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
+ return;
+ }
+
+ async_resp->wait_queue = NULL;
+ list_del(&async_resp->wait_list_entry);
+
if (async_resp->dest_queue) {
async_resp->resp.code = GXP_RESP_CANCELLED;
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+ list_add_tail(&async_resp->dest_list_entry,
+ async_resp->dest_queue);
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
if (async_resp->eventfd) {
gxp_eventfd_signal(async_resp->eventfd);
@@ -177,21 +274,12 @@ gxp_uci_handle_awaiter_timedout(struct gcip_mailbox *mailbox,
wake_up(async_resp->dest_queue_waitq);
} else {
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+ /* If @dest_queue is NULL, vd will not consume it. We can release it right away. */
+ gcip_mailbox_release_awaiter(async_resp->awaiter);
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
}
}
-static void gxp_uci_flush_awaiter(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_resp_awaiter *awaiter)
-{
- struct gxp_uci_async_response *async_resp = awaiter->data;
- unsigned long flags;
-
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
-}
-
static void gxp_uci_release_awaiter_data(void *data)
{
struct gxp_uci_async_response *async_resp = data;
@@ -227,7 +315,6 @@ static const struct gcip_mailbox_ops gxp_uci_gcip_mbx_ops = {
.after_fetch_resps = gxp_mailbox_gcip_ops_after_fetch_resps,
.handle_awaiter_arrived = gxp_uci_handle_awaiter_arrived,
.handle_awaiter_timedout = gxp_uci_handle_awaiter_timedout,
- .flush_awaiter = gxp_uci_flush_awaiter,
.release_awaiter_data = gxp_uci_release_awaiter_data,
};
@@ -338,6 +425,7 @@ void gxp_uci_exit(struct gxp_uci *uci)
int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
struct gxp_uci_command *cmd,
+ struct list_head *wait_queue,
struct list_head *resp_queue, spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
struct gxp_eventfd *eventfd)
@@ -355,6 +443,7 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
async_resp->uci = uci;
async_resp->vd = vd;
+ async_resp->wait_queue = wait_queue;
/*
* If the command is a wakelock command, keep dest_queue as a null
* pointer to indicate that we will not expose the response to the
@@ -362,7 +451,7 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
*/
if (cmd->type != WAKELOCK_COMMAND)
async_resp->dest_queue = resp_queue;
- async_resp->dest_queue_lock = queue_lock;
+ async_resp->queue_lock = queue_lock;
async_resp->dest_queue_waitq = queue_waitq;
if (eventfd && gxp_eventfd_get(eventfd))
async_resp->eventfd = eventfd;
@@ -376,6 +465,9 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
goto err_free_resp;
}
+ /* Put async_resp into the waiting queue. */
+ list_add_tail(&async_resp->wait_list_entry, wait_queue);
+
return 0;
err_free_resp:
@@ -386,8 +478,7 @@ err_release_credit:
}
int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
- u64 *resp_seq, u32 *resp_retval,
- u16 *error_code)
+ u64 *resp_seq, u16 *error_code, u8 *opaque)
{
long timeout;
struct gxp_uci_async_response *async_resp;
@@ -401,19 +492,19 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
* per wake event.
*/
timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
- uci_resp_queue->waitq, !list_empty(&uci_resp_queue->queue),
+ uci_resp_queue->waitq, !list_empty(&uci_resp_queue->dest_queue),
uci_resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
if (timeout <= 0) {
spin_unlock_irq(&uci_resp_queue->lock);
/* unusual case - this only happens when there is no command pushed */
return timeout ? -ETIMEDOUT : timeout;
}
- async_resp =
- list_first_entry(&uci_resp_queue->queue,
- struct gxp_uci_async_response, list_entry);
+ async_resp = list_first_entry(&uci_resp_queue->dest_queue,
+ struct gxp_uci_async_response,
+ dest_list_entry);
/* Pop the front of the response list */
- list_del(&(async_resp->list_entry));
+ list_del(&(async_resp->dest_list_entry));
spin_unlock_irq(&uci_resp_queue->lock);
@@ -421,8 +512,9 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
switch (async_resp->resp.code) {
case GXP_RESP_OK:
*error_code = GXP_RESPONSE_ERROR_NONE;
- /* payload is only valid if code == GXP_RESP_OK */
- *resp_retval = async_resp->resp.payload;
+ if (opaque)
+ memcpy(opaque, async_resp->resp.opaque,
+ sizeof(async_resp->resp.opaque));
break;
case GXP_RESP_CANCELLED:
*error_code = GXP_RESPONSE_ERROR_TIMEOUT;
diff --git a/gxp-uci.h b/gxp-uci.h
index 31a0aaa..a6c1a92 100644
--- a/gxp-uci.h
+++ b/gxp-uci.h
@@ -52,19 +52,19 @@ struct gxp_uci_core_command_params {
struct gxp_uci_command {
/* sequence number, should match the corresponding response */
uint64_t seq;
- /* unique ID for each client that identifies client VM & security realm*/
+ /* unique ID for each client that identifies client VM & security realm */
uint32_t client_id;
/* type of the command */
enum gxp_uci_type type;
- /* priority level for this command */
- uint8_t priority;
+ /* hint for which core the job should be assigned to */
+ uint8_t core_id;
/* reserved field */
uint8_t reserved[2];
/* All possible command parameters */
union {
struct gxp_uci_core_command_params core_command_params;
struct gxp_uci_wakelock_command_params wakelock_command_params;
- uint8_t max_param_size[16];
+ uint8_t opaque[48];
};
};
@@ -77,8 +77,7 @@ struct gxp_uci_response {
uint16_t code;
/* reserved field */
uint8_t reserved[2];
- /* returned payload field */
- uint64_t payload;
+ uint8_t opaque[16];
};
/*
@@ -86,18 +85,32 @@ struct gxp_uci_response {
* sent the command.
*/
struct gxp_uci_async_response {
- struct list_head list_entry;
+ /*
+ * List entry which will be inserted to the waiting queue of the vd.
+ * It will be pushed into the waiting queue when the command is sent.
+ * (i.e., the `gxp_uci_send_command` function is called)
+ * It will be popped when the response is consumed by the vd.
+ */
+ struct list_head wait_list_entry;
+ /*
+ * List entry which will be inserted to the dest_queue of the vd.
+ * It will be pushed into the dest_queue when the response has arrived or timed out.
+ * It will be popped when the response is consumed by the vd.
+ */
+ struct list_head dest_list_entry;
/* Stores the response. */
struct gxp_uci_response resp;
struct gxp_uci *uci;
+ /* Queue from which this response is removed once it is complete or timed out. */
+ struct list_head *wait_queue;
/* Queue to add the response to once it is complete or timed out. */
struct list_head *dest_queue;
/*
- * The lock that protects queue pointed to by `dest_queue`.
- * The mailbox code also uses this lock to protect changes to the
- * `dest_queue` pointer itself when processing this response.
+ * The lock that protects queues pointed to by `dest_queue` and `wait_queue`.
+ * The mailbox code also uses this lock to protect changes to the `wait_queue` pointer
+ * itself when processing this response.
*/
- spinlock_t *dest_queue_lock;
+ spinlock_t *queue_lock;
/* Queue of clients to notify when this response is processed. */
wait_queue_head_t *dest_queue_waitq;
/* gxp_eventfd to signal when the response completes. May be NULL. */
@@ -151,6 +164,7 @@ void gxp_uci_exit(struct gxp_uci *uci);
*/
int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
struct gxp_uci_command *cmd,
+ struct list_head *wait_queue,
struct list_head *resp_queue, spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
struct gxp_eventfd *eventfd);
@@ -162,7 +176,6 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
* Returns 0 on success, a negative errno on failure.
*/
int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
- u64 *resp_seq, u32 *resp_retval,
- u16 *error_code);
+ u64 *resp_seq, u16 *error_code, u8 *opaque);
#endif /* __GXP_UCI_H__ */
diff --git a/gxp-vd.c b/gxp-vd.c
index 0456d0a..3bd01cd 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <gcip/gcip-alloc-helper.h>
+
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
@@ -59,8 +61,8 @@ static int map_core_telemetry_buffers(struct gxp_dev *gxp,
return 0;
mutex_lock(&gxp->core_telemetry_mgr->lock);
- data[0] = gxp->core_telemetry_mgr->logging_buff_data_legacy;
- data[1] = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
@@ -111,8 +113,8 @@ static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
if (!gxp->core_telemetry_mgr)
return;
mutex_lock(&gxp->core_telemetry_mgr->lock);
- data[0] = gxp->core_telemetry_mgr->logging_buff_data_legacy;
- data[1] = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
@@ -187,6 +189,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
u16 requested_cores)
{
struct gxp_virtual_device *vd;
+ unsigned int size;
int i;
int err;
@@ -207,6 +210,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->tpu_client_id = -1;
spin_lock_init(&vd->credit_lock);
vd->credit = GXP_COMMAND_CREDIT_PER_VD;
+ vd->first_open = true;
vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
if (!vd->domain) {
@@ -224,15 +228,25 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
}
}
+ size = GXP_NUM_CORES * PRIVATE_FW_DATA_SIZE;
+ vd->fwdata_sgt = gcip_alloc_noncontiguous(gxp->dev, size, GFP_KERNEL);
+ if (!vd->fwdata_sgt) {
+ dev_err(gxp->dev, "allocate firmware data size=%x failed",
+ size);
+ err = -ENOMEM;
+ goto error_free_slice_index;
+ }
+
vd->mailbox_resp_queues = kcalloc(
vd->num_cores, sizeof(*vd->mailbox_resp_queues), GFP_KERNEL);
if (!vd->mailbox_resp_queues) {
err = -ENOMEM;
- goto error_free_slice_index;
+ goto error_free_fwdata;
}
for (i = 0; i < vd->num_cores; i++) {
- INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].wait_queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].dest_queue);
spin_lock_init(&vd->mailbox_resp_queues[i].lock);
init_waitqueue_head(&vd->mailbox_resp_queues[i].waitq);
}
@@ -251,14 +265,17 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
goto error_unassign_cores;
}
}
- /* TODO(b/255706432): Adopt vd->slice_index after the firmware supports this. */
err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list,
- /*slice_index=*/0);
+ vd->slice_index);
if (err)
goto error_destroy_fw_data;
- err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
+ err = gxp_dma_map_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
+ vd->fwdata_sgt, IOMMU_READ | IOMMU_WRITE);
if (err)
goto error_unmap_core_resources;
+ err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
+ if (err)
+ goto error_unmap_fw_data;
err = map_debug_dump_buffer(gxp, vd);
if (err)
goto error_unmap_core_telemetry_buffer;
@@ -267,6 +284,8 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
error_unmap_core_telemetry_buffer:
unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
+error_unmap_fw_data:
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
error_unmap_core_resources:
gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
error_destroy_fw_data:
@@ -275,6 +294,8 @@ error_unassign_cores:
unassign_cores(vd);
error_free_resp_queues:
kfree(vd->mailbox_resp_queues);
+error_free_fwdata:
+ gcip_free_noncontiguous(vd->fwdata_sgt);
error_free_slice_index:
if (vd->slice_index >= 0)
ida_free(&gxp->shared_slice_idp, vd->slice_index);
@@ -294,9 +315,17 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
uint core_list = vd->core_list;
lockdep_assert_held_write(&gxp->vd_semaphore);
+
+ if (vd->is_secure) {
+ mutex_lock(&gxp->secure_vd_lock);
+ gxp->secure_vd = NULL;
+ mutex_unlock(&gxp->secure_vd_lock);
+ }
+
unassign_cores(vd);
unmap_debug_dump_buffer(gxp, vd);
unmap_core_telemetry_buffers(gxp, vd, core_list);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
if (!IS_ERR_OR_NULL(vd->fw_app)) {
@@ -320,6 +349,7 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
up_write(&vd->mappings_semaphore);
kfree(vd->mailbox_resp_queues);
+ gcip_free_noncontiguous(vd->fwdata_sgt);
if (vd->slice_index >= 0)
ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
@@ -450,7 +480,6 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
CORE_NOTIF_SUSPEND_REQUEST);
}
}
-
/* Wait for all cores to complete core suspension. */
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
diff --git a/gxp-vd.h b/gxp-vd.h
index 22ef800..704e40f 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
+#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -22,9 +23,14 @@
/* TODO(b/259192112): set to 8 once the runtime has added the credit limit. */
#define GXP_COMMAND_CREDIT_PER_VD 256
+/* A special client ID for secure workloads pre-agreed with MCU firmware. */
+#define SECURE_CLIENT_ID (3 << 10)
+
struct mailbox_resp_queue {
- /* Queue of async responses */
- struct list_head queue;
+ /* Queue of waiting async responses */
+ struct list_head wait_queue;
+ /* Queue of arrived async responses */
+ struct list_head dest_queue;
 /* Lock protecting access to the `wait_queue` and `dest_queue` */
spinlock_t lock;
/* Waitqueue to wait on if the queue is empty */
@@ -65,6 +71,10 @@ struct gxp_virtual_device {
* of slice is used by this VD.
*/
int slice_index;
+ /*
+ * The SG table that holds the firmware data region.
+ */
+ struct sg_table *fwdata_sgt;
uint core_list;
/*
* The ID of DSP client. -1 if it is not allocated.
@@ -93,6 +103,9 @@ struct gxp_virtual_device {
* Only used in MCU mode.
*/
uint credit;
+ /* Whether it's the first time allocating a VMBox for this VD. */
+ bool first_open;
+ bool is_secure;
};
/*
@@ -128,7 +141,8 @@ void gxp_vd_destroy(struct gxp_dev *gxp);
* cores to be assigned to @vd
* * -ENOSPC - There is no more available shared slices
*/
-struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores);
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
+ u16 requested_cores);
/**
* gxp_vd_release() - Cleanup and free a struct gxp_virtual_device
diff --git a/gxp.h b/gxp.h
index c6a05ea..0048584 100644
--- a/gxp.h
+++ b/gxp.h
@@ -13,7 +13,7 @@
/* Interface Version */
#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 5
+#define GXP_INTERFACE_VERSION_MINOR 6
#define GXP_INTERFACE_VERSION_BUILD 0
/*
@@ -48,8 +48,15 @@
/* To check whether the driver is working in MCU mode. */
#define GXP_SPEC_FEATURE_MODE_MCU (1 << 0)
+/* To specify the secureness of the virtual device. */
+#define GXP_ALLOCATE_VD_SECURE BIT(0)
+
/* Core telemetry buffer size is a multiple of 64 kB */
#define GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE 0x10000u
+/* Magic code used to indicate the validity of telemetry buffer contents */
+#define GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE 0xC0DEC0DEu
+/* Magic code used to indicate the validity of secure telemetry buffer contents */
+#define GXP_TELEMETRY_SECURE_BUFFER_VALID_MAGIC_CODE 0xA0B0C0D0u
struct gxp_map_ioctl {
/*
@@ -179,7 +186,7 @@ struct gxp_mailbox_response_ioctl {
};
/*
- * Pop element from the mailbox response queue. Blocks until mailbox response
+ * Pop an element from the mailbox response queue. Blocks until mailbox response
* is available.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
@@ -229,6 +236,16 @@ struct gxp_virtual_device_ioctl {
*/
__u8 core_count;
/*
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - GXP_ALLOCATE_VD_SECURE setting for vd secureness
+ * 0 = Non-secure, default value
+ * 1 = Secure
+ * [7:1] - RESERVED
+ */
+ __u8 flags;
+ /*
* Input:
* The number of threads requested per core.
*/
@@ -649,7 +666,7 @@ struct gxp_mailbox_command_ioctl {
};
/*
- * Push element to the mailbox commmand queue.
+ * Push an element to the mailbox command queue.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
@@ -857,4 +874,62 @@ struct gxp_interface_version_ioctl {
#define GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 29, struct gxp_register_telemetry_eventfd_ioctl)
+struct gxp_mailbox_uci_command_ioctl {
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_UCI_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /* reserved fields */
+ __u8 reserved[8];
+ /*
+ * Input:
+ * Will be copied to the UCI command without modification.
+ */
+ __u8 opaque[48];
+};
+
+/*
+ * Push an element to the UCI command queue.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 30, struct gxp_mailbox_uci_command_ioctl)
+
+struct gxp_mailbox_uci_response_ioctl {
+ /*
+ * Output:
+ * Sequence number indicating which command this response is for.
+ */
+ __u64 sequence_number;
+ /*
+ * Output:
+ * Driver error code.
+ * Indicates if the response was obtained successfully,
+ * `GXP_RESPONSE_ERROR_NONE`, or what error prevented the command
+ * from completing successfully.
+ */
+ __u16 error_code;
+ /* reserved fields */
+ __u8 reserved[6];
+ /*
+ * Output:
+ * Is copied from the UCI response without modification.
+ * Only valid if `error_code` == GXP_RESPONSE_ERROR_NONE
+ */
+ __u8 opaque[16];
+};
+
+/*
+ * Pop an element from the UCI response queue. Blocks until mailbox response
+ * is available.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_RESPONSE \
+ _IOR(GXP_IOCTL_BASE, 31, struct gxp_mailbox_uci_response_ioctl)
+
#endif /* __GXP_H__ */