author     Aurora zuma automerger <aurora-zuma-automerger@google.com>  2023-01-18 06:39:40 +0000
committer  Copybara-Service <copybara-worker@google.com>               2023-01-17 23:58:40 -0800
commit     df93ed835156837cd4c6e2dfd32d34e015250055 (patch)
tree       be2c277271f2b0a111f8652ab54b0fd7e0d5771e
parent     faa60e619e819769e825306a9cc25e8867023cce (diff)
download   zuma-df93ed835156837cd4c6e2dfd32d34e015250055.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: move core config region to second half of buf
gcip: conditionally zero the non-contiguous region
GCIP_MAIN_REV_ID: 0a2e7b7e345705db78c8108890781cbe5c8c4eb7
gxp: boot status and doorbell for suspend/resume
Bug: 265742153
gxp: set VD and core config region
Bug: 265742153 (repeat)
gxp: use core cfg region for firmware handshake
Bug: 265742153 (repeat)
gxp: boot with virt_core on per-VD config method
Bug: 265742153 (repeat)
gxp: only map fwdata region in legacy protocol
Bug: 265742153 (repeat)
gxp: populate system config on fw data create
Bug: 265742153 (repeat)
gxp: flush SGT after map iova sgt
Bug: 265564221
gxp: remove gxp_vd_phys_core_list
gxp: signal eventfd when client is invalidated
Bug: 264830822
gxp: introduce GXP_(UN)REGISTER_INVALIDATED_EVENTFD ioctl
Bug: 264830822 (repeat)
gxp: add create/destroy app legacy functions
Bug: 265742153 (repeat)
gxp: pass vd to fw_data_create_app
Bug: 265742153 (repeat)
gxp: add structures for new app config
Bug: 265742153 (repeat)
gxp: add sys_cfg mapping support
Bug: 265605775
gxp: all chips/modes have shared region
Bug: 265605775 (repeat)
gxp: map config regions per image config
Bug: 265605775 (repeat)
gxp: set shared_buf vaddr
Bug: 265605775 (repeat)
gxp: map image config NS mappings
Bug: 265564221 (repeat)
gxp: use per VD scratchpad region
Bug: 261797596
gxp: conditionally map FW image region
Bug: 265105395
gxp: add lock_class_key to struct client
gxp: add image config parser to VD
Bug: 265105395 (repeat)
gxp: record image config on firmware requested
Bug: 265105395 (repeat)
gxp: add missing up_read in gxp_debugfs_mailbox
gxp: implement vd invalidation
Bug: 264831024
gxp: implement MCU fw crash handler
Bug: 237739631
gxp: implement gxp_mcu_firmware_restart_locked
Bug: 233159020
gxp: pass timeout_ms to gxp_pm_is_blk_down
gxp: set is_firmware_requested on SysFS FW load
gxp: set SSMT to bypass in MCU mode
Bug: 264950137
Revert "gxp: fetch segment boundaries from ELF header"
gxp: add gcip_dma_fence_manager to gxp_dev
Bug: 258876786
gxp: add gxp-dma-fence support
Bug: 258876786 (repeat)
gxp: introduce GXP_NAME macro
gxp: prevent unloading MCU fw before UCI sync resp returns
gcip: expose config size calculation to header
Bug: 265605775 (repeat)
gcip: sync RKCI codes with fw
gcip: update comments of gcip_dma_fence_init
Bug: 264220687
gcip: add to_gcip_fence to gcip-dma-fence
Bug: 264220687 (repeat)
GCIP_HEADERS_REV_ID: 0a85dc4b06195de6efa46e2ca314548f07f30097
gcip: expose config size calculation to header
Bug: 265605775 (repeat)
gcip: Use strscpy instead of memcpy
gcip: enhance image config NS mapping decoding
Bug: 265565307
gcip: don't fail dma_fence_init on long name
Bug: 264220687 (repeat)
GCIP_MAIN_REV_ID: e0d9923db0788f7de94c7b41e580dee62deb192f
GitOrigin-RevId: 26871ed8b04b195ef351d46bb52795d62ee0d529
Change-Id: I8c3d55b379df8c7ac8c29aaaf7b756968a10cd3c
-rw-r--r--  Makefile                                              |    1
-rw-r--r--  callisto/config.h                                     |    9
-rw-r--r--  callisto/iova.h                                       |    2
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c   |    6
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c      |    7
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-image-config.c   |   42
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-dma-fence.h      |   18
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-image-config.h   |   35
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h            |    5
-rw-r--r--  gxp-client.c                                          |    7
-rw-r--r--  gxp-client.h                                          |    2
-rw-r--r--  gxp-common-platform.c                                 |   65
-rw-r--r--  gxp-debug-dump.c                                      |    3
-rw-r--r--  gxp-debugfs.c                                         |    3
-rw-r--r--  gxp-dma-fence.c                                       |   64
-rw-r--r--  gxp-dma-fence.h                                       |   38
-rw-r--r--  gxp-dma-iommu.c                                       |   78
-rw-r--r--  gxp-firmware-data.c                                   |  313
-rw-r--r--  gxp-firmware-data.h                                   |   16
-rw-r--r--  gxp-firmware.c                                        |  590
-rw-r--r--  gxp-firmware.h                                        |   54
-rw-r--r--  gxp-host-device-structs.h                             |  253
-rw-r--r--  gxp-internal.h                                        |   13
-rw-r--r--  gxp-kci.c                                             |    9
-rw-r--r--  gxp-mcu-firmware.c                                    |  185
-rw-r--r--  gxp-mcu-firmware.h                                    |   10
-rw-r--r--  gxp-mcu.c                                             |    6
-rw-r--r--  gxp-pm.c                                              |    9
-rw-r--r--  gxp-pm.h                                              |    3
-rw-r--r--  gxp-ssmt.h                                            |    7
-rw-r--r--  gxp-uci.c                                             |    7
-rw-r--r--  gxp-vd.c                                              |  895
-rw-r--r--  gxp-vd.h                                              |   53
-rw-r--r--  gxp.h                                                 |   19
34 files changed, 2136 insertions(+), 691 deletions(-)
diff --git a/Makefile b/Makefile
index 80592a1..c1b4ca2 100644
--- a/Makefile
+++ b/Makefile
@@ -15,6 +15,7 @@ gxp-objs += \
gxp-core-telemetry.o \
gxp-debug-dump.o \
gxp-debugfs.o \
+ gxp-dma-fence.o \
gxp-dma-iommu.o \
gxp-dmabuf.o \
gxp-domain-pool.o \
diff --git a/callisto/config.h b/callisto/config.h
index 7a62b47..4bcc215 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -8,6 +8,8 @@
#ifndef __CALLISTO_CONFIG_H__
#define __CALLISTO_CONFIG_H__
+#include <linux/sizes.h>
+
#define GXP_DRIVER_NAME "gxp_callisto"
#define DSP_FIRMWARE_DEFAULT_PREFIX "gxp_callisto_fw_core"
#define GXP_DEFAULT_MCU_FIRMWARE "google/gxp-callisto.fw"
@@ -28,6 +30,13 @@
/* three for cores, one for MCU */
#define GXP_NUM_WAKEUP_DOORBELLS (GXP_NUM_CORES + 1)
+/* The total size of the configuration region. */
+#define GXP_SHARED_BUFFER_SIZE SZ_512K
+/* Size of slice per VD. */
+#define GXP_SHARED_SLICE_SIZE SZ_32K
+/* TODO(b/265730182): set to 7 */
+#define GXP_NUM_SHARED_SLICES 16
+
/* TODO(b/234098135): remove this when FW supports suspend / resume */
#define DISABLE_VD_SUSPEND_RESUME_SUPPORT
/*
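Note on the three new constants: the 512 KiB shared buffer divides into exactly sixteen 32 KiB per-VD slices, which is where GXP_NUM_SHARED_SLICES = 16 comes from (the TODO notes it should eventually drop to 7). A minimal compile-time guard encoding that relationship could look like the following; this is illustrative only and not part of the change:

    #include <linux/build_bug.h>

    /* 16 slices * 32 KiB == 512 KiB, so this assertion holds. */
    static_assert(GXP_NUM_SHARED_SLICES * GXP_SHARED_SLICE_SIZE <=
                  GXP_SHARED_BUFFER_SIZE,
                  "per-VD slices must fit in the shared config region");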
diff --git a/callisto/iova.h b/callisto/iova.h
index 496d863..0a81a56 100644
--- a/callisto/iova.h
+++ b/callisto/iova.h
@@ -13,8 +13,6 @@
#define GXP_IOVA_EXT_TPU_MBX (0x1A050000)
#define GXP_IOVA_FIRMWARE(_x_) (0xFA000000 + (_x_) * 0x00100000)
#define GXP_IOVA_SHARED_BUFFER (0xFA3A8000)
-#define GXP_SHARED_BUFFER_SIZE (0x00080000) /* 512K */
-#define GXP_SHARED_SLICE_SIZE (0x00008000) /* 32K, per VD */
#define GXP_IOVA_FW_DATA (0xFA400000)
#define GXP_IOVA_PRIV_FW_DATA (0xFA500000)
#define GXP_IOVA_TPU_MBX_BUFFER(_x_) (0xFE100000 + (_x_) * 0x00040000)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
index 33c95e2..6d4eeee 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -7,6 +7,7 @@
#include <asm/page.h>
#include <linux/device.h>
+#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -47,7 +48,10 @@ struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t
size = PAGE_ALIGN(size);
count = size >> PAGE_SHIFT;
- mem = vzalloc_node(size, dev_to_node(dev));
+ if (gfp & __GFP_ZERO)
+ mem = vzalloc_node(size, dev_to_node(dev));
+ else
+ mem = vmalloc_node(size, dev_to_node(dev));
if (!mem) {
dev_err(dev, "GCIP noncontiguous alloc size=%#zx failed", size);
goto err_free_sh;
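With this change, gcip_alloc_noncontiguous() zeroes the vmalloc backing only when the caller asks for it via __GFP_ZERO, instead of unconditionally. A sketch of the two resulting caller patterns (call sites hypothetical):

    /* Zeroed allocation, as all callers implicitly got before this change: */
    sgt = gcip_alloc_noncontiguous(dev, SZ_64K, GFP_KERNEL | __GFP_ZERO);

    /*
     * Uninitialized allocation, skipping the memset cost for buffers the
     * caller fully overwrites anyway (e.g. firmware RW data copied from
     * an image):
     */
    sgt = gcip_alloc_noncontiguous(dev, SZ_64K, GFP_KERNEL);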
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
index 9da1f24..4f83670 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
@@ -84,16 +84,13 @@ bool gcip_dma_fence_always_true(struct dma_fence *fence)
int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
struct gcip_dma_fence_data *data)
{
- size_t timeline_len = strlen(data->timeline_name);
unsigned long flags;
int fd;
struct sync_file *sync_file;
int ret;
- if (timeline_len >= GCIP_FENCE_TIMELINE_NAME_LEN)
- return -EINVAL;
- memcpy(gfence->timeline_name, data->timeline_name, timeline_len);
- gfence->timeline_name[timeline_len] = '\0';
+ strscpy(gfence->timeline_name, data->timeline_name, GCIP_FENCE_TIMELINE_NAME_LEN);
+
spin_lock_init(&gfence->lock);
INIT_LIST_HEAD(&gfence->fence_list);
gfence->mgr = mgr;
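strscpy() bounds the copy to the destination size and always NUL-terminates, so an over-long timeline name is now silently truncated instead of rejected with -EINVAL (see the "don't fail dma_fence_init on long name" entry in the commit message). A small sketch of the semantics:

    char buf[8];
    ssize_t n;

    /*
     * Copies "timelin" plus a NUL terminator; returns -E2BIG to signal
     * truncation, which gcip_dma_fence_init() deliberately ignores.
     */
    n = strscpy(buf, "timeline-0", sizeof(buf));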
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
index 312bbdc..62acd0b 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -12,28 +12,6 @@
#include <gcip/gcip-image-config.h>
-#define ADDR_SHIFT 12
-#define SIZE_MODE_BIT BIT(ADDR_SHIFT - 1)
-#define SECURE_SIZE_MASK (SIZE_MODE_BIT - 1u)
-#define NS_SIZE_MASK (BIT(ADDR_SHIFT) - 1u)
-#define ADDR_MASK ~(BIT(ADDR_SHIFT) - 1u)
-
-/* used by ns_iommu_mappings */
-#define CONFIG_TO_MBSIZE(a) (((a) & NS_SIZE_MASK) << 20)
-
-/* used by iommu_mappings */
-static inline __u32 config_to_size(__u32 cfg)
-{
- __u32 page_size;
-
- if (cfg & SIZE_MODE_BIT)
- page_size = cfg & SECURE_SIZE_MASK;
- else
- page_size = BIT(cfg & SECURE_SIZE_MASK);
-
- return page_size << PAGE_SHIFT;
-}
-
static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
struct gcip_image_config *config)
{
@@ -49,8 +27,8 @@ static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
ret = -EIO;
goto err;
}
- size = config_to_size(config->iommu_mappings[i].image_config_value);
- paddr = config->iommu_mappings[i].image_config_value & ADDR_MASK;
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
+ paddr = config->iommu_mappings[i].image_config_value & GCIP_IMG_CFG_ADDR_MASK;
dev_dbg(parser->dev, "Image config adding IOMMU mapping: %pad -> %pap", &daddr,
&paddr);
@@ -74,7 +52,7 @@ static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
err:
while (i--) {
daddr = config->iommu_mappings[i].virt_address;
- size = config_to_size(config->iommu_mappings[i].image_config_value);
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
}
return ret;
@@ -89,7 +67,7 @@ static void clear_iommu_mappings(struct gcip_image_config_parser *parser,
for (i = config->num_iommu_mappings - 1; i >= 0; i--) {
daddr = config->iommu_mappings[i].virt_address;
- size = config_to_size(config->iommu_mappings[i].image_config_value);
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
dev_dbg(parser->dev, "Image config removing IOMMU mapping: %pad size=%#lx", &daddr,
size);
parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
@@ -105,13 +83,13 @@ static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
phys_addr_t paddr = 0;
for (i = 0; i < config->num_ns_iommu_mappings; i++) {
- daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
if (unlikely(!daddr)) {
dev_warn(parser->dev, "Invalid config, device address is zero");
ret = -EIO;
goto err;
}
- size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
dev_dbg(parser->dev, "Image config adding NS IOMMU mapping: %pad -> %pap", &daddr,
&paddr);
if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
@@ -128,8 +106,8 @@ static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
err:
while (i--) {
- size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
- daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
parser->ops->unmap(parser->data, daddr, size, 0);
}
return ret;
@@ -143,8 +121,8 @@ static void clear_ns_iommu_mappings(struct gcip_image_config_parser *parser,
int i;
for (i = config->num_ns_iommu_mappings - 1; i >= 0; i--) {
- size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
- daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
dev_dbg(parser->dev, "Image config removing NS IOMMU mapping: %pad size=%#lx",
&daddr, size);
parser->ops->unmap(parser->data, daddr, size, 0);
diff --git a/gcip-kernel-driver/include/gcip/gcip-dma-fence.h b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
index 232360b..c0a7d68 100644
--- a/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
+++ b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
@@ -30,6 +30,8 @@
#define gcip_for_each_fence(mgr, gfence) \
list_for_each_entry(gfence, &mgr->fence_list_head, fence_list)
+#define to_gcip_fence(fence) container_of(fence, struct gcip_dma_fence, fence)
+
struct gcip_dma_fence_manager {
/* The list of all fence objects for debugging. */
struct list_head fence_list_head;
@@ -59,8 +61,6 @@ struct gcip_dma_fence_data {
char *timeline_name;
/*
* The DMA fence operators to initialize the fence with.
- *
- * Note: @ops->release is called when after_init() fails.
*/
const struct dma_fence_ops *ops;
/* The sequence number to initialize the fence with. */
@@ -99,6 +99,20 @@ bool gcip_dma_fence_always_true(struct dma_fence *fence);
/* End of helpers for setting dma_fence_ops. */
+/*
+ * This function does the following:
+ * 1. Initializes the DMA fence object.
+ * 2. Calls after_init() if present.
+ * 3. Installs an FD associated with the created DMA fence.
+ *
+ * Step 1 never fails, so this function returns an error only if after_init()
+ * fails (step 2) or FD allocation fails (step 3).
+ * In either failure case, @ops->release is always called. Therefore @ops->release may need to
+ * distinguish whether after_init() succeeded.
+ *
+ * It's always safe to call gcip_dma_fence_exit() in @ops->release because that function reverts
+ * step 1.
+ */
int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
struct gcip_dma_fence_data *data);
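The updated comment pins down the contract: @ops->release runs on every failure path, so a release callback must cope with a fence whose after_init() never ran, and may always call gcip_dma_fence_exit(). A minimal conforming callback, with hypothetical driver names, might look like:

    static void my_fence_release(struct dma_fence *fence)
    {
            struct my_fence *f =
                    container_of(to_gcip_fence(fence), struct my_fence, gfence);

            /* Safe even when after_init() failed: this only reverts step 1. */
            gcip_dma_fence_exit(&f->gfence);
            kfree(f);
    }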
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
index a995188..1d00ef9 100644
--- a/gcip-kernel-driver/include/gcip/gcip-image-config.h
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -8,6 +8,8 @@
#ifndef __GCIP_IMAGE_CONFIG_H__
#define __GCIP_IMAGE_CONFIG_H__
+#include <asm/page.h>
+#include <linux/bits.h>
#include <linux/types.h>
#define GCIP_FW_NUM_VERSIONS 4
@@ -80,6 +82,39 @@ struct gcip_image_config_parser {
struct gcip_image_config last_config;
};
+#define GCIP_IMG_CFG_ADDR_SHIFT 12
+#define GCIP_IMG_CFG_MB_SHIFT 20
+#define GCIP_IMG_CFG_SIZE_MODE_BIT BIT(GCIP_IMG_CFG_ADDR_SHIFT - 1)
+#define GCIP_IMG_CFG_SECURE_SIZE_MASK (GCIP_IMG_CFG_SIZE_MODE_BIT - 1u)
+#define GCIP_IMG_CFG_NS_SIZE_MASK (GCIP_IMG_CFG_SIZE_MODE_BIT - 1u)
+#define GCIP_IMG_CFG_ADDR_MASK ~(BIT(GCIP_IMG_CFG_ADDR_SHIFT) - 1u)
+
+/* For decoding the size of ns_iommu_mappings. */
+static inline u32 gcip_ns_config_to_size(u32 cfg)
+{
+ u32 size;
+
+ if (cfg & GCIP_IMG_CFG_SIZE_MODE_BIT)
+ size = (cfg & GCIP_IMG_CFG_NS_SIZE_MASK) << PAGE_SHIFT;
+ else
+ size = (cfg & GCIP_IMG_CFG_NS_SIZE_MASK) << GCIP_IMG_CFG_MB_SHIFT;
+
+ return size;
+}
+
+/* For decoding the size of iommu_mappings. */
+static inline u32 gcip_config_to_size(u32 cfg)
+{
+ u32 page_size;
+
+ if (cfg & GCIP_IMG_CFG_SIZE_MODE_BIT)
+ page_size = cfg & GCIP_IMG_CFG_SECURE_SIZE_MASK;
+ else
+ page_size = BIT(cfg & GCIP_IMG_CFG_SECURE_SIZE_MASK);
+
+ return page_size << PAGE_SHIFT;
+}
+
/*
* Initializes the image configuration parser.
*
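Moving the decoding helpers into this header lets chip drivers reuse them (the image config parser added to gxp-vd.c in this patch depends on that). Worked examples of the two encodings, assuming 4 KiB pages (PAGE_SHIFT == 12):

    /*
     * Secure mappings (gcip_config_to_size):
     *   0x803: size-mode bit (bit 11) set -> low 11 bits are a page count:
     *          3 pages -> 3 << 12 = 12 KiB.
     *   0x003: size-mode bit clear -> low 11 bits are an exponent:
     *          BIT(3) = 8 pages -> 8 << 12 = 32 KiB.
     *
     * Non-secure mappings (gcip_ns_config_to_size):
     *   0x805: size-mode bit set   -> 5 << PAGE_SHIFT = 20 KiB.
     *   0x005: size-mode bit clear -> 5 << 20 = 5 MiB.
     */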
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
index bda1b40..3649779 100644
--- a/gcip-kernel-driver/include/gcip/gcip-kci.h
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -109,6 +109,11 @@ enum gcip_kci_code {
*/
enum gcip_reverse_kci_code {
GCIP_RKCI_CHIP_CODE_FIRST = 0,
+ GCIP_RKCI_PM_QOS_REQUEST = GCIP_RKCI_CHIP_CODE_FIRST + 1,
+ GCIP_RKCI_CHANGE_BTS_SCENARIO = GCIP_RKCI_CHIP_CODE_FIRST + 2,
+ GCIP_RKCI_PM_QOS_BTS_REQUEST = GCIP_RKCI_CHIP_CODE_FIRST + 3,
+ GCIP_RKCI_DSP_CORE_TELEMETRY_TRY_READ = GCIP_RKCI_CHIP_CODE_FIRST + 4,
+ GCIP_RKCI_CLIENT_FATAL_ERROR_NOTIFY = GCIP_RKCI_CHIP_CODE_FIRST + 5,
GCIP_RKCI_CHIP_CODE_LAST = 0x7FFF,
GCIP_RKCI_GENERIC_CODE_FIRST = 0x8000,
GCIP_RKCI_FIRMWARE_CRASH = GCIP_RKCI_GENERIC_CODE_FIRST + 0,
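These five chip-specific codes mirror what the firmware now sends ("sync RKCI codes with fw" in the commit message). A hypothetical sketch of how a chip driver's reverse-KCI handler might dispatch on them; the response-element field names are an assumption here:

    static void handle_rkci(struct gxp_dev *gxp,
                            const struct gcip_kci_response_element *rkci)
    {
            switch (rkci->code) {
            case GCIP_RKCI_CLIENT_FATAL_ERROR_NOTIFY:
                    /*
                     * Invalidate the affected VD and signal its registered
                     * invalidated-eventfd (see gxp-common-platform.c below).
                     */
                    break;
            case GCIP_RKCI_FIRMWARE_CRASH:
                    /* Handled generically: restart the MCU firmware. */
                    break;
            default:
                    dev_warn(gxp->dev, "Unknown RKCI code %#x", rkci->code);
            }
    }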
diff --git a/gxp-client.c b/gxp-client.c
index 16fb5bc..813de48 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -25,7 +25,8 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
return ERR_PTR(-ENOMEM);
client->gxp = gxp;
- init_rwsem(&client->semaphore);
+ lockdep_register_key(&client->key);
+ __init_rwsem(&client->semaphore, "&client->semaphore", &client->key);
client->has_block_wakelock = false;
client->has_vd_wakelock = false;
client->requested_states = off_states;
@@ -53,6 +54,9 @@ void gxp_client_destroy(struct gxp_client *client)
gxp_eventfd_put(client->mb_eventfds[core]);
}
+ if (client->vd_invalid_eventfd)
+ gxp_eventfd_put(client->vd_invalid_eventfd);
+
#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
!IS_ENABLED(CONFIG_GXP_GEM5)
if (client->tpu_file) {
@@ -84,6 +88,7 @@ void gxp_client_destroy(struct gxp_client *client)
up_write(&gxp->vd_semaphore);
}
+ lockdep_unregister_key(&client->key);
kfree(client);
}
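Registering a lock_class_key per client gives each client's rwsem its own lockdep class, so lock dependencies observed on one client do not produce false-positive warnings against another. The pattern in isolation (names hypothetical):

    #include <linux/lockdep.h>
    #include <linux/rwsem.h>
    #include <linux/slab.h>

    struct foo {
            struct rw_semaphore sem;
            struct lock_class_key key;
    };

    static struct foo *foo_create(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            lockdep_register_key(&f->key);
            __init_rwsem(&f->sem, "&f->sem", &f->key);
            return f;
    }

    static void foo_destroy(struct foo *f)
    {
            lockdep_unregister_key(&f->key); /* after the sem is unused */
            kfree(f);
    }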
diff --git a/gxp-client.h b/gxp-client.h
index 01d0b2c..935b23d 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -28,6 +28,7 @@ struct gxp_client {
* lock this semaphore for reading for the duration of that operation.
*/
struct rw_semaphore semaphore;
+ struct lock_class_key key;
bool has_block_wakelock;
bool has_vd_wakelock;
@@ -39,6 +40,7 @@ struct gxp_client {
struct gxp_tpu_mbx_desc mbx_desc;
struct gxp_eventfd *mb_eventfds[GXP_NUM_CORES];
+ struct gxp_eventfd *vd_invalid_eventfd;
/* client process thread group ID is really the main process ID. */
pid_t tgid;
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 3cdb436..369d4b2 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -22,6 +22,8 @@
#include <linux/uaccess.h>
#include <linux/uidgid.h>
+#include <gcip/gcip-dma-fence.h>
+
#include "gxp-client.h"
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
@@ -887,7 +889,7 @@ static int map_tpu_mbx_queue(struct gxp_client *client,
down_read(&gxp->vd_semaphore);
core_count = client->vd->num_cores;
- phys_core_list = gxp_vd_phys_core_list(client->vd);
+ phys_core_list = client->vd->core_list;
mbx_info = kmalloc(
sizeof(struct edgetpu_ext_mailbox_info) +
@@ -1537,6 +1539,48 @@ out_unlock_client_semaphore:
return ret;
}
+static int gxp_register_invalidated_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_invalidated_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_invalidated_eventfd_ioctl ibuf;
+ struct gxp_eventfd *eventfd;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ eventfd = gxp_eventfd_create(ibuf.eventfd);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto out;
+ }
+
+ if (client->vd_invalid_eventfd)
+ gxp_eventfd_put(client->vd_invalid_eventfd);
+ client->vd_invalid_eventfd = eventfd;
+
+out:
+ up_write(&client->semaphore);
+ return ret;
+}
+
+static int gxp_unregister_invalidated_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_invalidated_eventfd_ioctl __user *argp)
+{
+ down_write(&client->semaphore);
+
+ if (client->vd_invalid_eventfd)
+ gxp_eventfd_put(client->vd_invalid_eventfd);
+ client->vd_invalid_eventfd = NULL;
+
+ up_write(&client->semaphore);
+ return 0;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -1628,6 +1672,12 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_TRIGGER_DEBUG_DUMP:
ret = gxp_trigger_debug_dump(client, argp);
break;
+ case GXP_REGISTER_INVALIDATED_EVENTFD:
+ ret = gxp_register_invalidated_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_INVALIDATED_EVENTFD:
+ ret = gxp_unregister_invalidated_eventfd(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -1955,16 +2005,23 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
dev_warn(dev, "Failed to init thermal driver: %d\n", ret);
}
+ gxp->gfence_mgr = gcip_dma_fence_manager_create(gxp->dev);
+ if (IS_ERR(gxp->gfence_mgr)) {
+ ret = PTR_ERR(gxp->gfence_mgr);
+ dev_err(dev, "Failed to init DMA fence manager: %d\n", ret);
+ goto err_thermal_destroy;
+ }
+
INIT_LIST_HEAD(&gxp->client_list);
mutex_init(&gxp->client_list_lock);
if (gxp->after_probe) {
ret = gxp->after_probe(gxp);
if (ret)
- goto err_thermal_destroy;
+ goto err_dma_fence_destroy;
}
gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
- gxp->misc_dev.name = "gxp";
+ gxp->misc_dev.name = GXP_NAME;
gxp->misc_dev.fops = &gxp_fops;
ret = misc_register(&gxp->misc_dev);
if (ret) {
@@ -1981,6 +2038,8 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
err_before_remove:
if (gxp->before_remove)
gxp->before_remove(gxp);
+err_dma_fence_destroy:
+ /* DMA fence manager creation doesn't need revert */
err_thermal_destroy:
/* thermal init doesn't need revert */
gxp_core_telemetry_exit(gxp);
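From userspace, the new pair of ioctls lets a client learn asynchronously that its virtual device was invalidated (e.g. by the MCU crash handler). A hypothetical usage sketch; the ioctl numbers and struct layout live in gxp.h, which this page only shows in the diffstat, and gxp_fd is an open FD on the GXP device node:

    #include <err.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    struct gxp_register_invalidated_eventfd_ioctl arg = {
            .eventfd = eventfd(0, EFD_CLOEXEC),
    };

    /* Register: the kernel signals this eventfd when the VD is invalidated. */
    if (ioctl(gxp_fd, GXP_REGISTER_INVALIDATED_EVENTFD, &arg))
            err(1, "register invalidated eventfd");

    /* ... poll()/read() arg.eventfd alongside other work ... */

    /* Unregister; the argument is ignored by the kernel handler above. */
    ioctl(gxp_fd, GXP_UNREGISTER_INVALIDATED_EVENTFD, NULL);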
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index a29d6af..36b8a23 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -685,7 +685,8 @@ static void gxp_debug_dump_process_dump(struct work_struct *work)
*/
down_read(&gxp->vd_semaphore);
- boot_mode = gxp_firmware_get_boot_mode(gxp, core_id);
+ /* TODO(b/234172464): pass proper VD */
+ boot_mode = gxp_firmware_get_boot_mode(gxp, NULL, core_id);
if (gxp_is_fw_running(gxp, core_id) &&
(boot_mode == GXP_BOOT_MODE_STATUS_COLD_BOOT_COMPLETED ||
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 0497ee3..baf273f 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -94,6 +94,7 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
if (!gxp_client_has_available_vd(gxp->debugfs_client,
"GXP_MAILBOX_COMMAND")) {
ret = -ENODEV;
+ up_read(&gxp->debugfs_client->semaphore);
goto out;
}
up_read(&gxp->debugfs_client->semaphore);
@@ -511,7 +512,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux2_fops, gxp_cmu_mux2_get, gxp_cmu_mux2_set,
void gxp_create_debugfs(struct gxp_dev *gxp)
{
- gxp->d_entry = debugfs_create_dir("gxp", NULL);
+ gxp->d_entry = debugfs_create_dir(GXP_NAME, NULL);
if (IS_ERR_OR_NULL(gxp->d_entry))
return;
diff --git a/gxp-dma-fence.c b/gxp-dma-fence.c
new file mode 100644
index 0000000..900ea23
--- /dev/null
+++ b/gxp-dma-fence.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP support for DMA fence.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/slab.h>
+
+#include <gcip/gcip-dma-fence.h>
+
+#include "gxp-dma-fence.h"
+#include "gxp-internal.h"
+#include "gxp-vd.h"
+#include "gxp.h"
+
+static const char *gxp_get_driver_name(struct dma_fence *fence)
+{
+ return GXP_NAME;
+}
+
+static void gxp_dma_fence_release(struct dma_fence *fence)
+{
+ struct gxp_dma_fence *gxp_fence = to_gxp_fence(fence);
+
+ gcip_dma_fence_exit(&gxp_fence->gfence);
+ kfree(gxp_fence);
+}
+
+static const struct dma_fence_ops gxp_dma_fence_ops = {
+ .get_driver_name = gxp_get_driver_name,
+ .get_timeline_name = gcip_dma_fence_get_timeline_name,
+ .wait = dma_fence_default_wait,
+ .enable_signaling = gcip_dma_fence_always_true,
+ .release = gxp_dma_fence_release,
+};
+
+int gxp_dma_fence_create(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ struct gxp_create_sync_fence_data *datap)
+{
+ struct gcip_dma_fence_data data = {
+ .timeline_name = datap->timeline_name,
+ .ops = &gxp_dma_fence_ops,
+ .seqno = datap->seqno,
+ };
+ struct gxp_dma_fence *gxp_fence =
+ kzalloc(sizeof(*gxp_fence), GFP_KERNEL);
+ int ret;
+
+ if (!gxp_fence)
+ return -ENOMEM;
+
+ /* TODO(b/264855736): add VD association support */
+
+ ret = gcip_dma_fence_init(gxp->gfence_mgr, &gxp_fence->gfence, &data);
+ if (!ret)
+ datap->fence = data.fence;
+ /*
+ * We don't need to kfree(gxp_fence) on error because that's called in
+ * gxp_dma_fence_release.
+ */
+
+ return ret;
+}
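A sketch of a hypothetical caller, e.g. a sync-fence-creation ioctl handler. Per the comment above, on gcip_dma_fence_init() failure the release callback has already freed gxp_fence, so the caller needs no kfree() either:

    struct gxp_create_sync_fence_data data;
    int ret;

    if (copy_from_user(&data, argp, sizeof(data)))
            return -EFAULT;
    ret = gxp_dma_fence_create(gxp, client->vd, &data);
    if (ret)
            return ret;
    /* data.fence now holds the installed FD; hand it back to userspace. */
    if (copy_to_user(argp, &data, sizeof(data)))
            return -EFAULT;
    return 0;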
diff --git a/gxp-dma-fence.h b/gxp-dma-fence.h
new file mode 100644
index 0000000..c7ad95e
--- /dev/null
+++ b/gxp-dma-fence.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP support for DMA fence.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GXP_DMA_FENCE_H__
+#define __GXP_DMA_FENCE_H__
+
+#include <gcip/gcip-dma-fence.h>
+
+#include "gxp-vd.h"
+#include "gxp.h"
+
+/* Converts struct dma_fence to gxp_dma_fence. */
+#define to_gxp_fence(fence) \
+ container_of(to_gcip_fence(fence), struct gxp_dma_fence, gfence)
+
+struct gxp_dma_fence {
+ struct gcip_dma_fence gfence;
+ /* The owner of this DMA fence */
+ struct gxp_virtual_device *vd;
+ /* List of DMA fences owned by the same VD. */
+ struct list_head fence_list;
+};
+
+/*
+ * Creates a DMA fence associated with @vd.
+ *
+ * @datap->fence is set to the fence FD on success.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gxp_dma_fence_create(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ struct gxp_create_sync_fence_data *datap);
+
+#endif /* __GXP_DMA_FENCE_H__ */
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 1480761..3dfd70b 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -15,6 +15,7 @@
#include "gxp-config.h"
#include "gxp-dma.h"
+#include "gxp-firmware.h" /* gxp_core_boot */
#include "gxp-mailbox.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
@@ -79,13 +80,19 @@ static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
int pasid;
uint core;
- pasid = iommu_aux_get_pasid(domain, gxp->dev);
- for (core = 0; core < GXP_NUM_CORES; core++)
- if (BIT(core) & core_list) {
- dev_dbg(gxp->dev, "Assign core%u to PASID %d\n", core,
- pasid);
- gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid);
- }
+ /* Program VID only when cores are managed by us. */
+ if (gxp_is_direct_mode(gxp) || gxp_core_boot) {
+ pasid = iommu_aux_get_pasid(domain, gxp->dev);
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (BIT(core) & core_list) {
+ dev_dbg(gxp->dev, "Assign core%u to PASID %d\n",
+ core, pasid);
+ gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid);
+ }
+ } else {
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ gxp_ssmt_set_core_bypass(&mgr->ssmt, core);
+ }
return 0;
}
@@ -167,31 +174,6 @@ static void gxp_unmap_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
#endif /* GXP_HAS_LAP */
-/* Maps the shared buffer region to @domain. */
-static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
- struct iommu_domain *domain,
- u8 slice_index)
-{
- size_t shared_size = gxp->shared_slice_size;
-
- if (!gxp->shared_buf.paddr)
- return 0;
- return iommu_map(domain, gxp->shared_buf.daddr,
- gxp->shared_buf.paddr + shared_size * slice_index,
- shared_size, IOMMU_READ | IOMMU_WRITE);
-}
-
-/* Reverts gxp_map_core_shared_buffer. */
-static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
- struct iommu_domain *domain)
-{
- size_t shared_size = gxp->shared_slice_size;
-
- if (!gxp->shared_buf.paddr)
- return;
- iommu_unmap(domain, gxp->shared_buf.daddr, shared_size);
-}
-
/* gxp-dma.h Interface */
uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
@@ -343,20 +325,11 @@ int gxp_dma_map_core_resources(struct gxp_dev *gxp,
if (ret)
goto err;
}
- /*
- * TODO(b/202213606): Map FW regions of all cores in a VD for
- * each other at VD creation.
- */
- ret = iommu_map(domain, gxp->fwbufs[0].daddr, gxp->fwbufs[0].paddr,
- gxp->fwbufs[0].size * GXP_NUM_CORES,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.paddr,
- gxp->fwdatabuf.size, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = gxp_map_core_shared_buffer(gxp, domain, slice_index);
+ /* TODO(b/265748027): directly remove this map */
+ if (gxp->fwdatabuf.daddr)
+ ret = iommu_map(domain, gxp->fwdatabuf.daddr,
+ gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
+ IOMMU_READ | IOMMU_WRITE);
if (ret)
goto err;
/* Only map the TPU mailboxes if they were found on probe */
@@ -402,15 +375,8 @@ void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
EXT_TPU_MBX_SIZE);
}
}
- gxp_unmap_core_shared_buffer(gxp, domain);
- iommu_unmap(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.size);
- /*
- * TODO(b/202213606): A core should only have access to the FW
- * of other cores if they're in the same VD, and have the FW
- * region unmapped on VD destruction.
- */
- iommu_unmap(domain, gxp->fwbufs[0].daddr,
- gxp->fwbufs[0].size * GXP_NUM_CORES);
+ if (gxp->fwdatabuf.daddr)
+ iommu_unmap(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.size);
for (i = 0; i < GXP_NUM_CORES; i++) {
if (!(BIT(i) & core_list))
continue;
@@ -710,6 +676,8 @@ int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
return -EINVAL;
return size_mapped;
}
+ dma_sync_sg_for_device(gxp->dev, sgt->sgl, sgt->orig_nents,
+ DMA_BIDIRECTIONAL);
return 0;
}
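gxp_dma_map_iova_sgt() maps vmalloc-backed pages that the CPU has written (e.g. firmware RW data), so after mapping, the CPU caches must be flushed for the device to observe the contents; the new dma_sync_sg_for_device() call does that. The inverse direction appears later in this patch as invalidate_scratchpad_region(). The pairing, schematically:

    /* CPU writes, then flush before the DSP reads: */
    dma_sync_sg_for_device(gxp->dev, sgt->sgl, sgt->orig_nents,
                           DMA_BIDIRECTIONAL);

    /* ... device runs and writes results ... */

    /* Invalidate before the CPU reads what the DSP wrote: */
    dma_sync_sg_for_cpu(gxp->dev, sgt->sgl, sgt->orig_nents,
                        DMA_BIDIRECTIONAL);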
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index 6f22f8d..cf2b3e0 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -11,9 +11,11 @@
#include "gxp-debug-dump.h"
#include "gxp-firmware-data.h"
+#include "gxp-firmware.h" /* gxp_core_boot */
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-range-alloc.h"
+#include "gxp-vd.h"
#include "gxp.h"
/*
@@ -89,11 +91,23 @@ struct gxp_fw_data_manager {
struct fw_memory wdog_mem;
struct fw_memory core_telemetry_mem;
struct fw_memory debug_dump_mem;
+
+ /*
+ * A host-view of the System configuration descriptor. This same desc
+ * is provided to all VDs and all cores. This is the R/O section.
+ */
+ struct gxp_system_descriptor_ro *sys_desc_ro;
+ /*
+ * A host-view of the System configuration descriptor. This same desc
+ * is provided to all VDs and all cores. This is the R/W section.
+ */
+ struct gxp_system_descriptor_rw *sys_desc_rw;
};
/* A container holding information for a single GXP application. */
struct app_metadata {
struct gxp_fw_data_manager *mgr;
+ struct gxp_virtual_device *vd;
uint application_id;
uint core_count;
uint core_list; /* bitmap of cores allocated to this app */
@@ -463,6 +477,208 @@ static struct fw_memory init_application(struct app_metadata *app)
return mem;
}
+static struct app_metadata *gxp_fw_data_create_app_legacy(struct gxp_dev *gxp,
+ uint core_list)
+{
+ struct gxp_fw_data_manager *mgr = gxp->data_mgr;
+ struct app_metadata *app;
+ void *err;
+ int i;
+
+ app = kzalloc(sizeof(struct app_metadata), GFP_KERNEL);
+ if (!app)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create resource and memory allocations for new app */
+ app->mgr = mgr;
+ app->application_id = DEFAULT_APP_ID;
+ app->core_count = hweight_long(core_list);
+ app->core_list = core_list;
+
+ /* User doorbells */
+ app->user_doorbells_count = DEFAULT_APP_USER_DOORBELL_COUNT;
+ app->user_doorbells =
+ kcalloc(app->user_doorbells_count, sizeof(int), GFP_KERNEL);
+ if (!app->user_doorbells) {
+ err = ERR_PTR(-ENOMEM);
+ goto err_user_doorbells;
+ }
+
+ for (i = 0; i < app->user_doorbells_count; i++) {
+ range_alloc_get_any(mgr->doorbell_allocator,
+ &app->user_doorbells[i]);
+ }
+
+ /* User sync barrier */
+ app->user_barriers_count = DEFAULT_APP_USER_BARRIER_COUNT;
+ app->user_barriers =
+ kcalloc(app->user_barriers_count, sizeof(int), GFP_KERNEL);
+ if (!app->user_barriers) {
+ err = ERR_PTR(-ENOMEM);
+ goto err_user_barriers;
+ }
+
+ for (i = 0; i < app->user_barriers_count; i++) {
+ range_alloc_get_any(mgr->sync_barrier_allocator,
+ &app->user_barriers[i]);
+ }
+
+ /* Application region. */
+ app->app_mem = init_application(app);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (core_list & BIT(i)) {
+ mgr->system_desc->app_descriptor_dev_addr[i] =
+ app->app_mem.device_addr;
+ }
+ }
+
+ return app;
+
+err_user_barriers:
+ for (i = 0; i < app->user_doorbells_count; i++)
+ range_alloc_put(mgr->doorbell_allocator,
+ app->user_doorbells[i]);
+ kfree(app->user_doorbells);
+err_user_doorbells:
+ kfree(app);
+
+ return err;
+}
+
+static void gxp_fw_data_destroy_app_legacy(struct gxp_dev *gxp,
+ struct app_metadata *app)
+{
+ struct gxp_fw_data_manager *mgr = gxp->data_mgr;
+ int i;
+
+ for (i = 0; i < app->user_doorbells_count; i++)
+ range_alloc_put(mgr->doorbell_allocator,
+ app->user_doorbells[i]);
+ kfree(app->user_doorbells);
+
+ for (i = 0; i < app->user_barriers_count; i++)
+ range_alloc_put(mgr->sync_barrier_allocator,
+ app->user_barriers[i]);
+ kfree(app->user_barriers);
+
+ mem_alloc_free(mgr->allocator, &app->user_mem);
+ mem_alloc_free(mgr->allocator, &app->doorbells_mem);
+ mem_alloc_free(mgr->allocator, &app->sync_barriers_mem);
+ mem_alloc_free(mgr->allocator, &app->semaphores_mem);
+ mem_alloc_free(mgr->allocator, &app->cores_mem);
+ for (i = 0; i < app->core_count; i++) {
+ mem_alloc_free(mgr->allocator, &app->core_cmd_queues_mem[i]);
+ mem_alloc_free(mgr->allocator, &app->core_rsp_queues_mem[i]);
+ }
+ mem_alloc_free(mgr->allocator, &app->app_mem);
+
+ kfree(app);
+}
+
+/*
+ * This assumes @sys_cfg contains gxp_system_descriptor_ro in the first page
+ * and gxp_system_descriptor_rw in the second page.
+ */
+static void set_system_cfg_region(struct gxp_dev *gxp, void *sys_cfg)
+{
+ struct gxp_system_descriptor_ro *des_ro = sys_cfg;
+ struct gxp_system_descriptor_rw *des_rw = sys_cfg + PAGE_SIZE;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ gxp->data_mgr->core_telemetry_mem.host_addr;
+ struct telemetry_descriptor_ro *tel_ro;
+ struct telemetry_descriptor_rw *tel_rw;
+ struct core_telemetry_descriptor *tel_des;
+ int i;
+
+ if (gxp->debug_dump_mgr)
+ des_ro->debug_dump_dev_addr = gxp->debug_dump_mgr->buf.dsp_addr;
+ else
+ des_ro->debug_dump_dev_addr = 0;
+
+#define COPY_FIELDS \
+ do { \
+ tel_ro->host_status = tel_des->host_status; \
+ tel_ro->buffer_addr = tel_des->buffer_addr; \
+ tel_ro->buffer_size = tel_des->buffer_size; \
+ tel_rw->device_status = tel_des->device_status; \
+ tel_rw->data_available = tel_des->watermark_level; \
+ } while (0)
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ tel_ro = &des_ro->telemetry_desc.per_core_loggers[i];
+ tel_rw = &des_rw->telemetry_desc.per_core_loggers[i];
+ tel_des = &descriptor->per_core_loggers[i];
+ COPY_FIELDS;
+ tel_ro = &des_ro->telemetry_desc.per_core_tracers[i];
+ tel_rw = &des_rw->telemetry_desc.per_core_tracers[i];
+ tel_des = &descriptor->per_core_tracers[i];
+ COPY_FIELDS;
+ }
+#undef COPY_FIELDS
+}
+
+static struct app_metadata *
+_gxp_fw_data_create_app(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
+{
+ struct app_metadata *app;
+ struct gxp_host_control_region *core_cfg;
+ struct gxp_job_descriptor job;
+ struct gxp_vd_descriptor *vd_desc;
+ int i;
+
+ /*
+ * If we knew sys_cfg's virtual address at init() time, this would not
+ * be needed here. But to stay compatible with !use_per_vd_config, we
+ * keep gxp_fw_data_init() doing the legacy-mode initialization and
+ * copy the values to the config region here instead.
+ */
+ if (vd->vdid == 0)
+ set_system_cfg_region(gxp, vd->sys_cfg.vaddr);
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ return ERR_PTR(-ENOMEM);
+
+ if (!gxp_core_boot) {
+ dev_info(gxp->dev, "Skip setting VD and core CFG");
+ return app;
+ }
+ /* Set up VD config region. */
+ vd_desc = vd->vd_cfg.vaddr;
+ vd_desc->application_id = DEFAULT_APP_ID;
+ vd_desc->vd_is_initialized = 0;
+ /* Set up core config region. */
+ job.workers_count = vd->num_cores;
+ for (i = 0; i < ARRAY_SIZE(job.worker_to_fw); i++) {
+ /*
+ * Kernel-initiated workloads always act like the entire VD is
+ * one giant N-core job where N is the number of cores allocated
+ * to that VD.
+ * The MCU, on the other hand, can have multiple jobs dispatched
+ * to the same VD at the same time.
+ */
+ if (i < job.workers_count)
+ job.worker_to_fw[i] = i;
+ else
+ job.worker_to_fw[i] = -1;
+ }
+ /* Give each VD a unique HW resources slot. */
+ job.hardware_resources_slot = gxp_vd_hw_slot_id(vd);
+ /* Assign the same job descriptor to all cores in this VD */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ core_cfg = vd->core_cfg.vaddr +
+ vd->core_cfg.size / GXP_NUM_CORES * i;
+ core_cfg->job_descriptor = job;
+ }
+
+ return app;
+}
+
+static void _gxp_fw_data_destroy_app(struct gxp_dev *gxp,
+ struct app_metadata *app)
+{
+ kfree(app);
+}
+
int gxp_fw_data_init(struct gxp_dev *gxp)
{
struct gxp_fw_data_manager *mgr;
@@ -486,6 +702,7 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
res = -ENODEV;
goto err;
}
+ gxp->fwdatabuf.vaddr = mgr->fw_data_virt;
/* Instantiate the doorbells allocator with all doorbells */
mgr->doorbell_allocator =
@@ -607,101 +824,31 @@ err:
return res;
}
-void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
+void *gxp_fw_data_create_app(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
- struct gxp_fw_data_manager *mgr = gxp->data_mgr;
struct app_metadata *app;
- void *err;
- int i;
-
- app = kzalloc(sizeof(struct app_metadata), GFP_KERNEL);
- if (!app)
- return ERR_PTR(-ENOMEM);
-
- /* Create resource and memory allocations for new app */
- app->mgr = mgr;
- app->application_id = DEFAULT_APP_ID;
- app->core_count = hweight_long(core_list);
- app->core_list = core_list;
-
- /* User doorbells */
- app->user_doorbells_count = DEFAULT_APP_USER_DOORBELL_COUNT;
- app->user_doorbells =
- kcalloc(app->user_doorbells_count, sizeof(int), GFP_KERNEL);
- if (!app->user_doorbells) {
- err = ERR_PTR(-ENOMEM);
- goto err_user_doorbells;
- }
-
- for (i = 0; i < app->user_doorbells_count; i++) {
- range_alloc_get_any(mgr->doorbell_allocator,
- &app->user_doorbells[i]);
- }
-
- /* User sync barrier */
- app->user_barriers_count = DEFAULT_APP_USER_BARRIER_COUNT;
- app->user_barriers =
- kcalloc(app->user_barriers_count, sizeof(int), GFP_KERNEL);
- if (!app->user_barriers) {
- err = ERR_PTR(-ENOMEM);
- goto err_user_barriers;
- }
- for (i = 0; i < app->user_barriers_count; i++) {
- range_alloc_get_any(mgr->sync_barrier_allocator,
- &app->user_barriers[i]);
- }
+ if (gxp_fw_data_use_per_vd_config(vd))
+ app = _gxp_fw_data_create_app(gxp, vd);
+ else
+ app = gxp_fw_data_create_app_legacy(gxp, vd->core_list);
- /* Application region. */
- app->app_mem = init_application(app);
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (core_list & BIT(i)) {
- mgr->system_desc->app_descriptor_dev_addr[i] =
- app->app_mem.device_addr;
- }
- }
+ if (IS_ERR(app))
+ return app;
+ app->vd = vd;
return app;
-
-err_user_barriers:
- for (i = 0; i < app->user_doorbells_count; i++)
- range_alloc_put(mgr->doorbell_allocator,
- app->user_doorbells[i]);
- kfree(app->user_doorbells);
-err_user_doorbells:
- kfree(app);
-
- return err;
}
void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application)
{
struct app_metadata *app = application;
- struct gxp_fw_data_manager *mgr = gxp->data_mgr;
- int i;
-
- for (i = 0; i < app->user_doorbells_count; i++)
- range_alloc_put(mgr->doorbell_allocator,
- app->user_doorbells[i]);
- kfree(app->user_doorbells);
- for (i = 0; i < app->user_barriers_count; i++)
- range_alloc_put(mgr->sync_barrier_allocator,
- app->user_barriers[i]);
- kfree(app->user_barriers);
-
- mem_alloc_free(mgr->allocator, &app->user_mem);
- mem_alloc_free(mgr->allocator, &app->doorbells_mem);
- mem_alloc_free(mgr->allocator, &app->sync_barriers_mem);
- mem_alloc_free(mgr->allocator, &app->semaphores_mem);
- mem_alloc_free(mgr->allocator, &app->cores_mem);
- for (i = 0; i < app->core_count; i++) {
- mem_alloc_free(mgr->allocator, &app->core_cmd_queues_mem[i]);
- mem_alloc_free(mgr->allocator, &app->core_rsp_queues_mem[i]);
- }
- mem_alloc_free(mgr->allocator, &app->app_mem);
-
- kfree(app);
+ if (!app)
+ return;
+ if (gxp_fw_data_use_per_vd_config(app->vd))
+ return _gxp_fw_data_destroy_app(gxp, app);
+ return gxp_fw_data_destroy_app_legacy(gxp, app);
}
void gxp_fw_data_destroy(struct gxp_dev *gxp)
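Taken together, _gxp_fw_data_create_app() and set_system_cfg_region() imply the following per-VD configuration layout (a sketch; the exact structures live in gxp-host-device-structs.h, shown only in the diffstat):

    /*
     *   sys_cfg:  page 0: struct gxp_system_descriptor_ro (host-written)
     *             page 1: struct gxp_system_descriptor_rw (device-written)
     *   vd_cfg:   struct gxp_vd_descriptor (application ID, init state)
     *   core_cfg: one struct gxp_host_control_region per core, each slot
     *             core_cfg.size / GXP_NUM_CORES bytes (job descriptor,
     *             boot mode/status, handshake fields)
     */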
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index a947cb8..e2296bd 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -11,6 +11,12 @@
#include "gxp-dma.h"
#include "gxp-internal.h"
+#include "gxp-vd.h"
+
+enum gxp_fw_data_protocol {
+ /* Use the per-VD configuration region. */
+ FW_DATA_PROTOCOL_PER_VD_CONFIG = 2,
+};
/**
* gxp_fw_data_init() - Initializes the FW data manager submodule.
@@ -30,14 +36,15 @@ int gxp_fw_data_init(struct gxp_dev *gxp);
* virtual device) used by the specified physical
* cores.
* @gxp: The parent GXP device
- * @core_list: A bitmap of the physical cores used in this application
+ * @vd: The virtual device this app is being created for
*
* Return:
* ptr - A pointer of the newly created application handle, an error pointer
* (PTR_ERR) otherwise.
* -ENOMEM - Insufficient memory to create the application
*/
-void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list);
+void *gxp_fw_data_create_app(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
/**
* gxp_fw_data_destroy_app() - Deallocates the HW and memory resources used by
@@ -94,4 +101,9 @@ int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
u8 type);
+static inline bool gxp_fw_data_use_per_vd_config(struct gxp_virtual_device *vd)
+{
+ return vd->config_version >= FW_DATA_PROTOCOL_PER_VD_CONFIG;
+}
+
#endif /* __GXP_FIRMWARE_DATA_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index fcf6a6f..5cc0335 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -16,11 +16,16 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <gcip/gcip-alloc-helper.h>
+#include <gcip/gcip-common-image-header.h>
+#include <gcip/gcip-image-config.h>
+
#include "gxp-bpm.h"
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
+#include "gxp-firmware-data.h"
#include "gxp-firmware.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
@@ -34,15 +39,47 @@
#include "unittests/factory/fake-gxp-firmware.h"
#endif
-#define FW_HEADER_SIZE (0x1000)
-#define FW_IMAGE_TYPE_OFFSET (0x400)
+#define FW_HEADER_SIZE GCIP_FW_HEADER_SIZE
static int gxp_dsp_fw_auth_disable;
module_param_named(dsp_fw_auth_disable, gxp_dsp_fw_auth_disable, int, 0660);
-static bool gxp_core_boot = true;
+bool gxp_core_boot = true;
module_param_named(core_boot, gxp_core_boot, bool, 0660);
+/*
+ * Fetches and records the image config of the first core's firmware.
+ */
+static void gxp_firmware_get_image_config(struct gxp_dev *gxp,
+ struct gxp_firmware_manager *mgr)
+{
+ struct gcip_common_image_header *hdr =
+ (struct gcip_common_image_header *)mgr->firmwares[0]->data;
+ struct gcip_image_config *cfg;
+
+ if (unlikely(mgr->firmwares[0]->size < FW_HEADER_SIZE))
+ return;
+ cfg = get_image_config_from_hdr(hdr);
+ if (cfg)
+ mgr->img_cfg = *cfg;
+ else
+ dev_warn(gxp->dev,
+ "Firmware doesn't have a valid image config");
+}
+
+/*
+ * Call this function when mgr->firmwares have been populated.
+ * This function sets is_firmware_requested to true.
+ *
+ * Caller holds mgr->dsp_firmware_lock.
+ */
+static void gxp_firmware_has_requested(struct gxp_dev *gxp,
+ struct gxp_firmware_manager *mgr)
+{
+ gxp_firmware_get_image_config(gxp, mgr);
+ mgr->is_firmware_requested = true;
+}
+
static int
request_dsp_firmware(struct gxp_dev *gxp, char *name_prefix,
const struct firmware *out_firmwares[GXP_NUM_CORES])
@@ -97,25 +134,67 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
+ (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
+ (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
+ (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
+ dev_err(gxp->dev, "Invalid ELF format.");
+ return -EINVAL;
+ }
+
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
const u64 da = phdr->p_paddr;
const u32 memsz = phdr->p_memsz;
const u32 filesz = phdr->p_filesz;
+ const u32 offset = phdr->p_offset;
+ const u32 p_flags = phdr->p_flags;
void *ptr;
- if (phdr->p_type != PT_LOAD || !phdr->p_flags || !memsz)
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if (!phdr->p_flags)
+ continue;
+
+ if (!memsz)
continue;
if (!(da >= buffer->daddr &&
- da + memsz <= buffer->daddr + buffer->size))
+ da + memsz <= buffer->daddr + buffer->size)) {
+ /*
+ * Some BSS data may be referenced from TCM, and can be
+ * skipped while loading
+ */
+ dev_err(gxp->dev,
+ "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...",
+ da, memsz);
continue;
+ }
+
+ dev_info(gxp->dev,
+ "phdr: da %#llx memsz %#x filesz %#x perm %d", da,
+ memsz, filesz, p_flags);
+
+ if (filesz > memsz) {
+ dev_err(gxp->dev, "Bad phdr filesz %#x memsz %#x",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > size) {
+ dev_err(gxp->dev, "Truncated fw: need %#x avail %#zx",
+ offset + filesz, size);
+ ret = -EINVAL;
+ break;
+ }
/* grab the kernel address for this device address */
ptr = buffer->vaddr + (da - buffer->daddr);
if (!ptr) {
- dev_err(gxp->dev, "Bad phdr: da 0x%llx mem 0x%x\n",
- da, memsz);
+ dev_err(gxp->dev, "Bad phdr: da 0x%#llx mem 0x%#x", da,
+ memsz);
ret = -EINVAL;
break;
}
@@ -232,137 +311,91 @@ error:
return ret;
}
-static int gxp_firmware_fetch_boundary(struct gxp_dev *gxp, const u8 *elf_data,
- size_t size,
- const struct gxp_mapped_resource *buffer,
- dma_addr_t *boundary_ptr)
-{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
- struct elf32_phdr *phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
- int i, ret = 0;
- dma_addr_t boundary = 0;
-
- if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
- (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
- (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
- (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
- dev_err(gxp->dev, "Invalid ELF format.");
- return -EINVAL;
- }
-
- /* go through the available ELF segments */
- for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- const u64 da = phdr->p_paddr;
- const u32 memsz = phdr->p_memsz;
- const u32 filesz = phdr->p_filesz;
- const u32 offset = phdr->p_offset;
- const u32 p_flags = phdr->p_flags;
-
- if (phdr->p_type != PT_LOAD || !p_flags || !memsz)
- continue;
-
- if (!(da >= buffer->daddr &&
- da + memsz <= buffer->daddr + buffer->size)) {
- /*
- * Some BSS data may be referenced from TCM, and can be
- * skipped while loading
- */
- dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...",
- da, memsz);
- continue;
- }
-
- dev_info(gxp->dev,
- "phdr: da %#llx memsz %#x filesz %#x perm %d", da,
- memsz, filesz, p_flags);
-
- if (filesz > memsz) {
- dev_err(gxp->dev, "Bad phdr filesz %#x memsz %#x",
- filesz, memsz);
- ret = -EINVAL;
- break;
- }
-
- if (offset + filesz > size) {
- dev_err(gxp->dev, "Truncated fw: need %#x avail %#zx",
- offset + filesz, size);
- ret = -EINVAL;
- break;
- }
- if (p_flags & PF_W) {
- if (!boundary)
- boundary = da;
- } else if (boundary) {
- dev_err(gxp->dev,
- "Found RO region after a writable segment");
- ret = -EINVAL;
- break;
- }
- }
- /* no boundary has been found - assume the whole image is RO */
- if (!boundary)
- boundary = buffer->daddr + buffer->size;
- if (!ret)
- *boundary_ptr = boundary;
-
- return ret;
-}
-
-/*
- * Sets @rw_boundaries by analyzing LOAD segments in ELF headers.
- *
- * Assumes the LOAD segments are arranged with RO first then RW. Returns -EINVAL
- * if this is not true.
- */
-static int gxp_firmware_fetch_boundaries(struct gxp_dev *gxp,
- struct gxp_firmware_manager *mgr)
-{
- int core, ret;
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- ret = gxp_firmware_fetch_boundary(
- gxp, mgr->firmwares[core]->data + FW_HEADER_SIZE,
- mgr->firmwares[core]->size - FW_HEADER_SIZE,
- &gxp->fwbufs[core], &mgr->rw_boundaries[core]);
- if (ret) {
- dev_err(gxp->dev,
- "failed to fetch boundary of core %d: %d", core,
- ret);
- goto error;
- }
- }
- return 0;
-
-error:
- memset(mgr->rw_boundaries, 0, sizeof(mgr->rw_boundaries));
- return ret;
-}
-
/* Forward declaration for usage inside gxp_firmware_load(..). */
static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
-static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core, bool verbose)
+static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core,
+ uint phys_core, bool verbose)
{
u32 reset_vec;
- reset_vec = gxp_read_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(core));
+ reset_vec = gxp_read_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(phys_core));
if (verbose)
dev_notice(gxp->dev,
"Current Aurora reset vector for core %u: 0x%x\n",
- core, reset_vec);
- gxp_write_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(core),
+ phys_core, reset_vec);
+ gxp_write_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(phys_core),
gxp->firmware_mgr->entry_points[core]);
if (verbose)
dev_notice(gxp->dev,
"New Aurora reset vector for core %u: 0x%x\n",
- core, gxp->firmware_mgr->entry_points[core]);
+ phys_core, gxp->firmware_mgr->entry_points[core]);
}
-static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
+static void *get_scratchpad_base(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ void *mem;
+ size_t rw_size;
+
+ if (vd && gxp_fw_data_use_per_vd_config(vd))
+ return vd->core_cfg.vaddr +
+ vd->core_cfg.size / GXP_NUM_CORES * core;
+
+ if (!vd || !vd->rwdata_sgt[core])
+ return gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
+
+ /* Return the last AURORA_SCRATCHPAD_LEN of rwdata_sgt. */
+ mem = gcip_noncontiguous_sgt_to_mem(vd->rwdata_sgt[core]);
+ rw_size = gxp->fwbufs[core].size - vd->fw_ro_size;
+ return mem + rw_size - AURORA_SCRATCHPAD_LEN;
+}
+
+/* TODO(b/265562894): remove scratchpad region support */
+static void flush_scratchpad_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ if (!vd || gxp_fw_data_use_per_vd_config(vd) || !vd->rwdata_sgt[core])
+ return;
+ dma_sync_sg_for_device(gxp->dev, vd->rwdata_sgt[core]->sgl,
+ vd->rwdata_sgt[core]->orig_nents,
+ DMA_BIDIRECTIONAL);
+}
+
+static void invalidate_scratchpad_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core)
+{
+ if (!vd || gxp_fw_data_use_per_vd_config(vd) || !vd->rwdata_sgt[core])
+ return;
+ dma_sync_sg_for_cpu(gxp->dev, vd->rwdata_sgt[core]->sgl,
+ vd->rwdata_sgt[core]->orig_nents,
+ DMA_BIDIRECTIONAL);
+}
+
+static void reset_core_config_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ struct gxp_host_control_region *core_cfg;
+
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ core_cfg->core_alive_magic = 0;
+ core_cfg->top_access_ok = 0;
+ core_cfg->boot_status = GXP_BOOT_STATUS_NONE;
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_COLD_BOOT);
+ } else {
+ memset(core_cfg, 0, AURORA_SCRATCHPAD_LEN);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_REQUEST_COLD_BOOT);
+ }
+}
+
+static int gxp_firmware_load(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint core)
{
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- u32 offset;
- void __iomem *core_scratchpad_base;
int ret;
if (!mgr->firmwares[core])
@@ -382,24 +415,11 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
mgr->firmwares[core]->data + FW_HEADER_SIZE,
core);
- memset(gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF, 0,
- AURORA_SCRATCHPAD_LEN);
-
- core_scratchpad_base = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
- writel(0, core_scratchpad_base + offset);
- offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
- writel(0, core_scratchpad_base + offset);
-
/* TODO(b/188970444): Cleanup logging of addresses */
dev_notice(gxp->dev,
"ELF loaded at virtual: %pK and physical: 0x%llx\n",
gxp->fwbufs[core].vaddr, gxp->fwbufs[core].paddr);
- /* Configure bus performance monitors */
- gxp_bpm_configure(gxp, core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
- gxp_bpm_configure(gxp, core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
-
return 0;
out_firmware_unload:
@@ -407,18 +427,20 @@ out_firmware_unload:
return ret;
}
-static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
+static int gxp_firmware_handshake(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ uint phys_core)
{
- u32 offset;
u32 __maybe_unused expected_top_value;
- void __iomem *core_scratchpad_base;
+ /* Prevent the read loops below from being optimized out. */
+ volatile struct gxp_host_control_region *core_cfg;
int ctr;
/* Wait for core to come up */
- dev_notice(gxp->dev, "Waiting for core %u to power up...\n", core);
+ dev_notice(gxp->dev, "Waiting for core %u to power up...\n", phys_core);
ctr = 1000;
while (ctr) {
- if (gxp_lpm_is_powered(gxp, CORE_TO_PSM(core)))
+ if (gxp_lpm_is_powered(gxp, CORE_TO_PSM(phys_core)))
break;
udelay(1 * GXP_TIME_DELAY_FACTOR);
ctr--;
@@ -432,9 +454,9 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
/* Wait for 500ms. Then check if Q7 core is alive */
dev_notice(gxp->dev, "Waiting for core %u to respond...\n",
- core);
+ phys_core);
- core_scratchpad_base = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
+ core_cfg = get_scratchpad_base(gxp, vd, core);
/*
* Currently, the hello_world FW writes a magic number
@@ -442,7 +464,6 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* space as an alive message
*/
ctr = 5000;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
#if IS_ENABLED(CONFIG_GXP_TEST)
fake_gxp_firmware_flush_work_all();
/*
@@ -454,16 +475,18 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
#endif
usleep_range(50 * GXP_TIME_DELAY_FACTOR, 60 * GXP_TIME_DELAY_FACTOR);
while (ctr--) {
- if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->core_alive_magic == Q7_ALIVE_MAGIC)
break;
usleep_range(1 * GXP_TIME_DELAY_FACTOR,
10 * GXP_TIME_DELAY_FACTOR);
}
- if (readl(core_scratchpad_base + offset) != Q7_ALIVE_MAGIC) {
- dev_err(gxp->dev, "Core %u did not respond!\n", core);
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->core_alive_magic != Q7_ALIVE_MAGIC) {
+ dev_err(gxp->dev, "Core %u did not respond!\n", phys_core);
return -EIO;
}
- dev_notice(gxp->dev, "Core %u is alive!\n", core);
+ dev_notice(gxp->dev, "Core %u is alive!\n", phys_core);
#if !IS_ENABLED(CONFIG_GXP_GEM5)
/*
@@ -477,26 +500,27 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* handshakes in Gem5.
*/
ctr = 1000;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
- expected_top_value = BIT(CORE_WAKEUP_DOORBELL(core));
+ expected_top_value = BIT(CORE_WAKEUP_DOORBELL(phys_core));
while (ctr--) {
- if (readl(core_scratchpad_base + offset) == expected_top_value)
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->top_access_ok == expected_top_value)
break;
udelay(1 * GXP_TIME_DELAY_FACTOR);
}
- if (readl(core_scratchpad_base + offset) != expected_top_value) {
- dev_err(gxp->dev, "TOP access from core %u failed!\n", core);
+ if (core_cfg->top_access_ok != expected_top_value) {
+ dev_err(gxp->dev, "TOP access from core %u failed!\n", phys_core);
return -EIO;
}
- dev_notice(gxp->dev, "TOP access from core %u successful!\n", core);
+ dev_notice(gxp->dev, "TOP access from core %u successful!\n", phys_core);
#endif
/* Stop bus performance monitors */
- gxp_bpm_stop(gxp, core);
+ gxp_bpm_stop(gxp, phys_core);
dev_notice(gxp->dev, "Core%u Instruction read transactions: 0x%x\n",
- core, gxp_bpm_read_counter(gxp, core, INST_BPM_OFFSET));
- dev_notice(gxp->dev, "Core%u Data write transactions: 0x%x\n", core,
- gxp_bpm_read_counter(gxp, core, DATA_BPM_OFFSET));
+ core, gxp_bpm_read_counter(gxp, phys_core, INST_BPM_OFFSET));
+ dev_notice(gxp->dev, "Core%u Data write transactions: 0x%x\n",
+ phys_core,
+ gxp_bpm_read_counter(gxp, phys_core, DATA_BPM_OFFSET));
return 0;
}
@@ -597,20 +621,14 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
mgr->firmwares[core] = firmwares[core];
}
- ret = gxp_firmware_fetch_boundaries(gxp, mgr);
- if (ret)
- goto err_fetch_boundaries;
-
kfree(mgr->firmware_name);
mgr->firmware_name = name_buf;
+ gxp_firmware_has_requested(gxp, mgr);
mutex_unlock(&mgr->dsp_firmware_lock);
up_read(&gxp->vd_semaphore);
return count;
-err_fetch_boundaries:
- for (core = 0; core < GXP_NUM_CORES; core++)
- mgr->firmwares[core] = NULL;
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++)
release_firmware(firmwares[core]);
@@ -776,11 +794,7 @@ int gxp_firmware_request_if_needed(struct gxp_dev *gxp)
if (ret)
goto err_authenticate_firmware;
- ret = gxp_firmware_fetch_boundaries(gxp, mgr);
- if (ret)
- goto err_authenticate_firmware;
-
- mgr->is_firmware_requested = true;
+ gxp_firmware_has_requested(gxp, mgr);
out:
mutex_unlock(&mgr->dsp_firmware_lock);
@@ -813,37 +827,49 @@ static void disable_core_interrupts(struct gxp_dev *gxp, uint core)
gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0);
}
-static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
+static inline uint select_core(struct gxp_virtual_device *vd, uint virt_core,
+ uint phys_core)
+{
+ return gxp_fw_data_use_per_vd_config(vd) ? virt_core : phys_core;
+}
+
+static int gxp_firmware_setup(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ uint phys_core)
{
int ret = 0;
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- if (mgr->firmware_running & BIT(core)) {
+ if (gxp_core_boot && mgr->firmware_running & BIT(phys_core)) {
dev_err(gxp->dev, "Firmware is already running on core %u\n",
- core);
+ phys_core);
return -EBUSY;
}
- ret = gxp_firmware_load(gxp, core);
+ ret = gxp_firmware_load(gxp, vd, core);
if (ret) {
- dev_err(gxp->dev, "Failed to load firmware on core %u\n", core);
+ dev_err(gxp->dev, "Failed to load firmware on core %u\n",
+ phys_core);
return ret;
}
+ /* Configure bus performance monitors */
+ gxp_bpm_configure(gxp, phys_core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
+ gxp_bpm_configure(gxp, phys_core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
/* Mark this as a cold boot */
- if (gxp_core_boot)
- gxp_firmware_set_boot_mode(gxp, core,
- GXP_BOOT_MODE_REQUEST_COLD_BOOT);
-
- ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
- /*verbose=*/true);
- if (ret) {
- dev_err(gxp->dev, "Failed to power up core %u\n", core);
- gxp_firmware_unload(gxp, core);
- return ret;
+ if (gxp_core_boot) {
+ reset_core_config_region(gxp, vd, core);
+ ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
+ phys_core,
+ /*verbose=*/true);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to power up core %u\n", core);
+ gxp_firmware_unload(gxp, core);
+ return ret;
+ }
}
- enable_core_interrupts(gxp, core);
+ enable_core_interrupts(gxp, phys_core);
return ret;
}
@@ -865,82 +891,85 @@ static void gxp_firmware_wakeup_cores(struct gxp_dev *gxp, uint core_list)
static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+ uint virt_core, uint phys_core)
{
struct work_struct *work;
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
int ret = 0;
+ uint core = select_core(vd, virt_core, phys_core);
if (gxp_core_boot) {
- ret = gxp_firmware_handshake(gxp, core);
+ ret = gxp_firmware_handshake(gxp, vd, core, phys_core);
if (ret) {
dev_err(gxp->dev,
- "Firmware handshake failed on core %u\n", core);
+ "Firmware handshake failed on core %u\n",
+ phys_core);
goto err_firmware_off;
}
/* Initialize mailbox */
if (gxp->mailbox_mgr->allocate_mailbox) {
- gxp->mailbox_mgr->mailboxes[core] =
+ gxp->mailbox_mgr->mailboxes[phys_core] =
gxp->mailbox_mgr->allocate_mailbox(
- gxp->mailbox_mgr, vd, virt_core, core);
- if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
+ gxp->mailbox_mgr, vd, virt_core, phys_core);
+ if (IS_ERR(gxp->mailbox_mgr->mailboxes[phys_core])) {
dev_err(gxp->dev,
"Unable to allocate mailbox (core=%u, ret=%ld)\n",
- core,
+ phys_core,
PTR_ERR(gxp->mailbox_mgr
- ->mailboxes[core]));
+ ->mailboxes[phys_core]));
ret = PTR_ERR(
- gxp->mailbox_mgr->mailboxes[core]);
- gxp->mailbox_mgr->mailboxes[core] = NULL;
+ gxp->mailbox_mgr->mailboxes[phys_core]);
+ gxp->mailbox_mgr->mailboxes[phys_core] = NULL;
goto err_firmware_off;
}
}
+ mgr->firmware_running |= BIT(phys_core);
}
- work = gxp_debug_dump_get_notification_handler(gxp, core);
+ work = gxp_debug_dump_get_notification_handler(gxp, phys_core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_DEBUG_DUMP_READY, work);
+ gxp, phys_core, HOST_NOTIF_DEBUG_DUMP_READY, work);
- work = gxp_core_telemetry_get_notification_handler(gxp, core);
+ work = gxp_core_telemetry_get_notification_handler(gxp, phys_core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_CORE_TELEMETRY_STATUS, work);
-
- mgr->firmware_running |= BIT(core);
+ gxp, phys_core, HOST_NOTIF_CORE_TELEMETRY_STATUS, work);
return ret;
err_firmware_off:
if (gxp_core_boot)
- gxp_pm_core_off(gxp, core);
+ gxp_pm_core_off(gxp, phys_core);
gxp_firmware_unload(gxp, core);
return ret;
}
static void gxp_firmware_stop_core(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+ uint virt_core, uint phys_core)
{
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- if (!(mgr->firmware_running & BIT(core)))
- dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
+ if (gxp_core_boot && !(mgr->firmware_running & BIT(phys_core)))
+ dev_err(gxp->dev, "Firmware is not running on core %u\n",
+ phys_core);
- mgr->firmware_running &= ~BIT(core);
+ mgr->firmware_running &= ~BIT(phys_core);
- gxp_notification_unregister_handler(gxp, core,
+ gxp_notification_unregister_handler(gxp, phys_core,
HOST_NOTIF_DEBUG_DUMP_READY);
- gxp_notification_unregister_handler(gxp, core,
+ gxp_notification_unregister_handler(gxp, phys_core,
HOST_NOTIF_CORE_TELEMETRY_STATUS);
if (gxp_core_boot) {
if (gxp->mailbox_mgr->release_mailbox) {
gxp->mailbox_mgr->release_mailbox(
gxp->mailbox_mgr, vd, virt_core,
- gxp->mailbox_mgr->mailboxes[core]);
- dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ gxp->mailbox_mgr->mailboxes[phys_core]);
+ dev_notice(gxp->dev, "Mailbox %u released\n",
+ phys_core);
}
if (vd->state == GXP_VD_RUNNING) {
@@ -948,46 +977,55 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
* Disable interrupts to prevent cores from being woken up
* unexpectedly.
*/
- disable_core_interrupts(gxp, core);
- gxp_pm_core_off(gxp, core);
+ disable_core_interrupts(gxp, phys_core);
+ gxp_pm_core_off(gxp, phys_core);
}
}
- gxp_firmware_unload(gxp, core);
+ gxp_firmware_unload(gxp, select_core(vd, virt_core, phys_core));
}
int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint core_list)
{
int ret;
- uint core, virt_core;
+ uint phys_core, virt_core;
uint failed_cores = 0;
int failed_ret;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- ret = gxp_firmware_setup(gxp, core);
- if (ret) {
- failed_cores |= BIT(core);
- failed_ret = ret;
- dev_err(gxp->dev, "Failed to run firmware on core %u\n",
- core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+
+ ret = gxp_firmware_setup(gxp, vd, core, phys_core);
+ if (ret) {
+ failed_cores |= BIT(phys_core);
+ failed_ret = ret;
+ dev_err(gxp->dev, "Failed to run firmware on core %u\n",
+ phys_core);
}
+ virt_core++;
}
if (failed_cores != 0) {
/*
* Shut down the cores which call `gxp_firmware_setup`
* successfully
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- if (!(failed_cores & BIT(core))) {
- if (gxp_core_boot)
- gxp_pm_core_off(gxp, core);
- gxp_firmware_unload(gxp, core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!(failed_cores & BIT(phys_core))) {
+ if (gxp_core_boot)
+ gxp_pm_core_off(gxp, phys_core);
+ gxp_firmware_unload(gxp, core);
}
+ virt_core++;
}
return failed_ret;
}
@@ -997,11 +1035,11 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
* gxp_doorbell_enable_for_core here to set GXP_REG_COMMON_INT_MASK_0
* first to enable the firmware handshakes.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
continue;
- gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core),
- core);
+ gxp_doorbell_enable_for_core(
+ gxp, CORE_WAKEUP_DOORBELL(phys_core), phys_core);
}
#endif
/* Switch clock mux to the normal state to guarantee LPM works */
@@ -1011,30 +1049,28 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- ret = gxp_firmware_finish_startup(gxp, vd, virt_core,
- core);
- if (ret) {
- failed_cores |= BIT(core);
- dev_err(gxp->dev,
- "Failed to run firmware on core %u\n",
- core);
- }
- virt_core++;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ ret = gxp_firmware_finish_startup(gxp, vd, virt_core,
+ phys_core);
+ if (ret) {
+ failed_cores |= BIT(phys_core);
+ dev_err(gxp->dev, "Failed to run firmware on core %u\n",
+ phys_core);
}
+ virt_core++;
}
if (failed_cores != 0) {
virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- if (!(failed_cores & BIT(core))) {
- gxp_firmware_stop_core(gxp, vd,
- virt_core, core);
- }
- virt_core++;
- }
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!(failed_cores & BIT(phys_core)))
+ gxp_firmware_stop_core(gxp, vd, virt_core,
+ phys_core);
+ virt_core++;
}
}
/* Check if we need to set clock mux to low state as requested */
@@ -1045,14 +1081,12 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
- bool verbose)
+ uint phys_core, bool verbose)
{
- gxp_program_reset_vector(gxp, core, verbose);
-
- return gxp_core_boot ? gxp_pm_core_on(gxp, core, verbose) : 0;
+ gxp_program_reset_vector(gxp, core, phys_core, verbose);
+ return gxp_pm_core_on(gxp, phys_core, verbose);
}
-
void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint core_list)
{
@@ -1066,30 +1100,50 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
}
-void gxp_firmware_set_boot_mode(struct gxp_dev *gxp, uint core, u32 mode)
+void gxp_firmware_set_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 mode)
{
- void __iomem *boot_mode_addr;
+ struct gxp_host_control_region *core_cfg;
/* Callers shouldn't call the function under this condition. */
if (!gxp->fwbufs[core].vaddr)
return;
- boot_mode_addr = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF +
- SCRATCHPAD_MSG_OFFSET(MSG_BOOT_MODE);
-
- writel(mode, boot_mode_addr);
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ core_cfg->boot_mode = mode;
+ flush_scratchpad_region(gxp, vd, core);
}
-u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp, uint core)
+u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
{
- void __iomem *boot_mode_addr;
+ struct gxp_host_control_region *core_cfg;
/* Callers shouldn't call the function under this condition. */
if (!gxp->fwbufs[core].vaddr)
return 0;
- boot_mode_addr = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF +
- SCRATCHPAD_MSG_OFFSET(MSG_BOOT_MODE);
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ invalidate_scratchpad_region(gxp, vd, core);
+ return core_cfg->boot_mode;
+}
+
+void gxp_firmware_set_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 status)
+{
+ struct gxp_host_control_region *core_cfg;
+
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ core_cfg->boot_status = status;
+}
+
+u32 gxp_firmware_get_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ struct gxp_host_control_region *core_cfg;
- return readl(boot_mode_addr);
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ return core_cfg->boot_status;
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index aff602a..f19adc9 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -10,6 +10,8 @@
#include <linux/bitops.h>
#include <linux/sizes.h>
+#include <gcip/gcip-image-config.h>
+
#include "gxp-config.h"
#include "gxp-internal.h"
@@ -41,6 +43,15 @@
#define PRIVATE_FW_DATA_SIZE SZ_2M
#define SHARED_FW_DATA_SIZE SZ_1M
+extern bool gxp_core_boot;
+
+/* Indexes match image_config.IommuMappingIdx on the firmware side. */
+enum gxp_imgcfg_idx {
+ CORE_CFG_REGION_IDX,
+ VD_CFG_REGION_IDX,
+ SYS_CFG_REGION_IDX,
+};
+
struct gxp_firmware_manager {
const struct firmware *firmwares[GXP_NUM_CORES];
char *firmware_name;
@@ -49,18 +60,15 @@ struct gxp_firmware_manager {
struct mutex dsp_firmware_lock;
/* Firmware status bitmap. Accessors must hold `vd_semaphore`. */
u32 firmware_running;
- /*
- * The boundary of readonly segments and writable segments.
- * The mappings are programmed as
- * [fwbufs[i].daddr, rw_boundaries[i]): RO
- * [rw_boundaries[i], daddr + fwbufs[i].size): RW
- *
- * The boundary information is collected by parsing the ELF
- * header after @firmwares have been fetched.
- */
- dma_addr_t rw_boundaries[GXP_NUM_CORES];
/* Store the entry point of the DSP core firmware. */
u32 entry_points[GXP_NUM_CORES];
+ /*
+ * Cached image config, for easier fetching of config entries.
+ * Not a pointer into the firmware buffer because we want to forcibly
+ * change the privilege level to NS.
+ * Only valid after the firmware has been requested.
+ */
+ struct gcip_image_config img_cfg;
};
enum aurora_msg {
@@ -102,9 +110,12 @@ int gxp_firmware_request_if_needed(struct gxp_dev *gxp);
/*
* Re-program the reset vector and power on the core's LPM if the block had
* been shut down.
+ *
+ * @core should be the virtual core when using the per-VD config method,
+ * otherwise it should be the physical core.
*/
int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
- bool verbose);
+ uint phys_core, bool verbose);
/*
* Loads the firmware for the cores in system memory and powers up the cores
@@ -123,12 +134,29 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
* Sets the specified core's boot mode or suspend request value.
* This function should be called only after the firmware has been run.
*/
-void gxp_firmware_set_boot_mode(struct gxp_dev *gxp, uint core, u32 mode);
+void gxp_firmware_set_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 mode);
/*
* Returns the specified core's boot mode or boot status.
* This function should be called only after the firmware has been run.
*/
-u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp, uint core);
+u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core);
+
+/*
+ * Sets the specified core's boot status or suspend request value.
+ */
+void gxp_firmware_set_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 status);
+
+/*
+ * Returns the specified core's boot status.
+ * This function should be called only after the firmware has been run.
+ */
+u32 gxp_firmware_get_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core);
#endif /* __GXP_FIRMWARE_H__ */
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index 1c993f1..4597a28 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -17,6 +17,85 @@
#define MAX_NUM_CORES 4
#define NUM_SYSTEM_SEMAPHORES 64
+/* The number of physical doorbells and sync barriers allocated to each VD */
+#define GXP_NUM_DOORBELLS_PER_VD 7
+#define GXP_NUM_SYNC_BARRIERS_PER_VD 4
+
+/* The first doorbell and sync barrier allowed for VDs' usage */
+#define GXP_DOORBELLS_START 4 /* The first 4 are used for boot */
+#define GXP_SYNC_BARRIERS_START 1 /* The first one is used for UART */
+
+/* Definitions for host->device boot mode requests */
+/*
+ * No boot action is needed. This is a valid mode once a core is running.
+ * However, it's an invalid state when a FW is powering on. The DSP core will
+ * write it to the boot mode register once it starts a transition.
+ * This is helpful in case the core reboots/crashes while performing the
+ * transition so it doesn't get stuck in a boot loop.
+ */
+#define GXP_BOOT_MODE_NONE 0
+
+/*
+ * Request that the core perform a normal cold boot on the next power-on event.
+ * This does not actually wake the core up, but is required before powering the
+ * core up if cold boot is desired.
+ * Core power-on could be performed using any wake-up source like the doorbells.
+ * Upon success, the boot status should be GXP_BOOT_STATUS_ACTIVE.
+ */
+#define GXP_BOOT_MODE_COLD_BOOT 1
+
+/*
+ * Request that the core suspends on the next suspend signal arrival. This does
+ * not trigger a suspend operation. A subsequent mailbox command or notification
+ * is needed to trigger the actual transition. Upon success, the boot status
+ * should be GXP_BOOT_STATUS_SUSPENDED.
+ */
+#define GXP_BOOT_MODE_SUSPEND 2
+
+/*
+ * Request that the core preempt the active workload on the next suspend
+ * signal arrival. Upon success, the boot status should be
+ * GXP_BOOT_STATUS_SUSPENDED.
+ */
+#define GXP_BOOT_MODE_PREEMPT 3
+
+/*
+ * Request that the core resume on the next power-on event. This does not
+ * trigger a resume operation, but is required before powering the core up if
+ * warm boot/resume is desired.
+ * Core power-on could be performed using any wake-up source, like a direct LPM
+ * transition into PS0. Upon success, the boot status should be
+ * GXP_BOOT_STATUS_ACTIVE.
+ */
+#define GXP_BOOT_MODE_RESUME 4
+
+/*
+ * Request that the core shut down. A subsequent mailbox command or notification
+ * is needed to trigger the actual transition. Upon success, the boot status
+ * should be GXP_BOOT_STATUS_OFF.
+ */
+#define GXP_BOOT_MODE_SHUTDOWN 5
+
+/* Definitions for host->device boot status */
+/* Initial status */
+#define GXP_BOOT_STATUS_NONE 0
+
+/* Final status */
+#define GXP_BOOT_STATUS_ACTIVE 1
+#define GXP_BOOT_STATUS_SUSPENDED 2
+#define GXP_BOOT_STATUS_OFF 3
+
+/* Transition status */
+#define GXP_BOOT_STATUS_INVALID_MODE 4
+#define GXP_BOOT_STATUS_BOOTING 5
+#define GXP_BOOT_STATUS_BOOTING_FAILED 6
+#define GXP_BOOT_STATUS_SUSPENDING 7
+#define GXP_BOOT_STATUS_SUSPENDING_FAILED 8
+#define GXP_BOOT_STATUS_SUSPENDING_FAILED_ACTIVE_WL 9
+#define GXP_BOOT_STATUS_WAITING_FOR_WORKLOAD 10
+#define GXP_BOOT_STATUS_WAITING_FOR_DMA 11
+#define GXP_BOOT_STATUS_SHUTTING_DOWN 12
+
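The mode/status pairing above implies a host-side request/poll handshake: write a request into boot_mode, clear boot_status, kick the core, then poll boot_status for the terminal value. A minimal sketch under stated assumptions — the accessor signatures come from gxp-firmware.h in this patch, while the notification call placement, retry budget, and delay are illustrative only:

static int example_request_suspend(struct gxp_dev *gxp,
				   struct gxp_virtual_device *vd, uint core)
{
	int ctr = 1000;	/* assumed polling budget */

	gxp_firmware_set_boot_status(gxp, vd, core, GXP_BOOT_STATUS_NONE);
	gxp_firmware_set_boot_mode(gxp, vd, core, GXP_BOOT_MODE_SUSPEND);
	/* Writing the mode alone does nothing; a notification is still
	 * required to trigger the transition (see gxp_vd_suspend). */
	gxp_notification_send(gxp, core, CORE_NOTIF_SUSPEND_REQUEST);
	while (ctr--) {
		if (gxp_firmware_get_boot_status(gxp, vd, core) ==
		    GXP_BOOT_STATUS_SUSPENDED)
			return 0;
		udelay(GXP_TIME_DELAY_FACTOR);
	}
	return -ETIMEDOUT;
}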
/* Bit masks for the status fields in the core telemetry structures. */
/* The core telemetry buffers have been setup by the host. */
#define GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
@@ -324,4 +403,178 @@ struct gxp_core_to_core_response {
int32_t cmd_retval;
};
+/*
+ * A structure for describing the state of the job this worker core is part of.
+ * This struct is expected to change per dispatch/context switch/preemption as
+ * it describes the HW resources, FW IDs, and other parameters that may change
+ * across job dispatches.
+ * It also establishes a slot used for the various HW resources this VD is
+ * expected to use.
+ * Each FW in a VD is expected to be provided its own copy of this structure
+ * based on the job that it's part of.
+ */
+struct gxp_job_descriptor {
+ /* The number of workers participating in this job. */
+ uint32_t workers_count;
+
+ /*
+ * A mapping between a worker ID and the FW ID handling it. The FW ID
+ * used for handling worker 'w' is defined in worker_to_fw[w].
+ */
+ int32_t worker_to_fw[MAX_NUM_CORES];
+
+ /*
+ * A slot ID between 0 and MAX_NUM_CORES (exclusive) that indicates
+ * which block of HW resources this VD is expected to use. All system
+ * HW resources (such as doorbells, sync barriers, etc) are split across
+ * the slots evenly; usually starting at a specific physical ID and
+ * spanning a number of consecutive instances. The start ID for each HW
+ * resource category is defined in GXP_<resource_name>_START, and the
+ * number of resources allotted to each slot is defined in
+ * GXP_NUM_<resource_name>_PER_VD.
+ */
+ uint32_t hardware_resources_slot;
+};
+
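These constants and the job descriptor combine into simple slot arithmetic: each slot owns a contiguous run of doorbells and sync barriers. A sketch of the computation, mirroring vd_save_doorbells() later in this merge; first_doorbell()/first_sync_barrier() are illustrative names:

static uint first_doorbell(const struct gxp_job_descriptor *job)
{
	return GXP_DOORBELLS_START +
	       job->hardware_resources_slot * GXP_NUM_DOORBELLS_PER_VD;
}

static uint first_sync_barrier(const struct gxp_job_descriptor *job)
{
	return GXP_SYNC_BARRIERS_START +
	       job->hardware_resources_slot * GXP_NUM_SYNC_BARRIERS_PER_VD;
}

A slot then owns doorbells [first_doorbell(), first_doorbell() + GXP_NUM_DOORBELLS_PER_VD) and the analogous sync barrier range.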
+/*
+ * A per-FW control structure used to communicate between the host (MCU or
+ * kernel) and the DSP core. The region is expected to be hosted in uncached
+ * memory.
+ */
+struct gxp_host_control_region {
+ /*
+ * Written to by the FW to indicate to the host that the core is
+ * alive.
+ */
+ uint32_t core_alive_magic;
+
+ /*
+ * Written to by the FW to indicate to the host that the core can read
+ * TOP registers.
+ */
+ uint32_t top_access_ok;
+
+ /*
+ * Written to by the host to specify the requested FW boot mode. See the
+ * GXP_BOOT_MODE_* definitions for valid values. Always set by the FW to
+ * GXP_BOOT_MODE_NONE once the requested boot mode transition is
+ * completed.
+ */
+ uint32_t boot_mode;
+
+ /*
+ * Written to by the FW to indicate the boot status. See
+ * GXP_BOOT_STATUS_* definitions for valid values.
+ */
+ uint32_t boot_status;
+
+ /* Reserved fields for future expansion */
+ uint32_t reserved_boot[12];
+
+ /* To be used to communicate statistics for timing events during boot */
+ uint32_t timing_entries[16];
+
+ /* To be used to communicate crash events in case of failures */
+ uint32_t valid_crash_info;
+ uint32_t crash_exccause;
+ uint32_t crash_excvaddr;
+ uint32_t crash_epc1;
+ uint32_t reserved_crash_info[12];
+
+ /* Reserved for more categories */
+ uint32_t reserved[16];
+
+ /*
+ * The per-core job descriptor. This struct will be inspected by the FW
+ * at the beginning of every dispatch.
+ */
+ struct gxp_job_descriptor job_descriptor;
+};
+
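The first two fields are exactly what the reworked gxp_firmware_handshake() polls. A condensed sketch of the host side, assuming the invalidate_scratchpad_region() helper from gxp-firmware.c in this patch; the retry budget is an assumption:

static int example_wait_alive(struct gxp_dev *gxp,
			      struct gxp_virtual_device *vd, uint core,
			      struct gxp_host_control_region *core_cfg)
{
	int ctr = 5000;	/* assumed retry budget */

	while (ctr--) {
		/* Invalidate in case the region is cacheable on the host. */
		invalidate_scratchpad_region(gxp, vd, core);
		if (core_cfg->core_alive_magic == Q7_ALIVE_MAGIC)
			return 0;
		udelay(GXP_TIME_DELAY_FACTOR);
	}
	return -EIO;
}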
+/*
+ * A structure describing the telemetry (logging and tracing) parameters and
+ * buffers; this describes R/O aspects of the telemetry buffers.
+ */
+struct gxp_telemetry_descriptor_ro {
+ struct telemetry_descriptor_ro {
+ /*
+ * The telemetry status from the host's point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t host_status;
+
+ /*
+ * The device address for the buffer used for storing events.
+ * The head and tail indices are described inside the data
+ * pointed to by `buffer_addr`.
+ */
+ uint32_t buffer_addr;
+
+ /* The size of the buffer (in bytes) */
+ uint32_t buffer_size;
+ } per_core_loggers[MAX_NUM_CORES], per_core_tracers[MAX_NUM_CORES];
+};
+
+/*
+ * A structure describing the external state of the VD. This structure is read
+ * once by the FW upon the first cold boot and is never checked again.
+ */
+struct gxp_vd_descriptor {
+ /* The ID for this GXP application. */
+ uint32_t application_id;
+
+ /*
+ * Whether or not this VD has been initialized by one of its cores.
+ * This variable is protected by sync barrier at offset 0. Should be
+ * initialized by the host to 0.
+ */
+ uint32_t vd_is_initialized;
+};
+
+/*
+ * A descriptor for data that is common to the entire system; usually accessed
+ * by physical core. This region is mapped as R/O for all VDs. Should be
+ * writable by the host (MCU/Kernel)
+ */
+struct gxp_system_descriptor_ro {
+ /* A device address for the common debug dump region */
+ uint32_t debug_dump_dev_addr;
+
+ /*
+ * A R/O descriptor for the telemetry data. Describing buffer
+ * parameters.
+ */
+ struct gxp_telemetry_descriptor_ro telemetry_desc;
+};
+
+/*
+ * A structure describing the telemetry (logging and tracing) parameters; this
+ * describes R/W aspects of the telemetry system.
+ */
+struct gxp_telemetry_descriptor_rw {
+ /* A struct for describing R/W status parameters of the buffer */
+ struct telemetry_descriptor_rw {
+ /*
+ * The telemetry status from the device point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t device_status;
+
+ /*
+ * Whether or not this telemetry category has data available
+ * for the host
+ */
+ uint32_t data_available;
+ } per_core_loggers[MAX_NUM_CORES], per_core_tracers[MAX_NUM_CORES];
+};
+
+/*
+ * A descriptor for data that is common to the entire system; usually accessed
+ * by physical core. This region is mapped as R/W for all VDs.
+ */
+struct gxp_system_descriptor_rw {
+ /* A R/W descriptor for the telemetry data */
+ struct gxp_telemetry_descriptor_rw telemetry_desc;
+};
+
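Ownership is split across the two system descriptors: the host publishes buffer parameters through the R/O view, and cores report consumption state through the R/W view. A hedged sketch of the host side; the function name is illustrative and the address/size arguments are placeholders supplied by the telemetry manager:

static void example_publish_logger(struct gxp_system_descriptor_ro *ro,
				   uint core, uint32_t dev_addr,
				   uint32_t size)
{
	struct telemetry_descriptor_ro *d =
		&ro->telemetry_desc.per_core_loggers[core];

	d->buffer_addr = dev_addr;
	d->buffer_size = size;
	d->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
}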
#endif /* __GXP_HOST_DEVICE_STRUCTURES_H__ */
diff --git a/gxp-internal.h b/gxp-internal.h
index df1b4e9..1eaa5c0 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -27,6 +27,8 @@
#define IS_GXP_TEST IS_ENABLED(CONFIG_GXP_TEST)
+#define GXP_NAME "gxp"
+
enum gxp_chip_revision {
GXP_CHIP_A0,
GXP_CHIP_B0,
@@ -125,8 +127,6 @@ struct gxp_dev {
/*
* Buffer shared across firmware.
* Its paddr is 0 if the shared buffer is not available.
- * Its vaddr is always 0 as this region is not expected to be accessible
- * to us.
*/
struct gxp_mapped_resource shared_buf;
/*
@@ -134,18 +134,15 @@ struct gxp_dev {
* which indexes of slices are used by ID allocator.
*/
struct ida shared_slice_idp;
- size_t shared_slice_size; /* The size of each slice. */
- /*
- * The total number of slices.
- * It can be zero if there is no shared buffer support.
- */
- unsigned int num_shared_slices;
struct gxp_usage_stats *usage_stats; /* Stores the usage stats */
void __iomem *sysreg_shareability; /* sysreg shareability csr base */
/* Next virtual device ID. */
atomic_t next_vdid;
+ /* To manage DMA fences. */
+ struct gcip_dma_fence_manager *gfence_mgr;
+
/* callbacks for chip-dependent implementations */
/*
diff --git a/gxp-kci.c b/gxp-kci.c
index 72ffa4e..79fc069 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -22,6 +22,7 @@
#include "gxp-mcu.h"
#include "gxp-pm.h"
#include "gxp-usage-stats.h"
+#include "gxp-vd.h"
#define GXP_MCU_USAGE_BUFFER_SIZE 4096
@@ -128,6 +129,11 @@ static void gxp_kci_handle_rkci(struct gxp_kci *gkci,
gxp_kci_resp_rkci_ack(gkci, resp);
break;
}
+ case GCIP_RKCI_CLIENT_FATAL_ERROR_NOTIFY:
+ /* TODO(b/265092842): Create a debug dump of corresponding cores (resp->status). */
+ gxp_vd_invalidate(gxp, resp->retval);
+ gxp_kci_resp_rkci_ack(gkci, resp);
+ break;
default:
dev_warn(gxp->dev, "Unrecognized reverse KCI request: %#x",
resp->code);
@@ -150,8 +156,7 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci,
switch (resp->code) {
case GCIP_RKCI_FIRMWARE_CRASH:
- /* TODO(b/239638427): Handle firmware crash */
- dev_dbg(gxp->dev, "MCU firmware is crashed");
+ gxp_mcu_firmware_crash_handler(gxp, resp->retval);
break;
case GCIP_RKCI_JOB_LOCKUP:
/* TODO(b/239638427): Handle job lockup */
diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c
index aba2be5..ab0f866 100644
--- a/gxp-mcu-firmware.c
+++ b/gxp-mcu-firmware.c
@@ -121,11 +121,6 @@ static int gxp_mcu_firmware_load_locked(struct gxp_mcu_firmware *mcu_fw,
memcpy(mcu_fw->image_buf.vaddr, fw->data + offset, size);
- if (!gxp->gsa_dev)
- program_iremap_csr(gxp, &mcu_fw->image_buf);
- gxp_bpm_configure(gxp, GXP_MCU_CORE_ID, INST_BPM_OFFSET,
- BPM_EVENT_READ_XFER);
-
out_release_firmware:
release_firmware(fw);
return ret;
@@ -206,7 +201,8 @@ static void gxp_mcu_firmware_stop_locked(struct gxp_mcu_firmware *mcu_fw)
if (ret)
dev_warn(gxp->dev, "KCI shutdown failed: %d", ret);
- if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID), LPM_PG_STATE))
+ if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID),
+ LPM_PG_STATE))
dev_warn(gxp->dev,
"MCU PSM transition to PS3 fails, current state: %u\n",
gxp_lpm_get_state(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID)));
@@ -214,6 +210,39 @@ static void gxp_mcu_firmware_stop_locked(struct gxp_mcu_firmware *mcu_fw)
gxp_mcu_firmware_unload_locked(mcu_fw);
}
+static int gxp_mcu_firmware_power_up(struct gxp_mcu_firmware *mcu_fw,
+ const char *name)
+{
+ struct gxp_dev *gxp = mcu_fw->gxp;
+ int ret;
+
+ if (!gxp->gsa_dev)
+ program_iremap_csr(gxp, &mcu_fw->image_buf);
+ gxp_bpm_configure(gxp, GXP_MCU_CORE_ID, INST_BPM_OFFSET,
+ BPM_EVENT_READ_XFER);
+
+ ret = gxp_lpm_up(gxp, GXP_MCU_CORE_ID);
+ if (ret)
+ return ret;
+ /* Raise wakeup doorbell */
+ dev_notice(gxp->dev, "Raising doorbell %d interrupt\n",
+ CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
+ gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID),
+ GXP_MCU_CORE_ID);
+ gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
+
+ ret = gxp_mcu_firmware_handshake(mcu_fw);
+ if (ret)
+ goto err_lpm_down;
+ dev_info(gxp->dev, "MCU firmware %s run succeeded", name);
+
+ return ret;
+
+err_lpm_down:
+ gxp_lpm_down(gxp, GXP_MCU_CORE_ID);
+ return ret;
+}
+
/*
* Runs the firmware without checking current status.
*
@@ -229,7 +258,6 @@ static int gxp_mcu_firmware_run_locked(struct gxp_mcu_firmware *mcu_fw,
const char *name)
{
int ret;
- struct gxp_dev *gxp = mcu_fw->gxp;
lockdep_assert_held(&mcu_fw->lock);
@@ -240,27 +268,14 @@ static int gxp_mcu_firmware_run_locked(struct gxp_mcu_firmware *mcu_fw,
ret = gxp_mcu_firmware_load_locked(mcu_fw, name);
if (ret)
goto err_invalid;
- ret = gxp_lpm_up(gxp, GXP_MCU_CORE_ID);
+ ret = gxp_mcu_firmware_power_up(mcu_fw, name);
if (ret)
goto err_unload;
- /* Raise wakeup doorbell */
- dev_notice(gxp->dev, "Raising doorbell %d interrupt\n",
- CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
- gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID),
- GXP_MCU_CORE_ID);
- gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
-
- ret = gxp_mcu_firmware_handshake(mcu_fw);
- if (ret)
- goto err_lpm_down;
- dev_info(gxp->dev, "MCU firmware %s run succeeded", name);
mcu_fw->status = GCIP_FW_VALID;
mcu_fw->name = name;
return 0;
-err_lpm_down:
- gxp_lpm_down(gxp, GXP_MCU_CORE_ID);
err_unload:
gxp_mcu_firmware_unload_locked(mcu_fw);
err_invalid:
@@ -271,8 +286,19 @@ err_invalid:
static int gxp_mcu_firmware_restart_locked(struct gxp_mcu_firmware *mcu_fw)
{
- /* TODO(b/233159020): implement restart */
- return gxp_mcu_firmware_run_locked(mcu_fw, mcu_fw->name);
+ struct gxp_dev *gxp = mcu_fw->gxp;
+ int ret;
+
+ lockdep_assert_held(&mcu_fw->lock);
+
+ ret = gxp_mcu_firmware_power_up(mcu_fw, mcu_fw->name);
+ if (ret) {
+ dev_warn(gxp->dev, "Failed to restart, reload MCU fw entirely");
+ gxp_mcu_firmware_unload_locked(mcu_fw);
+ return gxp_mcu_firmware_run_locked(mcu_fw, mcu_fw->name);
+ }
+
+ return 0;
}
static int init_mcu_firmware_buf(struct gxp_dev *gxp,
@@ -445,7 +471,13 @@ int gxp_mcu_firmware_run(struct gxp_mcu_firmware *mcu_fw)
int ret;
mutex_lock(&mcu_fw->lock);
- if (mcu_fw->status == GCIP_FW_VALID)
+ /*
+ * TODO(b/233159020): Currently, the stop function unloads the firmware image and
+ * we have to reload it by calling the run function. We have implemented the restart
+ * function for the non-GSA environment; enable it by removing the " && 0" once
+ * the whole logic is refactored to support the GSA device.
+ */
+ if (mcu_fw->status == GCIP_FW_VALID && 0)
ret = gxp_mcu_firmware_restart_locked(mcu_fw);
else
ret = gxp_mcu_firmware_run_locked(mcu_fw, mcu_fw->name);
@@ -459,3 +491,108 @@ void gxp_mcu_firmware_stop(struct gxp_mcu_firmware *mcu_fw)
gxp_mcu_firmware_stop_locked(mcu_fw);
mutex_unlock(&mcu_fw->lock);
}
+
+void gxp_mcu_firmware_crash_handler(struct gxp_dev *gxp,
+ enum gcip_fw_crash_type crash_type)
+{
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
+ struct gxp_client *client;
+ int ret;
+
+ dev_err(gxp->dev, "MCU firmware is crashed, crash_type=%d", crash_type);
+
+ if (crash_type != GCIP_FW_CRASH_UNRECOVERABLE_FAULT)
+ return;
+
+ dev_err(gxp->dev, "Unrecoverable MCU firmware fault, handle it");
+
+ /*
+ * Prevent @gxp->client_list from being changed while handling the crash.
+ * The user cannot open or close a fd until this function releases the lock.
+ */
+ mutex_lock(&gxp->client_list_lock);
+
+ /*
+ * Hold @client->semaphore first to prevent deadlock.
+ * By holding this lock, clients cannot proceed with most IOCTLs.
+ */
+ list_for_each_entry (client, &gxp->client_list, list_entry) {
+ down_write(&client->semaphore);
+ }
+
+ /*
+ * Holding @client->semaphore will block most client actions, but let's make
+ * sure by also holding the locks directly related to the actions we want to
+ * block. For example, in the case of the block wakelock, the debug dump can
+ * try to acquire it, which cannot be blocked by holding @client->semaphore.
+ */
+
+ /*
+ * We have to block the runtime from allocating a new VD. Otherwise, if it is
+ * holding the block wakelock, it will try to send an `allocate_vmbox` KCI to
+ * the crashed MCU firmware.
+ *
+ * The runtime cannot allocate a new virtual device or close its client until
+ * this function releases the lock.
+ */
+ down_write(&gxp->vd_semaphore);
+
+ /*
+ * As we are recovering the MCU firmware, the number of clients holding the block wakelock
+ * should not be changed until the recovery is finished.
+ *
+ * The runtime cannot acquire or release the block wakelock until this function releases
+ * the lock.
+ */
+ mutex_lock(&gxp->wakelock_mgr->lock);
+
+ /*
+ * Discard all pending/unconsumed UCI responses and change the state of all virtual devices
+ * to GXP_VD_UNAVAILABLE. From now on, all clients cannot request new UCI commands.
+ */
+ list_for_each_entry (client, &gxp->client_list, list_entry) {
+ if (client->has_block_wakelock && client->vd) {
+ gxp->mailbox_mgr->release_unconsumed_async_resps(
+ client->vd);
+ client->vd->state = GXP_VD_UNAVAILABLE;
+ if (client->vd_invalid_eventfd)
+ gxp_eventfd_signal(client->vd_invalid_eventfd);
+ }
+ }
+
+ /*
+ * Turn off and on the Aurora block and rerun the MCU firmware.
+ * TODO(b/264621513): Change the power state of LPM instead of turning off and on the
+ * whole Aurora block.
+ */
+ mutex_lock(&mcu_fw->lock);
+
+ ret = gxp_pm_blk_off(gxp);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to turn off BLK_AUR (ret=%d)\n", ret);
+ goto out;
+ }
+
+ if (!gxp_pm_is_blk_down(gxp, 5000)) {
+ dev_err(gxp->dev, "BLK_AUR hasn't been turned off");
+ goto out;
+ }
+
+ ret = gxp_pm_blk_on(gxp);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to turn on BLK_AUR (ret=%d)\n", ret);
+ goto out;
+ }
+
+ ret = gxp_mcu_firmware_restart_locked(mcu_fw);
+ if (ret)
+ dev_err(gxp->dev, "Failed to run MCU firmware (ret=%d)\n", ret);
+
+out:
+ mutex_unlock(&mcu_fw->lock);
+ mutex_unlock(&gxp->wakelock_mgr->lock);
+ up_write(&gxp->vd_semaphore);
+ list_for_each_entry (client, &gxp->client_list, list_entry) {
+ up_write(&client->semaphore);
+ }
+ mutex_unlock(&gxp->client_list_lock);
+}
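The handler above fixes a strict lock acquisition order; any other path touching the same locks must follow it to stay deadlock-free. Summarized from the function body (derived here, not a documented driver-wide rule):

/*
 * Lock order in gxp_mcu_firmware_crash_handler(), outermost first:
 *   gxp->client_list_lock
 *     -> client->semaphore (write, for every client)
 *       -> gxp->vd_semaphore (write)
 *         -> gxp->wakelock_mgr->lock
 *           -> mcu_fw->lock
 * Released in exactly the reverse order on the way out.
 */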
diff --git a/gxp-mcu-firmware.h b/gxp-mcu-firmware.h
index 6218d5e..12b0995 100644
--- a/gxp-mcu-firmware.h
+++ b/gxp-mcu-firmware.h
@@ -61,4 +61,14 @@ void gxp_mcu_firmware_stop(struct gxp_mcu_firmware *mcu_fw);
*/
struct gxp_mcu_firmware *gxp_mcu_firmware_of(struct gxp_dev *gxp);
+/*
+ * Handles the MCU firmware crash. It will handle the crash only when the @crash_type is
+ * GCIP_FW_CRASH_UNRECOVERABLE_FAULT. Otherwise, it will ignore the crash.
+ *
+ * This function will be called from `gxp-kci.c` when a GCIP_RKCI_FIRMWARE_CRASH
+ * RKCI arrives from the MCU firmware side.
+ */
+void gxp_mcu_firmware_crash_handler(struct gxp_dev *gxp,
+ enum gcip_fw_crash_type crash_type);
+
#endif /* __GXP_MCU_FIRMWARE_H__ */
diff --git a/gxp-mcu.c b/gxp-mcu.c
index b83d2ba..aae9519 100644
--- a/gxp-mcu.c
+++ b/gxp-mcu.c
@@ -38,10 +38,7 @@ static int gxp_alloc_shared_buffer(struct gxp_dev *gxp, struct gxp_mcu *mcu)
offset = gcip_mem_pool_offset(&mcu->remap_data_pool, paddr);
vaddr = offset + (mcu->fw.image_buf.vaddr + GXP_IREMAP_DATA_OFFSET);
memset(vaddr, 0, size);
-
- ida_init(&gxp->shared_slice_idp);
- gxp->num_shared_slices = GXP_SHARED_BUFFER_SIZE / GXP_SHARED_SLICE_SIZE;
- gxp->shared_slice_size = GXP_SHARED_SLICE_SIZE;
+ res->vaddr = vaddr;
return 0;
}
@@ -50,7 +47,6 @@ static void gxp_free_shared_buffer(struct gxp_mcu *mcu)
{
struct gxp_mapped_resource *res = &mcu->gxp->shared_buf;
- ida_destroy(&mcu->gxp->shared_slice_idp);
gcip_mem_pool_free(&mcu->remap_data_pool, res->paddr, res->size);
}
diff --git a/gxp-pm.c b/gxp-pm.c
index ead9d7c..4c30284 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -24,7 +24,6 @@
#define SHUTDOWN_DELAY_US_MIN 200
#define SHUTDOWN_DELAY_US_MAX 400
-#define SHUTDOWN_MAX_DELAY_COUNT 20
/*
* The order of this array decides the voting priority, should be increasing in
@@ -268,14 +267,16 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return ret;
}
-bool gxp_pm_is_blk_down(struct gxp_dev *gxp)
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp, uint timeout_ms)
{
- int timeout_cnt = 0;
+ int timeout_cnt = 0, max_delay_count;
int curr_state;
if (!gxp->power_mgr->aur_status)
return gxp->power_mgr->curr_state == AUR_OFF;
+ max_delay_count = (timeout_ms * 1000) / SHUTDOWN_DELAY_US_MIN;
+
do {
/* Delay 200~400us per retry till blk shutdown finished */
usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
@@ -283,7 +284,7 @@ bool gxp_pm_is_blk_down(struct gxp_dev *gxp)
if (!curr_state)
return true;
timeout_cnt++;
- } while (timeout_cnt < SHUTDOWN_MAX_DELAY_COUNT);
+ } while (timeout_cnt < max_delay_count);
return false;
}
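Note that the conversion above only counts the minimum per-iteration delay, so the wall time can exceed the requested timeout: the crash handler passes timeout_ms = 5000, giving max_delay_count = 5000 * 1000 / 200 = 25000 iterations, yet each usleep_range(200, 400) may sleep up to 400 us, so the loop can run for roughly 10 s before reporting failure.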
diff --git a/gxp-pm.h b/gxp-pm.h
index 188f449..334435e 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -167,11 +167,12 @@ int gxp_pm_blk_off(struct gxp_dev *gxp);
/**
* gxp_pm_is_blk_down() - Check whether the blk is turned off.
* @gxp: The GXP device to check
+ * @timeout_ms: Wait for the block to be turned off for this duration.
*
* Return:
* * true - blk is turned off.
*/
-bool gxp_pm_is_blk_down(struct gxp_dev *gxp);
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp, uint timeout_ms);
/**
* gxp_pm_get_blk_state() - Get the blk power state
diff --git a/gxp-ssmt.h b/gxp-ssmt.h
index f3458df..6cf8971 100644
--- a/gxp-ssmt.h
+++ b/gxp-ssmt.h
@@ -10,6 +10,8 @@
#include "gxp-internal.h"
+#define SSMT_CLAMP_MODE_BYPASS (1u << 31)
+
struct gxp_ssmt {
struct gxp_dev *gxp;
void __iomem *idma_ssmt_base;
@@ -31,4 +33,9 @@ int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt);
*/
void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid);
+static inline void gxp_ssmt_set_core_bypass(struct gxp_ssmt *ssmt, uint core)
+{
+ gxp_ssmt_set_core_vid(ssmt, core, SSMT_CLAMP_MODE_BYPASS);
+}
+
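The helper above just reuses the VID programming path with the bypass bit set. A hedged usage sketch matching the "set SSMT to bypass in MCU mode" change in this merge; the function name is illustrative:

static inline void example_ssmt_bypass_all(struct gxp_ssmt *ssmt)
{
	uint core;

	/* Put every core's SSMT into bypass, as MCU mode does. */
	for (core = 0; core < GXP_NUM_CORES; core++)
		gxp_ssmt_set_core_bypass(ssmt, core);
}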
#endif /* __GXP_SSMT_H__ */
diff --git a/gxp-uci.c b/gxp-uci.c
index 83883f1..eecc29f 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -39,6 +39,7 @@ static int gxp_uci_mailbox_manager_execute_cmd(
u64 *resp_seq, u16 *resp_status)
{
struct gxp_dev *gxp = client->gxp;
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
struct gxp_virtual_device *vd = client->vd;
struct gxp_uci_command cmd;
struct gxp_uci_response resp;
@@ -61,7 +62,13 @@ static int gxp_uci_mailbox_manager_execute_cmd(
cmd.core_id = 0;
cmd.client_id = vd->client_id;
+ /*
+ * Before the response returns, we must prevent the MCU firmware from being
+ * unloaded, even by the firmware crash handler. Otherwise, invalid IOMMU
+ * accesses can occur.
+ */
+ mutex_lock(&mcu_fw->lock);
ret = gxp_mailbox_send_cmd(mailbox, &cmd, &resp);
+ mutex_unlock(&mcu_fw->lock);
/* resp.seq and resp.status can be updated even though it failed to process the command */
if (resp_seq)
diff --git a/gxp-vd.c b/gxp-vd.c
index 66de461..0c0c1b0 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -8,17 +8,20 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/idr.h>
+#include <linux/iommu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <gcip/gcip-alloc-helper.h>
+#include <gcip/gcip-image-config.h>
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-domain-pool.h"
+#include "gxp-doorbell.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-host-device-structs.h"
@@ -46,11 +49,416 @@ void gxp_vd_init(struct gxp_dev *gxp)
for (core = 0; core < GXP_NUM_CORES; core++)
gxp->core_to_vd[core] = NULL;
atomic_set(&gxp->next_vdid, 0);
+ ida_init(&gxp->shared_slice_idp);
}
void gxp_vd_destroy(struct gxp_dev *gxp)
{
- /* NO-OP for now. */
+ ida_destroy(&gxp->shared_slice_idp);
+}
+
+/* Allocates an SGT and maps @daddr to it. */
+static int map_ns_region(struct gxp_virtual_device *vd, dma_addr_t daddr, size_t size)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct sg_table *sgt;
+ size_t idx;
+ const size_t n_reg = ARRAY_SIZE(vd->ns_regions);
+ int ret;
+
+ for (idx = 0; idx < n_reg; idx++) {
+ if (!vd->ns_regions[idx].sgt)
+ break;
+ }
+ if (idx == n_reg) {
+ dev_err(gxp->dev, "NS regions array %zx is full", n_reg);
+ return -ENOSPC;
+ }
+ sgt = gcip_alloc_noncontiguous(gxp->dev, size, GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+
+ ret = gxp_dma_map_iova_sgt(gxp, vd->domain, daddr, sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "NS map %pad with size %#zx failed", &daddr,
+ size);
+ gcip_free_noncontiguous(sgt);
+ return ret;
+ }
+ vd->ns_regions[idx].daddr = daddr;
+ vd->ns_regions[idx].sgt = sgt;
+
+ return 0;
+}
+
+static void unmap_ns_region(struct gxp_virtual_device *vd, dma_addr_t daddr)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct sg_table *sgt;
+ size_t idx;
+ const size_t n_reg = ARRAY_SIZE(vd->ns_regions);
+
+ for (idx = 0; idx < n_reg; idx++) {
+ if (daddr == vd->ns_regions[idx].daddr)
+ break;
+ }
+ if (idx == n_reg) {
+ dev_warn(gxp->dev, "unable to find NS mapping @ %pad", &daddr);
+ return;
+ }
+
+ sgt = vd->ns_regions[idx].sgt;
+ vd->ns_regions[idx].sgt = NULL;
+ vd->ns_regions[idx].daddr = 0;
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, daddr, sgt);
+ gcip_free_noncontiguous(sgt);
+}
+
+/* Maps the shared buffer region to @vd->domain. */
+static int map_core_shared_buffer(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct iommu_domain *domain = vd->domain->domain;
+ const size_t shared_size = GXP_SHARED_SLICE_SIZE;
+
+ if (!gxp->shared_buf.paddr)
+ return 0;
+ return iommu_map(domain, gxp->shared_buf.daddr,
+ gxp->shared_buf.paddr + shared_size * vd->slice_index,
+ shared_size, IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts map_core_shared_buffer. */
+static void unmap_core_shared_buffer(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct iommu_domain *domain = vd->domain->domain;
+ const size_t shared_size = GXP_SHARED_SLICE_SIZE;
+
+ if (!gxp->shared_buf.paddr)
+ return;
+ iommu_unmap(domain, gxp->shared_buf.daddr, shared_size);
+}
+
+/* Maps @res->daddr to @res->paddr in @vd->domain. */
+static int map_resource(struct gxp_virtual_device *vd,
+ struct gxp_mapped_resource *res)
+{
+ if (res->daddr == 0)
+ return 0;
+ return iommu_map(vd->domain->domain, res->daddr, res->paddr, res->size,
+ IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts map_resource. */
+static void unmap_resource(struct gxp_virtual_device *vd,
+ struct gxp_mapped_resource *res)
+{
+ if (res->daddr == 0)
+ return;
+ iommu_unmap(vd->domain->domain, res->daddr, res->size);
+}
+
+/*
+ * Assigns @res's IOVA and size from the image config.
+ */
+static void assign_resource(struct gxp_mapped_resource *res,
+ struct gcip_image_config *img_cfg,
+ enum gxp_imgcfg_idx idx)
+{
+ res->daddr = img_cfg->iommu_mappings[idx].virt_address;
+ res->size = gcip_config_to_size(
+ img_cfg->iommu_mappings[idx].image_config_value);
+}
+
+/*
+ * This function does the following:
+ * - Gets CORE_CFG, VD_CFG, and SYS_CFG IOVAs and sizes from the image config.
+ * - Maps the above regions with this layout:
+ * Pool
+ * +------------------------------------+
+ * | SLICE_0: CORE_CFG |
+ * | SLICE_0: VD_CFG |
+ * | <padding to GXP_SHARED_SLICE_SIZE> |
+ * +------------------------------------+
+ * | SLICE_1: CORE_CFG |
+ * | SLICE_1: VD_CFG |
+ * | <padding to GXP_SHARED_SLICE_SIZE> |
+ * +------------------------------------+
+ * | ... SLICE_N |
+ * +------------------------------------+
+ * | <padding> |
+ * +------------------------------------+
+ * | SYS_CFG |
+ * +------------------------------------+
+ *
+ * To keep compatibility, if mappings[0] and [1] are not both present, this
+ * function falls back to mapping the MCU-core shared region with a hard-coded
+ * IOVA and size.
+ */
+static int map_cfg_regions(struct gxp_virtual_device *vd,
+ struct gcip_image_config *img_cfg)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct gxp_mapped_resource *pool;
+ struct gxp_mapped_resource res, tmp;
+ size_t offset;
+ int ret;
+
+ if (img_cfg->num_iommu_mappings < 2)
+ return map_core_shared_buffer(vd);
+
+ /*
+ * For direct mode, the config regions are programmed by host (us); for
+ * MCU mode, the config regions are programmed by MCU.
+ */
+ if (gxp_is_direct_mode(gxp)) {
+ tmp = gxp->fwdatabuf;
+ /* Leave the first half to be used by gxp_fw_data_init() */
+ tmp.vaddr += tmp.size / 2;
+ tmp.paddr += tmp.size / 2;
+ pool = &tmp;
+ } else {
+ pool = &gxp->shared_buf;
+ }
+
+ assign_resource(&res, img_cfg, CORE_CFG_REGION_IDX);
+ offset = vd->slice_index * GXP_SHARED_SLICE_SIZE;
+ res.vaddr = pool->vaddr + offset;
+ res.paddr = pool->paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map core config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ return ret;
+ }
+ vd->core_cfg = res;
+
+ assign_resource(&res, img_cfg, VD_CFG_REGION_IDX);
+ offset += vd->core_cfg.size;
+ res.vaddr = pool->vaddr + offset;
+ res.paddr = pool->paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map VD config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ goto err_unmap_core;
+ }
+ vd->vd_cfg = res;
+ /* image config correctness check */
+ if (vd->core_cfg.size + vd->vd_cfg.size > GXP_SHARED_SLICE_SIZE) {
+ dev_err(gxp->dev,
+ "Core CFG (%#llx) + VD CFG (%#llx) exceeds %#x",
+ vd->core_cfg.size, vd->vd_cfg.size,
+ GXP_SHARED_SLICE_SIZE);
+ ret = -ENOSPC;
+ goto err_unmap_vd;
+ }
+ /*
+ * It's okay when mappings[SYS_CFG_REGION_IDX] is not set, in which case
+ * map_resource does nothing.
+ */
+ assign_resource(&res, img_cfg, SYS_CFG_REGION_IDX);
+ /* Use the end of the shared region for system cfg. */
+ offset = GXP_SHARED_BUFFER_SIZE - res.size;
+ res.vaddr = pool->vaddr + offset;
+ res.paddr = pool->paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map sys config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ goto err_unmap_vd;
+ }
+ vd->sys_cfg = res;
+
+ return 0;
+
+err_unmap_vd:
+ unmap_resource(vd, &vd->vd_cfg);
+ vd->vd_cfg.daddr = 0;
+err_unmap_core:
+ unmap_resource(vd, &vd->core_cfg);
+ vd->core_cfg.daddr = 0;
+ return ret;
+}
+
+static void unmap_cfg_regions(struct gxp_virtual_device *vd)
+{
+ if (vd->core_cfg.daddr == 0)
+ return unmap_core_shared_buffer(vd);
+
+ unmap_resource(vd, &vd->sys_cfg);
+ unmap_resource(vd, &vd->vd_cfg);
+ unmap_resource(vd, &vd->core_cfg);
+}
+
+static int gxp_vd_imgcfg_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
+ size_t size, unsigned int flags)
+{
+ struct gxp_virtual_device *vd = data;
+
+ if (flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE)
+ return 0;
+
+ return map_ns_region(vd, daddr, size);
+}
+
+static void gxp_vd_imgcfg_unmap(void *data, dma_addr_t daddr, size_t size,
+ unsigned int flags)
+{
+ struct gxp_virtual_device *vd = data;
+
+ if (flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE)
+ return;
+
+ unmap_ns_region(vd, daddr);
+}
+
+static int map_fw_image_config(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ struct gxp_firmware_manager *fw_mgr)
+{
+ int ret;
+ struct gcip_image_config *cfg;
+ static const struct gcip_image_config_ops gxp_vd_imgcfg_ops = {
+ .map = gxp_vd_imgcfg_map,
+ .unmap = gxp_vd_imgcfg_unmap,
+ };
+
+ /*
+ * Allow skipping for test suites that need a VD but don't need the FW module.
+ */
+ if (IS_ENABLED(CONFIG_GXP_TEST) && !fw_mgr)
+ return 0;
+ cfg = &fw_mgr->img_cfg;
+ ret = gcip_image_config_parser_init(&vd->cfg_parser, &gxp_vd_imgcfg_ops,
+ gxp->dev, vd);
+ /* parser_init() never fails unless we pass invalid OPs. */
+ if (unlikely(ret))
+ return ret;
+ ret = gcip_image_config_parse(&vd->cfg_parser, cfg);
+ if (ret) {
+ dev_err(gxp->dev, "Image config mapping failed");
+ return ret;
+ }
+ ret = map_cfg_regions(vd, cfg);
+ if (ret) {
+ dev_err(gxp->dev, "Config regions mapping failed");
+ gcip_image_config_clear(&vd->cfg_parser);
+ return ret;
+ }
+ vd->fw_ro_size = cfg->firmware_size;
+ /*
+ * To be compatible with image configs that do not set firmware_size,
+ * fall back to mapping the whole region to the carveout.
+ */
+ if (vd->fw_ro_size == 0)
+ vd->fw_ro_size = gxp->fwbufs[0].size;
+
+ return 0;
+}
+
+static void unmap_fw_image_config(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ unmap_cfg_regions(vd);
+ gcip_image_config_clear(&vd->cfg_parser);
+}
+
+/*
+ * For each core,
+ * - fw_rw_size = fwbufs[core].size - fw_ro_size
+ * - allocates rwdata_sgt[core] with size fw_rw_size
+ * - maps fwbufs[core].daddr -> fwbufs[core].paddr with size fw_ro_size
+ * - maps fwbufs[core].daddr + fw_ro_size -> rwdata_sgt[core]
+ */
+static int alloc_and_map_fw_image(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ size_t ro_size = vd->fw_ro_size, rw_size;
+ struct iommu_domain *domain = vd->domain->domain;
+ int i, ret;
+
+ /* Maps all FW regions together; no rwdata_sgt is needed in this case. */
+ if (ro_size == gxp->fwbufs[0].size)
+ return iommu_map(domain, gxp->fwbufs[0].daddr,
+ gxp->fwbufs[0].paddr, ro_size * GXP_NUM_CORES,
+ IOMMU_READ | IOMMU_WRITE);
+
+ dev_info(gxp->dev, "mapping firmware RO size %#zx", ro_size);
+ rw_size = gxp->fwbufs[0].size - ro_size;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ vd->rwdata_sgt[i] =
+ gcip_alloc_noncontiguous(gxp->dev, rw_size, GFP_KERNEL);
+ if (!vd->rwdata_sgt[i]) {
+ dev_err(gxp->dev,
+ "allocate firmware data for core %d failed", i);
+ ret = -ENOMEM;
+ goto err_free_sgt;
+ }
+ }
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ ret = iommu_map(domain, gxp->fwbufs[i].daddr,
+ gxp->fwbufs[i].paddr, ro_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "map firmware RO for core %d failed",
+ i);
+ goto err_unmap;
+ }
+ ret = gxp_dma_map_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i],
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "map firmware RW for core %d failed",
+ i);
+ iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size);
+ goto err_unmap;
+ }
+ }
+ return 0;
+
+err_unmap:
+ while (i--) {
+ iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i]);
+ }
+err_free_sgt:
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (vd->rwdata_sgt[i])
+ gcip_free_noncontiguous(vd->rwdata_sgt[i]);
+ vd->rwdata_sgt[i] = NULL;
+ }
+ return ret;
+}
+
+static void unmap_and_free_fw_image(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ size_t ro_size = vd->fw_ro_size;
+ struct iommu_domain *domain = vd->domain->domain;
+ int i;
+
+ if (ro_size == gxp->fwbufs[0].size) {
+ iommu_unmap(domain, gxp->fwbufs[0].daddr,
+ ro_size * GXP_NUM_CORES);
+ return;
+ }
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ iommu_unmap(domain, gxp->fwbufs[i].daddr, ro_size);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i]);
+ }
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (vd->rwdata_sgt[i])
+ gcip_free_noncontiguous(vd->rwdata_sgt[i]);
+ vd->rwdata_sgt[i] = NULL;
+ }
}
static int map_core_telemetry_buffers(struct gxp_dev *gxp,
@@ -158,6 +566,11 @@ static int assign_cores(struct gxp_virtual_device *vd)
uint core;
uint available_cores = 0;
+ if (!gxp_core_boot) {
+ /* We don't do core assignment when cores are managed by MCU. */
+ vd->core_list = BIT(GXP_NUM_CORES) - 1;
+ return 0;
+ }
vd->core_list = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == NULL) {
@@ -183,17 +596,69 @@ static void unassign_cores(struct gxp_virtual_device *vd)
struct gxp_dev *gxp = vd->gxp;
uint core;
+ if (!gxp_core_boot)
+ return;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd)
gxp->core_to_vd[core] = NULL;
}
}
+/* Saves the state of this VD's doorbells and clears them. */
+static void vd_save_doorbells(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint base_doorbell;
+ uint i;
+
+ if (!gxp_fw_data_use_per_vd_config(vd))
+ return;
+ base_doorbell = GXP_DOORBELLS_START +
+ gxp_vd_hw_slot_id(vd) * GXP_NUM_DOORBELLS_PER_VD;
+ for (i = 0; i < ARRAY_SIZE(vd->doorbells_state); i++) {
+ vd->doorbells_state[i] =
+ gxp_doorbell_status(gxp, base_doorbell + i);
+ gxp_doorbell_clear(gxp, base_doorbell + i);
+ }
+}
+
+/* Restores the state of this VD's doorbells. */
+static void vd_restore_doorbells(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint base_doorbell;
+ uint i;
+
+ if (!gxp_fw_data_use_per_vd_config(vd))
+ return;
+ base_doorbell = GXP_DOORBELLS_START +
+ gxp_vd_hw_slot_id(vd) * GXP_NUM_DOORBELLS_PER_VD;
+ for (i = 0; i < ARRAY_SIZE(vd->doorbells_state); i++)
+ if (vd->doorbells_state[i])
+ gxp_doorbell_set(gxp, base_doorbell + i);
+ else
+ gxp_doorbell_clear(gxp, base_doorbell + i);
+}
+
+static void set_config_version(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ if (gxp->firmware_mgr && vd->sys_cfg.daddr)
+ vd->config_version = gxp->firmware_mgr->img_cfg.config_version;
+ /*
+ * Let gxp_dma_map_core_resources() map this region only when using the
+ * legacy protocol.
+ *
+ * TODO(b/265748027): remove this
+ */
+ if (gxp_fw_data_use_per_vd_config(vd))
+ gxp->fwdatabuf.daddr = 0;
+}
+
struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
u16 requested_cores)
{
struct gxp_virtual_device *vd;
- unsigned int size;
int i;
int err;
@@ -216,6 +681,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
refcount_set(&vd->refcount, 1);
vd->credit = GXP_COMMAND_CREDIT_PER_VD;
vd->first_open = true;
+ vd->vdid = atomic_inc_return(&gxp->next_vdid);
vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
if (!vd->domain) {
@@ -223,30 +689,18 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
goto error_free_vd;
}
- if (gxp->num_shared_slices) {
- vd->slice_index =
- ida_alloc_max(&gxp->shared_slice_idp,
- gxp->num_shared_slices - 1, GFP_KERNEL);
- if (vd->slice_index < 0) {
- err = vd->slice_index;
- goto error_free_domain;
- }
- }
-
- size = GXP_NUM_CORES * PRIVATE_FW_DATA_SIZE;
- vd->fwdata_sgt = gcip_alloc_noncontiguous(gxp->dev, size, GFP_KERNEL);
- if (!vd->fwdata_sgt) {
- dev_err(gxp->dev, "allocate firmware data size=%x failed",
- size);
- err = -ENOMEM;
- goto error_free_slice_index;
+ vd->slice_index = ida_alloc_max(&gxp->shared_slice_idp,
+ GXP_NUM_SHARED_SLICES - 1, GFP_KERNEL);
+ if (vd->slice_index < 0) {
+ err = vd->slice_index;
+ goto error_free_domain;
}
vd->mailbox_resp_queues = kcalloc(
vd->num_cores, sizeof(*vd->mailbox_resp_queues), GFP_KERNEL);
if (!vd->mailbox_resp_queues) {
err = -ENOMEM;
- goto error_free_fwdata;
+ goto error_free_slice_index;
}
for (i = 0; i < vd->num_cores; i++) {
@@ -263,19 +717,29 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
if (err)
goto error_free_resp_queues;
+ /*
+ * This assumes the firmware is requested before allocating a VD, which
+ * holds because we request the firmware on the first GXP device open.
+ */
+ err = map_fw_image_config(gxp, vd, gxp->firmware_mgr);
+ if (err)
+ goto error_unassign_cores;
+
+ set_config_version(gxp, vd);
if (gxp->data_mgr) {
- vd->fw_app = gxp_fw_data_create_app(gxp, vd->core_list);
+ /* After map_fw_image_config because it needs vd->sys_cfg. */
+ vd->fw_app = gxp_fw_data_create_app(gxp, vd);
if (IS_ERR(vd->fw_app)) {
err = PTR_ERR(vd->fw_app);
- goto error_unassign_cores;
+ vd->fw_app = NULL;
+ goto error_unmap_imgcfg;
}
}
err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list,
vd->slice_index);
if (err)
goto error_destroy_fw_data;
- err = gxp_dma_map_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
- vd->fwdata_sgt, IOMMU_READ | IOMMU_WRITE);
+ err = alloc_and_map_fw_image(gxp, vd);
if (err)
goto error_unmap_core_resources;
err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
@@ -284,25 +748,23 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
err = map_debug_dump_buffer(gxp, vd);
if (err)
goto error_unmap_core_telemetry_buffer;
- vd->vdid = atomic_inc_return(&gxp->next_vdid);
return vd;
error_unmap_core_telemetry_buffer:
unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
error_unmap_fw_data:
- gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
- vd->fwdata_sgt);
+ unmap_and_free_fw_image(gxp, vd);
error_unmap_core_resources:
gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
error_destroy_fw_data:
gxp_fw_data_destroy_app(gxp, vd->fw_app);
+error_unmap_imgcfg:
+ unmap_fw_image_config(gxp, vd);
error_unassign_cores:
unassign_cores(vd);
error_free_resp_queues:
kfree(vd->mailbox_resp_queues);
-error_free_fwdata:
- gcip_free_noncontiguous(vd->fwdata_sgt);
error_free_slice_index:
if (vd->slice_index >= 0)
ida_free(&gxp->shared_slice_idp, vd->slice_index);
@@ -329,17 +791,13 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
mutex_unlock(&gxp->secure_vd_lock);
}
- unassign_cores(vd);
unmap_debug_dump_buffer(gxp, vd);
unmap_core_telemetry_buffers(gxp, vd, core_list);
- gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
- vd->fwdata_sgt);
+ unmap_and_free_fw_image(gxp, vd);
gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
-
- if (!IS_ERR_OR_NULL(vd->fw_app)) {
- gxp_fw_data_destroy_app(gxp, vd->fw_app);
- vd->fw_app = NULL;
- }
+ gxp_fw_data_destroy_app(gxp, vd->fw_app);
+ unmap_fw_image_config(gxp, vd);
+ unassign_cores(vd);
vd->gxp->mailbox_mgr->release_unconsumed_async_resps(vd);
@@ -357,7 +815,6 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
up_write(&vd->mappings_semaphore);
kfree(vd->mailbox_resp_queues);
- gcip_free_noncontiguous(vd->fwdata_sgt);
if (vd->slice_index >= 0)
ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
@@ -428,6 +885,8 @@ int gxp_vd_run(struct gxp_virtual_device *vd)
if (ret)
goto err_vd_unavailable;
}
+ /* Restore this VD's doorbell states (all cleared on first run) */
+ vd_restore_doorbells(vd);
ret = gxp_firmware_run(gxp, vd, vd->core_list);
if (ret)
goto err_vd_block_unready;
@@ -463,7 +922,8 @@ err_vd_unavailable:
void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core;
+ uint phys_core;
+ uint core_list = vd->core_list;
uint lpm_state;
lockdep_assert_held(&gxp->vd_semaphore);
@@ -473,23 +933,55 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
/*
	 * Put all of the VD's cores into reset so they cannot wake each other up
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (core_list & BIT(phys_core)) {
lpm_state = gxp_lpm_get_state(
- gxp, CORE_TO_PSM(core));
+ gxp, CORE_TO_PSM(phys_core));
if (lpm_state != LPM_PG_STATE)
- hold_core_in_reset(gxp, core);
+ hold_core_in_reset(gxp, phys_core);
}
}
}
- gxp_firmware_stop(gxp, vd, vd->core_list);
+ gxp_firmware_stop(gxp, vd, core_list);
if (vd->state == GXP_VD_READY || vd->state == GXP_VD_RUNNING ||
vd->state == GXP_VD_UNAVAILABLE)
gxp_dma_domain_detach_device(gxp, vd->domain);
vd->state = GXP_VD_OFF;
}
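+/*
+ * Returns the core index the firmware-data helpers expect: the virtual core
+ * index under the per-VD config protocol, the physical core ID otherwise.
+ */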
+static inline uint select_core(struct gxp_virtual_device *vd, uint virt_core,
+ uint phys_core)
+{
+ return gxp_fw_data_use_per_vd_config(vd) ? virt_core : phys_core;
+}
+
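+/*
+ * Reads the core's boot status (per-VD config) or boot mode (legacy) into
+ * @boot_state and returns true if it indicates a completed suspend.
+ */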
+static bool boot_state_is_suspend(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 *boot_state)
+{
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ *boot_state = gxp_firmware_get_boot_status(gxp, vd, core);
+ return *boot_state == GXP_BOOT_STATUS_SUSPENDED;
+ }
+
+ *boot_state = gxp_firmware_get_boot_mode(gxp, vd, core);
+ return *boot_state == GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED;
+}
+
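+/*
+ * Counterpart of boot_state_is_suspend(): returns true if @boot_state
+ * indicates the core is active, i.e. has completed resuming.
+ */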
+static bool boot_state_is_active(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 *boot_state)
+{
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ *boot_state = gxp_firmware_get_boot_status(gxp, vd, core);
+ return *boot_state == GXP_BOOT_STATUS_ACTIVE;
+ }
+
+ *boot_state = gxp_firmware_get_boot_mode(gxp, vd, core);
+ return *boot_state == GXP_BOOT_MODE_STATUS_RESUME_COMPLETED;
+}
+
/*
* Caller must have locked `gxp->vd_semaphore` for writing.
*
@@ -500,8 +992,9 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
*/
void gxp_vd_suspend(struct gxp_virtual_device *vd)
{
- uint core;
+ uint virt_core, phys_core;
struct gxp_dev *gxp = vd->gxp;
+ uint core_list = vd->core_list;
u32 boot_state;
uint failed_cores = 0;
@@ -521,63 +1014,75 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
* Start the suspend process for all of this VD's cores without waiting
* for completion.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(core),
- LPM_ACTIVE_STATE)) {
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- hold_core_in_reset(gxp, core);
- dev_err(gxp->dev,
- "Core %u stuck at LPM_ACTIVE_STATE",
- core);
- continue;
- }
- /* Mark the boot mode as a suspend event */
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(phys_core),
+ LPM_ACTIVE_STATE)) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ hold_core_in_reset(gxp, phys_core);
+ dev_err(gxp->dev, "Core %u stuck at LPM_ACTIVE_STATE",
+ phys_core);
+ continue;
+ }
+ /* Mark the boot mode as a suspend event */
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ gxp_firmware_set_boot_status(gxp, vd, core,
+ GXP_BOOT_STATUS_NONE);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_SUSPEND);
+ } else {
gxp_firmware_set_boot_mode(
- gxp, core, GXP_BOOT_MODE_REQUEST_SUSPEND);
- /*
- * Request a suspend event by sending a mailbox
- * notification.
- */
- gxp_notification_send(gxp, core,
- CORE_NOTIF_SUSPEND_REQUEST);
+ gxp, vd, core, GXP_BOOT_MODE_REQUEST_SUSPEND);
}
+ /*
+ * Request a suspend event by sending a mailbox
+ * notification.
+ */
+ gxp_notification_send(gxp, phys_core,
+ CORE_NOTIF_SUSPEND_REQUEST);
+ virt_core++;
}
/* Wait for all cores to complete core suspension. */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- if (!(failed_cores & BIT(core))) {
- if (!gxp_lpm_wait_state_eq(gxp,
- CORE_TO_PSM(core),
- LPM_PG_STATE)) {
- boot_state = gxp_firmware_get_boot_mode(
- gxp, core);
- if (boot_state !=
- GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED) {
- dev_err(gxp->dev,
- "Suspension request on core %u failed (status: %u)",
- core, boot_state);
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- hold_core_in_reset(gxp, core);
- }
- } else {
- /* Re-set PS1 as the default low power state. */
- gxp_lpm_enable_state(gxp,
- CORE_TO_PSM(core),
- LPM_CG_STATE);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ virt_core++;
+ if (failed_cores & BIT(phys_core))
+ continue;
+ if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(phys_core),
+ LPM_PG_STATE)) {
+ if (!boot_state_is_suspend(gxp, vd, core,
+ &boot_state)) {
+ dev_err(gxp->dev,
+ "Suspension request on core %u failed (status: %u)",
+ phys_core, boot_state);
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ hold_core_in_reset(gxp, phys_core);
}
+ } else {
+ /* Re-set PS1 as the default low power state. */
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(phys_core),
+ LPM_CG_STATE);
}
}
gxp_dma_domain_detach_device(gxp, vd->domain);
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
- for (core = 0; core < GXP_NUM_CORES; core++)
- if (gxp->core_to_vd[core] == vd)
- gxp_pm_core_off(gxp, core);
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++)
+ if (core_list & BIT(phys_core))
+ gxp_pm_core_off(gxp, phys_core);
} else {
+ /* Save and clear all doorbells. */
+ vd_save_doorbells(vd);
vd->blk_switch_count_when_suspended =
gxp_pm_get_blk_switch_count(gxp);
vd->state = GXP_VD_SUSPENDED;
@@ -591,8 +1096,8 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
int gxp_vd_resume(struct gxp_virtual_device *vd)
{
int ret = 0;
- uint core;
- uint core_list = 0;
+ uint phys_core, virt_core;
+ uint core_list = vd->core_list;
uint timeout;
u32 boot_state;
struct gxp_dev *gxp = vd->gxp;
@@ -609,79 +1114,90 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
gxp_pm_force_clkmux_normal(gxp);
curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd)
- core_list |= BIT(core);
- }
+	/* Restore the doorbell state for this VD. */
+ vd_restore_doorbells(vd);
+
gxp_dma_domain_attach_device(gxp, vd->domain, core_list);
/*
* Start the resume process for all of this VD's cores without waiting
* for completion.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (BIT(core) & core_list) {
- /*
- * The comparison is to check if blk_switch_count is
- * changed. If it's changed, it means the block is rebooted and
- * therefore we need to set up the hardware again.
- */
- if (vd->blk_switch_count_when_suspended !=
- curr_blk_switch_count) {
- ret = gxp_firmware_setup_hw_after_block_off(
- gxp, core, /*verbose=*/false);
- if (ret) {
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- dev_err(gxp->dev,
- "Failed to power up core %u\n",
- core);
- continue;
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ /*
+	 * Check whether blk_switch_count has changed. If it has, the block
+	 * was rebooted while this VD was suspended, so the hardware must be
+	 * set up again.
+ */
+ if (vd->blk_switch_count_when_suspended !=
+ curr_blk_switch_count) {
+ ret = gxp_firmware_setup_hw_after_block_off(
+ gxp, core, phys_core,
+ /*verbose=*/false);
+ if (ret) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ dev_err(gxp->dev,
+ "Failed to power up core %u\n",
+ phys_core);
+ continue;
}
- /* Mark this as a resume power-up event. */
+ }
+ /* Mark this as a resume power-up event. */
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ gxp_firmware_set_boot_status(gxp, vd, core,
+ GXP_BOOT_STATUS_NONE);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_RESUME);
+ } else {
gxp_firmware_set_boot_mode(
- gxp, core, GXP_BOOT_MODE_REQUEST_RESUME);
- /*
- * Power on the core by explicitly switching its PSM to
- * PS0 (LPM_ACTIVE_STATE).
- */
- gxp_lpm_set_state(gxp, CORE_TO_PSM(core),
- LPM_ACTIVE_STATE,
- /*verbose=*/false);
+ gxp, vd, core, GXP_BOOT_MODE_REQUEST_RESUME);
}
+ /*
+ * Power on the core by explicitly switching its PSM to
+ * PS0 (LPM_ACTIVE_STATE).
+ */
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(phys_core), LPM_ACTIVE_STATE,
+ /*verbose=*/false);
+ virt_core++;
}
/* Wait for all cores to complete core resumption. */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (BIT(core) & core_list) {
- if (!(failed_cores & BIT(core))) {
- /* in microseconds */
- timeout = 1000000;
- while (--timeout) {
- boot_state = gxp_firmware_get_boot_mode(
- gxp, core);
- if (boot_state ==
- GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)
- break;
- udelay(1 * GXP_TIME_DELAY_FACTOR);
- }
- if (timeout == 0 &&
- boot_state !=
- GXP_BOOT_MODE_STATUS_RESUME_COMPLETED) {
- dev_err(gxp->dev,
- "Resume request on core %u failed (status: %u)",
- core, boot_state);
- ret = -EBUSY;
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+
+ if (!(failed_cores & BIT(phys_core))) {
+ /* in microseconds */
+ timeout = 1000000;
+ while (--timeout) {
+ if (boot_state_is_active(gxp, vd, core,
+ &boot_state))
+ break;
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
+ }
+ if (timeout == 0) {
+ dev_err(gxp->dev,
+ "Resume request on core %u failed (status: %u)",
+ phys_core, boot_state);
+ ret = -EBUSY;
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
}
}
+ virt_core++;
}
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (BIT(core) & core_list)
- gxp_pm_core_off(gxp, core);
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (core_list & BIT(phys_core))
+ gxp_pm_core_off(gxp, phys_core);
}
gxp_dma_domain_detach_device(gxp, vd->domain);
} else {
@@ -699,11 +1215,9 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
uint virt_core_index = 0;
for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
- if (gxp->core_to_vd[phys_core] == vd) {
- if (virt_core_index == virt_core) {
- /* Found virtual core */
+ if (vd->core_list & BIT(phys_core)) {
+ if (virt_core_index == virt_core)
return phys_core;
- }
virt_core_index++;
}
@@ -713,20 +1227,6 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
return -EINVAL;
}
-uint gxp_vd_phys_core_list(struct gxp_virtual_device *vd)
-{
- uint core_list = 0;
- int core;
-
- lockdep_assert_held(&vd->gxp->vd_semaphore);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (vd->gxp->core_to_vd[core] == vd)
- core_list |= BIT(core);
- }
-
- return core_list;
-}
-
int gxp_vd_mapping_store(struct gxp_virtual_device *vd, struct gxp_mapping *map)
{
struct rb_node **link;
@@ -901,3 +1401,60 @@ void gxp_vd_put(struct gxp_virtual_device *vd)
if (refcount_dec_and_test(&vd->refcount))
kfree(vd);
}
+
+void gxp_vd_invalidate(struct gxp_dev *gxp, int client_id)
+{
+ struct gxp_client *client = NULL, *c;
+ release_unconsumed_async_resps_t release_unconsumed_async_resps =
+ gxp->mailbox_mgr->release_unconsumed_async_resps;
+
+ /*
+	 * Prevent @gxp->client_list from being changed while handling the crash.
+	 * The user cannot open or close an FD until this function releases the lock.
+ */
+ mutex_lock(&gxp->client_list_lock);
+
+ /*
+	 * Find the VD corresponding to @client_id.
+	 * If it holds a block wakelock, discard all pending/unconsumed UCI
+	 * responses and change the state of the VD to GXP_VD_UNAVAILABLE.
+ */
+ list_for_each_entry (c, &gxp->client_list, list_entry) {
+ down_write(&c->semaphore);
+ down_write(&gxp->vd_semaphore);
+ if (c->vd && c->vd->client_id == client_id) {
+ client = c;
+ break;
+ }
+ up_write(&gxp->vd_semaphore);
+ up_write(&c->semaphore);
+ }
+
+ mutex_unlock(&gxp->client_list_lock);
+
+ if (!client) {
+ dev_err(gxp->dev, "Failed to find a VD, client_id=%d",
+ client_id);
+ return;
+ }
+
+ dev_err(gxp->dev, "Invalidate a VD, VDID=%d, client_id=%d",
+ client->vd->vdid, client_id);
+
+ if (client->vd->state != GXP_VD_UNAVAILABLE) {
+ if (client->has_block_wakelock) {
+ if (release_unconsumed_async_resps)
+ release_unconsumed_async_resps(client->vd);
+ gxp_vd_block_unready(client->vd);
+ }
+
+ client->vd->state = GXP_VD_UNAVAILABLE;
+ if (client->vd_invalid_eventfd)
+ gxp_eventfd_signal(client->vd_invalid_eventfd);
+ } else {
+ dev_dbg(gxp->dev, "This VD is already invalidated");
+ }
+
+ up_write(&gxp->vd_semaphore);
+ up_write(&client->semaphore);
+}
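Note on the reworked suspend/resume paths above: both loops now walk the
physical core bitmask while carrying a running virtual core index, and
select_core() picks which of the two addresses the per-core config region, so
the per-VD and legacy protocols share one loop. A minimal sketch of the
pattern, assuming the driver's names (GXP_NUM_CORES, select_core(),
core_list) and eliding the per-core work:

	uint virt_core = 0, phys_core;

	for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
		uint core;

		if (!(core_list & BIT(phys_core)))
			continue;
		core = select_core(vd, virt_core, phys_core);
		/* Config-region accesses (boot mode/status) use `core`;
		 * LPM/PSM operations and notifications use `phys_core`. */
		virt_core++;
	}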
diff --git a/gxp-vd.h b/gxp-vd.h
index d9c28c9..fb6cace 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -18,6 +18,9 @@
#include <linux/types.h>
#include <linux/wait.h>
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-mapping.h"
@@ -65,6 +68,8 @@ struct gxp_virtual_device {
struct mailbox_resp_queue *mailbox_resp_queues;
struct rb_root mappings_root;
struct rw_semaphore mappings_semaphore;
+	/* Doorbell state, saved on VD suspend and restored on resume. */
+ uint doorbells_state[GXP_NUM_DOORBELLS_PER_VD];
enum gxp_virtual_device_state state;
/*
* Record the gxp->power_mgr->blk_switch_count when the vd was
@@ -79,9 +84,24 @@ struct gxp_virtual_device {
*/
int slice_index;
/*
- * The SG table that holds the firmware data region.
+	 * The SG tables that hold the per-core firmware RW data regions.
+ */
+ struct sg_table *rwdata_sgt[GXP_NUM_CORES];
+ /*
+	 * The SG tables that back the regions specified in the image config's
+	 * non-secure IOMMU mappings.
+ */
+ struct {
+ dma_addr_t daddr;
+ struct sg_table *sgt;
+ } ns_regions[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
+	/* The firmware (RO) size specified in the image config. */
+ u32 fw_ro_size;
+ /*
+ * The config regions specified in image config.
+ * core_cfg's size should be a multiple of GXP_NUM_CORES.
*/
- struct sg_table *fwdata_sgt;
+ struct gxp_mapped_resource core_cfg, vd_cfg, sys_cfg;
uint core_list;
/*
* The ID of DSP client. -1 if it is not allocated.
@@ -116,6 +136,9 @@ struct gxp_virtual_device {
refcount_t refcount;
/* A constant ID assigned after VD is allocated. For debug only. */
int vdid;
+ struct gcip_image_config_parser cfg_parser;
+ /* The config version specified in firmware's image config. */
+ u32 config_version;
};
/*
@@ -198,13 +221,6 @@ void gxp_vd_stop(struct gxp_virtual_device *vd);
*/
int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
-/*
- * Acquires the physical core IDs assigned to the virtual device.
- *
- * The caller must have locked gxp->vd_semaphore for reading.
- */
-uint gxp_vd_phys_core_list(struct gxp_virtual_device *vd);
-
/**
* gxp_vd_mapping_store() - Store a mapping in a virtual device's records
* @vd: The virtual device @map was created for and will be stored in
@@ -353,4 +369,23 @@ gxp_vd_get(struct gxp_virtual_device *vd)
*/
void gxp_vd_put(struct gxp_virtual_device *vd);
+/*
+ * Changes the state of the VD associated with @client_id to
+ * GXP_VD_UNAVAILABLE. Internally, discards all pending/unconsumed user
+ * commands and calls `gxp_vd_block_unready`.
+ *
+ * This function is called when the firmware sends a
+ * `CLIENT_FATAL_ERROR_NOTIFY` RKCI.
+ */
+void gxp_vd_invalidate(struct gxp_dev *gxp, int client_id);
+
+/*
+ * Returns an ID between 0 and GXP_NUM_CORES - 1 that is unique to each VD.
+ * Only used in direct mode.
+ */
+static inline uint gxp_vd_hw_slot_id(struct gxp_virtual_device *vd)
+{
+ return ffs(vd->core_list) - 1;
+}
+
#endif /* __GXP_VD_H__ */
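On gxp_vd_hw_slot_id() above: ffs() returns the 1-based position of the least
significant set bit, so the slot ID is simply the lowest physical core
assigned to the VD. For example, assuming a VD holding physical cores 1 and 2:

	uint core_list = 0x6;		/* physical cores 1 and 2 */
	uint slot = ffs(core_list) - 1;	/* ffs() is 1-based -> slot == 1 */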
diff --git a/gxp.h b/gxp.h
index 707785d..676b0b3 100644
--- a/gxp.h
+++ b/gxp.h
@@ -997,4 +997,23 @@ struct gxp_sync_fence_status {
#define GXP_SYNC_FENCE_STATUS \
_IOWR(GXP_IOCTL_BASE, 34, struct gxp_sync_fence_status)
+/*
+ * struct gxp_register_invalidated_eventfd_ioctl
+ * @eventfd: File descriptor obtained via eventfd().
+ * Not used during the unregister step.
+ */
+struct gxp_register_invalidated_eventfd_ioctl {
+ __u32 eventfd;
+};
+
+/*
+ * Registers an eventfd which will be triggered when the device crashes and
+ * the virtual device of the client is invalidated.
+ */
+#define GXP_REGISTER_INVALIDATED_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 35, struct gxp_register_invalidated_eventfd_ioctl)
+
+#define GXP_UNREGISTER_INVALIDATED_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 36, struct gxp_register_invalidated_eventfd_ioctl)
+
#endif /* __GXP_H__ */
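A minimal userspace sketch of the two new ioctls, assuming `gxp_fd` is an
already-open GXP device file descriptor (error handling elided):

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>

	struct gxp_register_invalidated_eventfd_ioctl arg = {
		.eventfd = eventfd(0, EFD_CLOEXEC),
	};

	ioctl(gxp_fd, GXP_REGISTER_INVALIDATED_EVENTFD, &arg);
	/* poll(2)/read(2) on arg.eventfd: it is signaled when the device
	 * crashes and this client's virtual device is invalidated. */
	ioctl(gxp_fd, GXP_UNREGISTER_INVALIDATED_EVENTFD, &arg);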