author     Mark Salyzyn <salyzyn@google.com>  2020-12-23 05:05:54 -0800
committer  Mark Salyzyn <salyzyn@google.com>  2020-12-23 05:05:54 -0800
commit     6f47fab2a7f1a1620dd6f97e0d63f7056e790003 (patch)
tree       861eb06a4d9f08e4b5068197d286d2c64477cbf5
parent     d33f6888e244c4ed16c9225122f0f2420d2a15c3 (diff)
parent     c31b9b8adffe9f2f96bdabedf61e596a7e41193e (diff)
download   abrolhos-6f47fab2a7f1a1620dd6f97e0d63f7056e790003.tar.gz
Merge partner/android-gs-pixel-mainline into partner/android-gs-pixel-5.10-stabilization
* partner/android-gs-pixel-mainline:
  Merge branch 'whitechapel' into android-gs-pixel-mainline

Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Change-Id: I1ea9c71f18be68933c3c6222da125f9a07a0c022
-rw-r--r--  drivers/edgetpu/abrolhos-device.c        24
-rw-r--r--  drivers/edgetpu/abrolhos-firmware.c      22
-rw-r--r--  drivers/edgetpu/abrolhos-iommu.c         17
-rw-r--r--  drivers/edgetpu/abrolhos-thermal.c        7
-rw-r--r--  drivers/edgetpu/edgetpu-device-group.c  172
-rw-r--r--  drivers/edgetpu/edgetpu-device-group.h   75
-rw-r--r--  drivers/edgetpu/edgetpu-dmabuf.c         38
-rw-r--r--  drivers/edgetpu/edgetpu-fs.c             50
-rw-r--r--  drivers/edgetpu/edgetpu-internal.h       43
-rw-r--r--  drivers/edgetpu/edgetpu-iremap-pool.c    20
-rw-r--r--  drivers/edgetpu/edgetpu-mailbox.c        94
-rw-r--r--  drivers/edgetpu/edgetpu-mailbox.h        21
-rw-r--r--  drivers/edgetpu/edgetpu-mapping.h        13
-rw-r--r--  drivers/edgetpu/edgetpu-mmu.h            10
-rw-r--r--  drivers/edgetpu/edgetpu-pm.h              2
-rw-r--r--  drivers/edgetpu/edgetpu-telemetry.c     247
-rw-r--r--  drivers/edgetpu/edgetpu-telemetry.h       8
-rw-r--r--  drivers/edgetpu/edgetpu.h                 5
18 files changed, 578 insertions(+), 290 deletions(-)
diff --git a/drivers/edgetpu/abrolhos-device.c b/drivers/edgetpu/abrolhos-device.c
index 2df80ee..f6a0eaf 100644
--- a/drivers/edgetpu/abrolhos-device.c
+++ b/drivers/edgetpu/abrolhos-device.c
@@ -109,6 +109,30 @@ void edgetpu_mark_probe_fail(struct edgetpu_dev *etdev)
}
struct edgetpu_dumpregs_range edgetpu_chip_statusregs_ranges[] = {
+ {
+ .firstreg = EDGETPU_REG_USER_HIB_FIRST_ERROR_STATUS,
+ .lastreg = EDGETPU_REG_USER_HIB_FIRST_ERROR_STATUS,
+ },
+ {
+ .firstreg = EDGETPU_REG_SC_RUNSTATUS,
+ .lastreg = EDGETPU_REG_SC_RUNSTATUS,
+ },
+ {
+ .firstreg = EDGETPU_REG_USER_HIB_OUT_ACTVQ_INT_STAT,
+ .lastreg = EDGETPU_REG_USER_HIB_OUT_ACTVQ_INT_STAT,
+ },
+ {
+ .firstreg = EDGETPU_REG_USER_HIB_IN_ACTVQ_INT_STAT,
+ .lastreg = EDGETPU_REG_USER_HIB_IN_ACTVQ_INT_STAT,
+ },
+ {
+ .firstreg = EDGETPU_REG_USER_HIB_PARAMQ_INT_STAT,
+ .lastreg = EDGETPU_REG_USER_HIB_PARAMQ_INT_STAT,
+ },
+ {
+ .firstreg = EDGETPU_REG_USER_HIB_TOPLVL_INT_STAT,
+ .lastreg = EDGETPU_REG_USER_HIB_TOPLVL_INT_STAT,
+ },
};
int edgetpu_chip_statusregs_nranges =
ARRAY_SIZE(edgetpu_chip_statusregs_ranges);
diff --git a/drivers/edgetpu/abrolhos-firmware.c b/drivers/edgetpu/abrolhos-firmware.c
index 07e6792..18a7671 100644
--- a/drivers/edgetpu/abrolhos-firmware.c
+++ b/drivers/edgetpu/abrolhos-firmware.c
@@ -72,6 +72,7 @@ static int abrolhos_firmware_prepare_run(struct edgetpu_firmware *et_fw,
container_of(etdev, struct edgetpu_platform_dev, edgetpu_dev);
void *image_vaddr, *header_vaddr;
struct abrolhos_image_config *image_config;
+ phys_addr_t image_start, image_end, carveout_start, carveout_end;
dma_addr_t header_dma_addr;
int ret, tpu_state;
@@ -141,6 +142,27 @@ static int abrolhos_firmware_prepare_run(struct edgetpu_firmware *et_fw,
memcpy(&etdev->fw_version, &image_config->firmware_versions,
sizeof(etdev->fw_version));
+ /*
+ * GSA verifies the image config addresses and sizes are valid,
+ * so we don't perform overflow checks here.
+ */
+ image_start = (phys_addr_t)image_config->carveout_base;
+ image_end = (phys_addr_t)(image_config->firmware_base +
+ image_config->firmware_size - 1);
+ carveout_start = edgetpu_pdev->fw_region_paddr;
+ carveout_end = carveout_start + edgetpu_pdev->fw_region_size - 1;
+
+ /* Image must fit within the carveout */
+ if (image_start < carveout_start || image_end > carveout_end) {
+ etdev_err(etdev, "Firmware image doesn't fit in carveout\n");
+ etdev_err(etdev, "Image config: %pap - %pap\n", &image_start,
+ &image_end);
+ etdev_err(etdev, "Carveout: %pap - %pap\n", &carveout_start,
+ &carveout_end);
+ ret = -ERANGE;
+ goto out_free_gsa;
+ }
+
/* Reset KCI mailbox before starting f/w, don't process anything old.*/
edgetpu_mailbox_reset(etdev->kci->mailbox);
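The added range check is an inclusive-bounds containment test: the firmware image [image_start, image_end] must lie entirely inside the carveout [carveout_start, carveout_end], with both upper bounds computed as base + size - 1 to avoid an off-by-one at the top of the region. A minimal standalone sketch of the same test, using hypothetical addresses rather than the driver's real layout:

#include <stdint.h>
#include <stdio.h>

/* Inclusive-range containment: [is, ie] must fit inside [cs, ce]. */
static int fits_in_carveout(uint64_t is, uint64_t ie, uint64_t cs, uint64_t ce)
{
	return is >= cs && ie <= ce;
}

int main(void)
{
	/* Hypothetical 16 MiB carveout at 0x90000000. */
	const uint64_t cs = 0x90000000u, ce = cs + (16u << 20) - 1;
	/* Hypothetical 4 MiB image placed 1 MiB into the carveout. */
	const uint64_t is = cs + (1u << 20), ie = is + (4u << 20) - 1;

	printf("fits: %d\n", fits_in_carveout(is, ie, cs, ce)); /* prints 1 */
	return 0;
}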
diff --git a/drivers/edgetpu/abrolhos-iommu.c b/drivers/edgetpu/abrolhos-iommu.c
index 38e6b3f..58ca89c 100644
--- a/drivers/edgetpu/abrolhos-iommu.c
+++ b/drivers/edgetpu/abrolhos-iommu.c
@@ -388,15 +388,18 @@ void edgetpu_mmu_unmap(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
iommu_get_domain_for_dev(etdev->dev);
ret = get_iommu_map_params(etdev, map, context_id, &params);
- if (ret)
- return;
- /*
- * If this is a per-context maping, it was mirrored in the per-context
- * domain. Undo that mapping first.
- */
- if (params.domain != default_domain)
+ if (!ret && params.domain != default_domain) {
+ /*
+ * If this is a per-context mapping, it was mirrored in the
+ * per-context domain. Undo that mapping first.
+ */
iommu_unmap(params.domain, map->device_address, params.size);
+ }
+ /*
+ * Always do dma_unmap since context_id might be invalid when group has
+ * mailbox detached.
+ */
/* Undo the mapping in the default domain */
dma_unmap_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.orig_nents,
edgetpu_host_dma_dir(map->dir), map->dma_attrs);
diff --git a/drivers/edgetpu/abrolhos-thermal.c b/drivers/edgetpu/abrolhos-thermal.c
index c46fccb..5317f3f 100644
--- a/drivers/edgetpu/abrolhos-thermal.c
+++ b/drivers/edgetpu/abrolhos-thermal.c
@@ -80,7 +80,6 @@ static int edgetpu_set_cur_state(struct thermal_cooling_device *cdev,
int ret;
struct edgetpu_thermal *cooling = cdev->devdata;
struct device *dev = cooling->dev;
- unsigned long pwr_state;
if (WARN_ON(state_original >= ARRAY_SIZE(state_mapping))) {
dev_err(dev, "%s: invalid cooling state %lu\n", __func__,
@@ -89,7 +88,6 @@ static int edgetpu_set_cur_state(struct thermal_cooling_device *cdev,
}
mutex_lock(&cooling->lock);
- pwr_state = state_mapping[state_original];
if (state_original != cooling->cooling_state) {
/*
* TODO (b/174799481):
@@ -106,9 +104,6 @@ static int edgetpu_set_cur_state(struct thermal_cooling_device *cdev,
return ret;
}
cooling->cooling_state = state_original;
- } else {
- mutex_unlock(&cooling->lock);
- return -EALREADY;
}
mutex_unlock(&cooling->lock);
@@ -188,7 +183,7 @@ static int edgetpu_power2state(struct thermal_cooling_device *cdev,
for (i = 0; i < ARRAY_SIZE(state_pwr_map); i++) {
if (power >= state_pwr_map[i].power) {
- *state = state_pwr_map[i].state;
+ *state = i;
return 0;
}
}
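The power2state fix above returns the loop index itself as the cooling state instead of a per-entry state field, so the reported state always matches the position used elsewhere to index state_pwr_map. A standalone sketch of the lookup, assuming (as the early-return loop implies) the table is sorted by descending power budget; the budget values here are made up:

#include <stdio.h>

/* Hypothetical power budgets in milliwatts, sorted high to low. */
static const unsigned long state_pwr_map[] = { 4500, 3500, 2000, 1000 };
#define NSTATES (sizeof(state_pwr_map) / sizeof(state_pwr_map[0]))

/* Map an allowed power budget to the shallowest fitting cooling state. */
static int power2state(unsigned long power, unsigned long *state)
{
	unsigned long i;

	for (i = 0; i < NSTATES; i++) {
		if (power >= state_pwr_map[i]) {
			*state = i; /* the index is the state */
			return 0;
		}
	}
	return -1; /* below the lowest supported budget */
}

int main(void)
{
	unsigned long s;

	if (!power2state(2100, &s))
		printf("state %lu\n", s); /* prints "state 2" */
	return 0;
}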
diff --git a/drivers/edgetpu/edgetpu-device-group.c b/drivers/edgetpu/edgetpu-device-group.c
index 8d263ca..08a97a2 100644
--- a/drivers/edgetpu/edgetpu-device-group.c
+++ b/drivers/edgetpu/edgetpu-device-group.c
@@ -89,16 +89,31 @@ static int edgetpu_kci_leave_group_worker(struct kci_worker_param *param)
#endif /* CONFIG_ABROLHOS */
-/*
- * Asynchronously sends LEAVE_GROUP KCI to all devices in @group.
- *
- * Caller holds group->lock.
- */
-static void edgetpu_device_group_kci_leave(struct edgetpu_device_group *group)
+static int edgetpu_group_kci_open_device(struct edgetpu_device_group *group)
{
-#if IS_ENABLED(CONFIG_ABROLHOS)
- u8 mailbox_id = group->vii.mailbox->mailbox_id;
- int ret = edgetpu_kci_close_device(group->etdev->kci, BIT(mailbox_id));
+ u8 mailbox_id = edgetpu_group_context_id_locked(group);
+ int ret = edgetpu_kci_open_device(group->etdev->kci, BIT(mailbox_id));
+
+ /*
+ * This should only happen when the FW hasn't driven this KCI, log once
+ * to prevent log storm.
+ */
+ if (ret)
+ etdev_warn_once(group->etdev, "Open device failed with %d",
+ ret);
+ atomic_inc(&group->etdev->job_count);
+ return 0;
+}
+
+static void edgetpu_group_kci_close_device(struct edgetpu_device_group *group)
+{
+ u8 mailbox_id;
+ int ret;
+
+ if (edgetpu_group_mailbox_detached_locked(group))
+ return;
+ mailbox_id = edgetpu_group_context_id_locked(group);
+ ret = edgetpu_kci_close_device(group->etdev->kci, BIT(mailbox_id));
/*
* This should only happen when the FW hasn't driven this KCI, log once
@@ -108,6 +123,17 @@ static void edgetpu_device_group_kci_leave(struct edgetpu_device_group *group)
etdev_warn_once(group->etdev, "Close device failed with %d",
ret);
return;
+}
+
+/*
+ * Asynchronously sends LEAVE_GROUP KCI to all devices in @group.
+ *
+ * Caller holds group->lock.
+ */
+static void edgetpu_device_group_kci_leave(struct edgetpu_device_group *group)
+{
+#if IS_ENABLED(CONFIG_ABROLHOS)
+ return edgetpu_group_kci_close_device(group);
#else /* !CONFIG_ABROLHOS */
struct kci_worker_param *params =
kmalloc_array(group->n_clients, sizeof(*params), GFP_KERNEL);
@@ -149,18 +175,7 @@ static int
edgetpu_device_group_kci_finalized(struct edgetpu_device_group *group)
{
#if IS_ENABLED(CONFIG_ABROLHOS)
- u8 mailbox_id = group->vii.mailbox->mailbox_id;
- int ret = edgetpu_kci_open_device(group->etdev->kci, BIT(mailbox_id));
-
- /*
- * This should only happen when the FW hasn't driven this KCI, log once
- * to prevent log storm.
- */
- if (ret)
- etdev_warn_once(group->etdev, "Open device failed with %d",
- ret);
- atomic_inc(&group->etdev->job_count);
- return 0;
+ return edgetpu_group_kci_open_device(group);
#else /* !CONFIG_ABROLHOS */
struct kci_worker_param *params =
kmalloc_array(group->n_clients, sizeof(*params), GFP_KERNEL);
@@ -354,8 +369,10 @@ static void edgetpu_device_group_release(struct edgetpu_device_group *group)
group_release_members(group);
}
edgetpu_mailbox_remove_vii(&group->vii);
- edgetpu_mmu_detach_domain(group->etdev, group->etdomain);
- edgetpu_mmu_free_domain(group->etdev, group->etdomain);
+ if (group->etdomain) {
+ edgetpu_mmu_detach_domain(group->etdev, group->etdomain);
+ edgetpu_mmu_free_domain(group->etdev, group->etdomain);
+ }
group->status = EDGETPU_DEVICE_GROUP_DISBANDED;
}
@@ -648,12 +665,12 @@ edgetpu_device_group_alloc(struct edgetpu_client *client,
etdomain = edgetpu_mmu_alloc_domain(group->etdev);
if (!etdomain) {
ret = -ENOMEM;
- goto error_put_group;
+ goto error_leave_group;
}
ret = edgetpu_mmu_attach_domain(group->etdev, etdomain);
if (ret) {
edgetpu_mmu_free_domain(group->etdev, etdomain);
- goto error_put_group;
+ goto error_leave_group;
}
group->etdomain = etdomain;
ret = edgetpu_mailbox_init_vii(&group->vii, group, attr);
@@ -661,15 +678,19 @@ edgetpu_device_group_alloc(struct edgetpu_client *client,
etdev_dbg(group->etdev, "%s: group %u init vii failed ret=%d",
__func__, group->workload_id, ret);
/* this also performs domain detach / free */
- edgetpu_device_group_leave_locked(client);
- goto error_put_group;
+ goto error_leave_group;
}
+ group->context_id = group->vii.mailbox->mailbox_id;
+ if (attr->priority & EDGETPU_PRIORITY_DETACHABLE)
+ group->mailbox_detachable = true;
group->mbox_attr = *attr;
mutex_unlock(&client->etdev->state_lock);
return group;
+error_leave_group:
+ edgetpu_device_group_leave_locked(client);
error_put_group:
edgetpu_device_group_put(group);
state_unlock:
@@ -714,7 +735,8 @@ int edgetpu_device_group_finalize(struct edgetpu_device_group *group)
if (edgetpu_device_group_is_finalized(group))
goto err_unlock;
- if (!edgetpu_device_group_is_waiting(group)) {
+ if (!edgetpu_device_group_is_waiting(group) ||
+ edgetpu_group_mailbox_detached_locked(group)) {
ret = -EINVAL;
goto err_unlock;
}
@@ -808,15 +830,16 @@ static int edgetpu_device_group_map_iova_sgt(struct edgetpu_device_group *group,
{
struct edgetpu_dev *etdev;
const struct edgetpu_mapping *map = &hmap->map;
- enum edgetpu_context_id context_id = edgetpu_group_context_id(group);
+ enum edgetpu_context_id ctx_id = edgetpu_group_context_id_locked(group);
uint i;
int ret;
for (i = 1; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
+ edgetpu_mmu_reserve(etdev, map->alloc_iova, map->alloc_size);
ret = edgetpu_mmu_map_iova_sgt(etdev, map->device_address,
&hmap->sg_tables[i], map->dir,
- context_id);
+ ctx_id);
if (ret)
goto rollback;
}
@@ -829,8 +852,9 @@ rollback:
etdev = edgetpu_device_group_nth_etdev(group, i);
edgetpu_mmu_unmap_iova_sgt_attrs(etdev, map->device_address,
&hmap->sg_tables[i], map->dir,
- context_id,
+ ctx_id,
DMA_ATTR_SKIP_CPU_SYNC);
+ edgetpu_mmu_free(etdev, map->alloc_iova, map->alloc_size);
}
return ret;
}
@@ -846,14 +870,15 @@ edgetpu_device_group_unmap_iova_sgt(struct edgetpu_device_group *group,
{
const struct edgetpu_mapping *map = &hmap->map;
struct edgetpu_dev *etdev;
- enum edgetpu_context_id context_id = edgetpu_group_context_id(group);
+ enum edgetpu_context_id ctx_id = edgetpu_group_context_id_locked(group);
uint i;
for (i = 1; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
edgetpu_mmu_unmap_iova_sgt_attrs(etdev, map->device_address,
&hmap->sg_tables[i], map->dir,
- context_id, map->dma_attrs);
+ ctx_id, map->dma_attrs);
+ edgetpu_mmu_free(etdev, map->alloc_iova, map->alloc_size);
}
}
@@ -866,7 +891,7 @@ edgetpu_device_group_unmap_iova_sgt(struct edgetpu_device_group *group,
static void edgetpu_unmap_node(struct edgetpu_mapping *map)
{
struct edgetpu_device_group *group = map->priv;
- enum edgetpu_context_id context_id = edgetpu_group_context_id(group);
+ enum edgetpu_context_id ctx_id = edgetpu_group_context_id_locked(group);
struct edgetpu_host_map *hmap =
container_of(map, struct edgetpu_host_map, map);
struct edgetpu_dev *etdev;
@@ -884,7 +909,7 @@ static void edgetpu_unmap_node(struct edgetpu_mapping *map)
etdev = edgetpu_device_group_nth_etdev(group,
map->die_index);
}
- edgetpu_mmu_unmap(etdev, map, context_id);
+ edgetpu_mmu_unmap(etdev, map, ctx_id);
}
for_each_sg_page(map->sgt.sgl, &sg_iter, map->sgt.orig_nents, 0) {
@@ -1155,7 +1180,7 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
struct edgetpu_host_map *hmap;
struct edgetpu_mapping *map;
struct edgetpu_dev *etdev;
- enum edgetpu_context_id context_id = edgetpu_group_context_id(group);
+ enum edgetpu_context_id context_id;
const u32 mmu_flags = map_to_mmu_flags(flags) | EDGETPU_MMU_HOST;
/* Pin user pages before holding any lock. */
@@ -1164,7 +1189,8 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
return PTR_ERR(pages);
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group)) {
+ context_id = edgetpu_group_context_id_locked(group);
+ if (!edgetpu_group_finalized_and_attached(group)) {
ret = -EINVAL;
goto error_unlock_group;
}
@@ -1236,7 +1262,7 @@ int edgetpu_device_group_unmap(struct edgetpu_device_group *group,
int ret = 0;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group)) {
+ if (!edgetpu_group_finalized_and_attached(group)) {
ret = -EINVAL;
goto unlock_group;
}
@@ -1279,7 +1305,7 @@ int edgetpu_device_group_sync_buffer(struct edgetpu_device_group *group,
return -EINVAL;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group)) {
+ if (!edgetpu_group_finalized_and_attached(group)) {
ret = -EINVAL;
goto unlock_group;
}
@@ -1323,7 +1349,7 @@ void edgetpu_group_mappings_show(struct edgetpu_device_group *group,
seq_puts(s, ": disbanded\n");
return;
}
- seq_printf(s, " context %u:\n", edgetpu_group_context_id(group));
+ seq_printf(s, " context %d:\n", edgetpu_group_context_id_locked(group));
if (group->host_mappings.count) {
seq_puts(s, "host buffer mappings:\n");
@@ -1363,7 +1389,7 @@ int edgetpu_mmap_csr(struct edgetpu_device_group *group,
ulong phys_base, vma_size, map_size;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group)) {
+ if (!edgetpu_group_finalized_and_attached(group)) {
ret = -EINVAL;
goto out;
}
@@ -1390,6 +1416,10 @@ int edgetpu_mmap_queue(struct edgetpu_device_group *group,
edgetpu_queue_mem *queue_mem;
mutex_lock(&group->lock);
+ /*
+ * VII queues are available even when mailbox detached, no need to check
+ * whether mailbox attached here.
+ */
if (!edgetpu_device_group_is_finalized(group)) {
ret = -EINVAL;
goto out;
@@ -1426,3 +1456,63 @@ void edgetpu_fatal_error_notify(struct edgetpu_dev *etdev)
}
mutex_unlock(&etdev->groups_lock);
}
+
+void edgetpu_group_detach_mailbox(struct edgetpu_device_group *group)
+{
+ struct edgetpu_mailbox_manager *mgr = group->etdev->mailbox_manager;
+ struct edgetpu_mailbox *mailbox;
+
+ if (!group->mailbox_detachable)
+ return;
+ mutex_lock(&group->lock);
+ if (edgetpu_group_mailbox_detached_locked(group)) {
+ mutex_unlock(&group->lock);
+ return;
+ }
+ if (edgetpu_device_group_is_finalized(group))
+ edgetpu_group_kci_close_device(group);
+ mailbox = group->vii.mailbox;
+ group->vii.mailbox = NULL;
+ edgetpu_device_group_put(mailbox->internal.group);
+ edgetpu_mailbox_remove(mgr, mailbox);
+ edgetpu_mmu_detach_domain(group->etdev, group->etdomain);
+ group->context_id = EDGETPU_CONTEXT_INVALID;
+
+ mutex_unlock(&group->lock);
+}
+
+int edgetpu_group_attach_mailbox(struct edgetpu_device_group *group)
+{
+ struct edgetpu_mailbox_manager *mgr = group->etdev->mailbox_manager;
+ struct edgetpu_mailbox *mailbox;
+ uint ctx_id;
+ int ret = 0;
+
+ if (!group->mailbox_detachable)
+ return 0;
+ mutex_lock(&group->lock);
+ if (!edgetpu_group_mailbox_detached_locked(group))
+ goto out;
+ ret = edgetpu_mmu_attach_domain(group->etdev, group->etdomain);
+ if (ret)
+ goto out;
+ if (group->etdomain->pasid == IOMMU_PASID_INVALID)
+ mailbox = edgetpu_mailbox_vii_add(mgr, 0);
+ else
+ mailbox = edgetpu_mailbox_vii_add(mgr, group->etdomain->pasid);
+ if (IS_ERR(mailbox)) {
+ edgetpu_mmu_detach_domain(group->etdev, group->etdomain);
+ ret = PTR_ERR(mailbox);
+ goto out;
+ }
+ ctx_id = mailbox->mailbox_id;
+ group->vii.mailbox = mailbox;
+ mailbox->internal.group = edgetpu_device_group_get(group);
+ edgetpu_mailbox_reinit_vii(group);
+ group->context_id = ctx_id;
+ if (edgetpu_device_group_is_finalized(group))
+ edgetpu_group_kci_open_device(group);
+out:
+ mutex_unlock(&group->lock);
+ return ret;
+}
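Together, these two routines maintain one invariant: a detached group holds no VII mailbox and reports context_id == EDGETPU_CONTEXT_INVALID, and both are no-ops unless the group was created detachable. A condensed, self-contained model of the wakelock-driven lifecycle that the edgetpu-fs.c hunk below drives (stand-in types, logic heavily simplified):

#include <stdbool.h>

/* Stand-ins for the driver types; sketch only. */
struct group { bool detachable; bool attached; };
struct client { unsigned int req_count; struct group *group; };

static void detach_mailbox(struct group *g)
{
	if (g->detachable && g->attached)
		g->attached = false; /* context_id -> EDGETPU_CONTEXT_INVALID */
}

static int attach_mailbox(struct group *g)
{
	if (g->detachable && !g->attached)
		g->attached = true; /* fresh mailbox, fresh context_id */
	return 0;
}

/* First acquire re-attaches; last release detaches. */
static int wakelock_get(struct client *c)
{
	int ret = 0;

	if (c->req_count == 0 && c->group)
		ret = attach_mailbox(c->group);
	if (!ret)
		c->req_count++;
	return ret;
}

static void wakelock_put(struct client *c)
{
	if (--c->req_count == 0 && c->group)
		detach_mailbox(c->group);
}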
diff --git a/drivers/edgetpu/edgetpu-device-group.h b/drivers/edgetpu/edgetpu-device-group.h
index 9098226..3c68dd4 100644
--- a/drivers/edgetpu/edgetpu-device-group.h
+++ b/drivers/edgetpu/edgetpu-device-group.h
@@ -55,6 +55,18 @@ struct edgetpu_device_group {
*/
refcount_t ref_count;
uint workload_id;
+ struct edgetpu_dev *etdev; /* the device opened by the leader */
+ /*
+ * Whether edgetpu_group_detach_mailbox() has effects on this group.
+ * This field is configured according to the priority field when
+ * creating this group.
+ */
+ bool mailbox_detachable;
+
+ /* protects everything in the following comment block */
+ struct mutex lock;
+ /* fields protected by @lock */
+
/*
* List of clients belonging to this group.
* The first client is the leader.
@@ -69,14 +81,21 @@ struct edgetpu_device_group {
*/
struct edgetpu_client **members;
enum edgetpu_device_group_status status;
- struct edgetpu_dev *etdev; /* the device opened by the leader */
struct edgetpu_vii vii; /* VII mailbox */
+ /*
+ * Context ID ranges from EDGETPU_CONTEXT_VII_BASE to
+ * EDGETPU_NCONTEXTS - 1.
+ * This equals EDGETPU_CONTEXT_INVALID when the group has mailbox
+ * detached (means the group isn't in any context at this time).
+ */
+ enum edgetpu_context_id context_id;
/* The IOMMU domain being associated to this group */
struct edgetpu_iommu_domain *etdomain;
/* matrix of P2P mailboxes */
struct edgetpu_p2p_mailbox **p2p_mailbox_matrix;
- /* protects clients, n_clients, status, and vii */
- struct mutex lock;
+
+ /* end of fields protected by @lock */
+
/* TPU IOVA mapped to host DRAM space */
struct edgetpu_mapping_root host_mappings;
/* TPU IOVA mapped to buffers backed by dma-buf */
@@ -102,8 +121,8 @@ static inline bool edgetpu_device_group_is_waiting(
*
* Must be called with lock held.
*/
-static inline bool edgetpu_device_group_is_finalized(
- const struct edgetpu_device_group *group)
+static inline bool
+edgetpu_device_group_is_finalized(const struct edgetpu_device_group *group)
{
return group->status == EDGETPU_DEVICE_GROUP_FINALIZED;
}
@@ -239,11 +258,16 @@ int edgetpu_device_group_sync_buffer(struct edgetpu_device_group *group,
/* Clear all mappings for a device group. */
void edgetpu_mappings_clear_group(struct edgetpu_device_group *group);
-/* Return context ID for group MMU mappings, based on VII mailbox index. */
+/*
+ * Return context ID for group MMU mappings.
+ *
+ * Caller holds @group->lock to prevent race, the context ID may be changed by
+ * edgetpu_group_{detach/attach}_mailbox.
+ */
static inline enum edgetpu_context_id
-edgetpu_group_context_id(struct edgetpu_device_group *group)
+edgetpu_group_context_id_locked(struct edgetpu_device_group *group)
{
- return EDGETPU_CONTEXT_VII_BASE + group->vii.mailbox->mailbox_id - 1;
+ return group->context_id;
}
/* dump mappings in @group */
@@ -290,4 +314,39 @@ bool edgetpu_set_group_join_lockout(struct edgetpu_dev *etdev, bool lockout);
/* Notify all device groups of @etdev about a failure on the die */
void edgetpu_fatal_error_notify(struct edgetpu_dev *etdev);
+/*
+ * Detach and release the mailbox of VII from @group.
+ * Some group operations would be disabled when a group has no mailbox attached.
+ */
+void edgetpu_group_detach_mailbox(struct edgetpu_device_group *group);
+/*
+ * Request and attach a mailbox of VII to @group.
+ *
+ * Return 0 on success.
+ */
+int edgetpu_group_attach_mailbox(struct edgetpu_device_group *group);
+
+/*
+ * Checks whether @group has mailbox detached.
+ *
+ * Caller holds @group->lock.
+ */
+static inline bool
+edgetpu_group_mailbox_detached_locked(const struct edgetpu_device_group *group)
+{
+ return group->context_id == EDGETPU_CONTEXT_INVALID;
+}
+
+/*
+ * Returns whether @group is finalized and has mailbox attached.
+ *
+ * Caller holds @group->lock.
+ */
+static inline bool
+edgetpu_group_finalized_and_attached(const struct edgetpu_device_group *group)
+{
+ return edgetpu_device_group_is_finalized(group) &&
+ !edgetpu_group_mailbox_detached_locked(group);
+}
+
#endif /* __EDGETPU_DEVICE_GROUP_H__ */
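The header changes standardize one calling pattern across the entry points converted in this patch: take group->lock, bail out unless the group is both finalized and currently attached, and only then read the context ID, which stays valid while the lock is held. Sketched with the helpers defined above (kernel-style illustration, not a buildable unit):

/* Pattern used by the map/unmap/sync/mmap paths after this patch. */
static int group_op(struct edgetpu_device_group *group)
{
	enum edgetpu_context_id ctx_id;
	int ret = 0;

	mutex_lock(&group->lock);
	if (!edgetpu_group_finalized_and_attached(group)) {
		ret = -EINVAL;
		goto out;
	}
	ctx_id = edgetpu_group_context_id_locked(group); /* never INVALID here */
	/* ... perform MMU/mailbox work against ctx_id ... */
out:
	mutex_unlock(&group->lock);
	return ret;
}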
diff --git a/drivers/edgetpu/edgetpu-dmabuf.c b/drivers/edgetpu/edgetpu-dmabuf.c
index 13b1497..b424463 100644
--- a/drivers/edgetpu/edgetpu-dmabuf.c
+++ b/drivers/edgetpu/edgetpu-dmabuf.c
@@ -129,13 +129,16 @@ rollback:
*
* If the first entry is not fetched (this could happen in bulk mappings),
* a TPU VA is still allocated according to @dmap->mmu_flags but not mapped.
+ *
+ * Caller holds @group->lock.
*/
static int etdev_map_dmabuf(struct edgetpu_dev *etdev,
struct edgetpu_dmabuf_map *dmap,
enum dma_data_direction dir, tpu_addr_t *tpu_addr_p)
{
struct edgetpu_device_group *group = dmap->map.priv;
- const enum edgetpu_context_id ctx_id = edgetpu_group_context_id(group);
+ const enum edgetpu_context_id ctx_id =
+ edgetpu_group_context_id_locked(group);
struct dmabuf_map_entry *entry = &dmap->entries[0];
tpu_addr_t tpu_addr;
int ret;
@@ -180,13 +183,18 @@ static int etdev_map_dmabuf(struct edgetpu_dev *etdev,
return 0;
}
-/* reverts etdev_map_dmabuf() */
+/*
+ * Reverts etdev_map_dmabuf().
+ *
+ * Caller holds @group->lock.
+ */
static void etdev_unmap_dmabuf(struct edgetpu_dev *etdev,
struct edgetpu_dmabuf_map *dmap,
tpu_addr_t tpu_addr)
{
struct edgetpu_device_group *group = dmap->map.priv;
- const enum edgetpu_context_id ctx_id = edgetpu_group_context_id(group);
+ const enum edgetpu_context_id ctx_id =
+ edgetpu_group_context_id_locked(group);
struct dmabuf_map_entry *entry = &dmap->entries[0];
if (entry->n == 0) {
@@ -200,12 +208,17 @@ static void etdev_unmap_dmabuf(struct edgetpu_dev *etdev,
}
}
-/* handles mirrored mapping request */
+/*
+ * Handles mirrored mapping request.
+ *
+ * Caller holds @group->lock.
+ */
static int group_map_dmabuf(struct edgetpu_device_group *group,
struct edgetpu_dmabuf_map *dmap,
enum dma_data_direction dir, tpu_addr_t *tpu_addr_p)
{
- const enum edgetpu_context_id ctx_id = edgetpu_group_context_id(group);
+ const enum edgetpu_context_id ctx_id =
+ edgetpu_group_context_id_locked(group);
struct edgetpu_dev *etdev = group->etdev;
tpu_addr_t tpu_addr;
uint i;
@@ -243,12 +256,17 @@ err_remove:
return ret;
}
-/* reverts group_map_dmabuf() */
+/*
+ * Reverts group_map_dmabuf().
+ *
+ * Caller holds @group->lock.
+ */
static void group_unmap_dmabuf(struct edgetpu_device_group *group,
struct edgetpu_dmabuf_map *dmap,
tpu_addr_t tpu_addr)
{
- const enum edgetpu_context_id ctx_id = edgetpu_group_context_id(group);
+ const enum edgetpu_context_id ctx_id =
+ edgetpu_group_context_id_locked(group);
struct edgetpu_dev *etdev;
uint i;
@@ -731,7 +749,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
goto err_put;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_group_finalized_and_attached(group))
goto err_unlock_group;
dmap = alloc_dmabuf_map(group, flags);
@@ -801,7 +819,7 @@ int edgetpu_unmap_dmabuf(struct edgetpu_device_group *group, u32 die_index,
mutex_lock(&group->lock);
/* the group is disbanded means all the mappings have been released */
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_group_finalized_and_attached(group))
goto out_unlock;
edgetpu_mapping_lock(mappings);
map = edgetpu_mapping_find_locked(mappings, die_index, tpu_addr);
@@ -837,7 +855,7 @@ int edgetpu_map_bulk_dmabuf(struct edgetpu_device_group *group,
if (arg->size == 0)
return -EINVAL;
mutex_lock(&group->lock);
- if (!edgetpu_device_group_is_finalized(group))
+ if (!edgetpu_group_finalized_and_attached(group))
goto err_unlock_group;
/* checks not all FDs are ignored */
for (i = 0; i < group->n_clients; i++)
diff --git a/drivers/edgetpu/edgetpu-fs.c b/drivers/edgetpu/edgetpu-fs.c
index 829ab5b..998704c 100644
--- a/drivers/edgetpu/edgetpu-fs.c
+++ b/drivers/edgetpu/edgetpu-fs.c
@@ -576,8 +576,14 @@ static int edgetpu_ioctl_release_wakelock(struct edgetpu_client *client)
return -EINVAL;
}
- edgetpu_pm_put(client->etdev->pm);
client->wakelock.req_count--;
+ if (!client->wakelock.req_count) {
+ mutex_lock(&client->group_lock);
+ if (client->group)
+ edgetpu_group_detach_mailbox(client->group);
+ mutex_unlock(&client->group_lock);
+ }
+ edgetpu_pm_put(client->etdev->pm);
etdev_dbg(client->etdev,
"%s: wakelock req count = %u CSR map count = %u\n", __func__,
client->wakelock.req_count, client->wakelock.csr_map_count);
@@ -599,16 +605,28 @@ static int edgetpu_ioctl_acquire_wakelock(struct edgetpu_client *client)
if (ret) {
etdev_warn(client->etdev, "%s: pm_get failed (%d)", __func__,
ret);
- mutex_unlock(&client->wakelock.lock);
- return ret;
+ goto out_unlock;
+ }
+ if (!client->wakelock.req_count) {
+ mutex_lock(&client->group_lock);
+ if (client->group)
+ ret = edgetpu_group_attach_mailbox(client->group);
+ mutex_unlock(&client->group_lock);
+ if (ret) {
+ etdev_warn(client->etdev,
+ "failed to attach mailbox: %d", ret);
+ edgetpu_pm_put(client->etdev->pm);
+ goto out_unlock;
+ }
}
client->wakelock.req_count++;
etdev_dbg(client->etdev,
"%s: wakelock req count = %u CSR map count = %u\n", __func__,
client->wakelock.req_count, client->wakelock.csr_map_count);
+out_unlock:
mutex_unlock(&client->wakelock.lock);
- return 0;
+ return ret;
}
long edgetpu_ioctl(struct file *file, uint cmd, ulong arg)
@@ -729,10 +747,6 @@ static struct edgetpu_dumpregs_range common_statusregs_ranges[] = {
.lastreg = EDGETPU_REG_USER_HIB_DMA_PAUSED,
},
{
- .firstreg = EDGETPU_REG_USER_HIB_FIRST_ERROR_STATUS,
- .lastreg = EDGETPU_REG_USER_HIB_FIRST_ERROR_STATUS,
- },
- {
.firstreg = EDGETPU_REG_USER_HIB_ERROR_STATUS,
.lastreg = EDGETPU_REG_USER_HIB_ERROR_MASK,
},
@@ -741,10 +755,6 @@ static struct edgetpu_dumpregs_range common_statusregs_ranges[] = {
.lastreg = EDGETPU_REG_SC_DECODEPC,
},
{
- .firstreg = EDGETPU_REG_SC_RUNSTATUS,
- .lastreg = EDGETPU_REG_SC_RUNSTATUS,
- },
- {
.firstreg = EDGETPU_REG_SC_ERROR,
.lastreg = EDGETPU_REG_SC_ERROR_MASK,
},
@@ -753,30 +763,14 @@ static struct edgetpu_dumpregs_range common_statusregs_ranges[] = {
.lastreg = EDGETPU_REG_SC_ERROR_INFO,
},
{
- .firstreg = EDGETPU_REG_USER_HIB_OUT_ACTVQ_INT_STAT,
- .lastreg = EDGETPU_REG_USER_HIB_OUT_ACTVQ_INT_STAT,
- },
- {
.firstreg = EDGETPU_REG_USER_HIB_INSTRQ_TAIL,
.lastreg = EDGETPU_REG_USER_HIB_INSTRQ_INT_STAT,
},
{
- .firstreg = EDGETPU_REG_USER_HIB_IN_ACTVQ_INT_STAT,
- .lastreg = EDGETPU_REG_USER_HIB_IN_ACTVQ_INT_STAT,
- },
- {
- .firstreg = EDGETPU_REG_USER_HIB_PARAMQ_INT_STAT,
- .lastreg = EDGETPU_REG_USER_HIB_PARAMQ_INT_STAT,
- },
- {
.firstreg = EDGETPU_REG_USER_HIB_SC_HOST_INT_STAT,
.lastreg = EDGETPU_REG_USER_HIB_SC_HOST_INT_STAT,
},
{
- .firstreg = EDGETPU_REG_USER_HIB_TOPLVL_INT_STAT,
- .lastreg = EDGETPU_REG_USER_HIB_TOPLVL_INT_STAT,
- },
- {
.firstreg = EDGETPU_REG_USER_HIB_FATALERR_INT_STAT,
.lastreg = EDGETPU_REG_USER_HIB_FATALERR_INT_STAT,
},
diff --git a/drivers/edgetpu/edgetpu-internal.h b/drivers/edgetpu/edgetpu-internal.h
index 14ad2dc..34da92d 100644
--- a/drivers/edgetpu/edgetpu-internal.h
+++ b/drivers/edgetpu/edgetpu-internal.h
@@ -7,6 +7,13 @@
#ifndef __EDGETPU_INTERNAL_H__
#define __EDGETPU_INTERNAL_H__
+#include <linux/printk.h>
+
+#ifdef CONFIG_X86
+#include <asm/pgtable_types.h>
+#include <asm/set_memory.h>
+#endif
+
#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
@@ -61,6 +68,7 @@
* specific values in the mmu driver.
*/
enum edgetpu_context_id {
+ EDGETPU_CONTEXT_INVALID = -1,
EDGETPU_CONTEXT_KCI = 0, /* TPU firmware/kernel ID 0 */
EDGETPU_CONTEXT_VII_BASE = 1, /* groups 0-6 IDs 1-7 */
/* contexts 8 and above not yet allocated */
@@ -74,6 +82,9 @@ struct edgetpu_coherent_mem {
tpu_addr_t tpu_addr; /* DMA handle for TPU internal IOMMU, if any */
u64 host_addr; /* address mapped on host for debugging */
size_t size;
+#ifdef CONFIG_X86
+ bool is_set_uc; /* memory has been marked uncached on X86 */
+#endif
};
struct edgetpu_reg_window {
@@ -239,6 +250,38 @@ static inline void edgetpu_dev_write_64(struct edgetpu_dev *etdev,
writeq_relaxed(value, etdev->regs.mem + reg_offset);
}
+static inline void
+edgetpu_x86_coherent_mem_init(struct edgetpu_coherent_mem *mem)
+{
+#ifdef CONFIG_X86
+ mem->is_set_uc = false;
+#endif
+}
+
+static inline void
+edgetpu_x86_coherent_mem_set_uc(struct edgetpu_coherent_mem *mem)
+{
+#ifdef CONFIG_X86
+ if (!mem->is_set_uc) {
+ set_memory_uc((unsigned long)mem->vaddr, mem->size >>
+ PAGE_SHIFT);
+ mem->is_set_uc = true;
+ }
+#endif
+}
+
+static inline void
+edgetpu_x86_coherent_mem_set_wb(struct edgetpu_coherent_mem *mem)
+{
+#ifdef CONFIG_X86
+ if (mem->is_set_uc) {
+ set_memory_wb((unsigned long)mem->vaddr, mem->size >>
+ PAGE_SHIFT);
+ mem->is_set_uc = false;
+ }
+#endif
+}
+
/* External drivers can hook up to edgetpu driver using these calls. */
int edgetpu_open(struct edgetpu_dev *etdev, struct file *file);
long edgetpu_ioctl(struct file *file, uint cmd, ulong arg);
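The new helpers make the x86 cache-attribute flips idempotent by tracking the current attribute in is_set_uc, so the callers below (the iremap pool and telemetry code) can invoke set_uc/set_wb unconditionally. A tiny standalone model of that guard, with puts() standing in for the real set_memory_uc()/set_memory_wb() calls:

#include <stdbool.h>
#include <stdio.h>

struct mem { bool is_set_uc; };

/* Guarded transitions: repeated calls in the same state are no-ops. */
static void mem_set_uc(struct mem *m)
{
	if (!m->is_set_uc) {
		puts("set_memory_uc"); /* stand-in for the real call */
		m->is_set_uc = true;
	}
}

static void mem_set_wb(struct mem *m)
{
	if (m->is_set_uc) {
		puts("set_memory_wb");
		m->is_set_uc = false;
	}
}

int main(void)
{
	struct mem m = { false };

	mem_set_uc(&m);
	mem_set_uc(&m); /* second call prints nothing */
	mem_set_wb(&m);
	mem_set_wb(&m); /* likewise */
	return 0;
}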
diff --git a/drivers/edgetpu/edgetpu-iremap-pool.c b/drivers/edgetpu/edgetpu-iremap-pool.c
index 52094b1..13b9977 100644
--- a/drivers/edgetpu/edgetpu-iremap-pool.c
+++ b/drivers/edgetpu/edgetpu-iremap-pool.c
@@ -6,13 +6,6 @@
* Copyright (C) 2020 Google, Inc.
*/
-#include <linux/printk.h>
-
-#ifdef CONFIG_X86
-#include <asm/pgtable_types.h>
-#include <asm/set_memory.h>
-#endif
-
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
@@ -20,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+#include "edgetpu-internal.h"
#include "edgetpu-iremap-pool.h"
#include "edgetpu-mmu.h"
@@ -94,16 +88,11 @@ static int edgetpu_alloc_coherent(struct edgetpu_dev *etdev, size_t size,
GFP_KERNEL);
if (!mem->vaddr)
return -ENOMEM;
-#ifdef CONFIG_X86
- set_memory_uc((unsigned long)mem->vaddr, size >> PAGE_SHIFT);
-#endif
+ edgetpu_x86_coherent_mem_init(mem);
mem->tpu_addr =
edgetpu_mmu_tpu_map(etdev, mem->dma_addr, size,
DMA_BIDIRECTIONAL, context_id, flags);
if (!mem->tpu_addr) {
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)mem->vaddr, size >> PAGE_SHIFT);
-#endif
dma_free_coherent(etdev->dev, size, mem->vaddr, mem->dma_addr);
mem->vaddr = NULL;
return -EINVAL;
@@ -145,9 +134,7 @@ static void edgetpu_free_coherent(struct edgetpu_dev *etdev,
enum edgetpu_context_id context_id)
{
edgetpu_mmu_tpu_unmap(etdev, mem->tpu_addr, mem->size, context_id);
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)mem->vaddr, mem->size >> PAGE_SHIFT);
-#endif
+ edgetpu_x86_coherent_mem_set_wb(mem);
dma_free_coherent(etdev->dev, mem->size, mem->vaddr, mem->dma_addr);
mem->vaddr = NULL;
}
@@ -192,6 +179,7 @@ int edgetpu_iremap_mmap(struct edgetpu_dev *etdev, struct vm_area_struct *vma,
vma->vm_pgoff = 0;
if (!etmempool) {
+ edgetpu_x86_coherent_mem_set_uc(mem);
ret = dma_mmap_coherent(etdev->dev, vma, mem->vaddr,
mem->dma_addr, mem->size);
vma->vm_pgoff = orig_pgoff;
diff --git a/drivers/edgetpu/edgetpu-mailbox.c b/drivers/edgetpu/edgetpu-mailbox.c
index 123cee7..f2fb859 100644
--- a/drivers/edgetpu/edgetpu-mailbox.c
+++ b/drivers/edgetpu/edgetpu-mailbox.c
@@ -5,11 +5,6 @@
* Copyright (C) 2019 Google, Inc.
*/
-#ifdef CONFIG_X86
-#include <linux/printk.h>
-#include <asm/pgtable_types.h>
-#include <asm/set_memory.h>
-#endif
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -34,6 +29,15 @@ static inline bool valid_circular_queue_size(u32 size)
return true;
}
+/* Return context ID for mailbox. */
+static inline enum edgetpu_context_id
+edgetpu_mailbox_context_id(struct edgetpu_mailbox *mailbox)
+{
+ if (!mailbox)
+ return EDGETPU_CONTEXT_INVALID;
+ return EDGETPU_CONTEXT_VII_BASE + mailbox->mailbox_id - 1;
+}
+
/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
static void edgetpu_mailbox_set_cmd_queue_tail(struct edgetpu_mailbox *mailbox,
u32 value)
@@ -56,9 +60,8 @@ static void edgetpu_mailbox_set_resp_queue_head(struct edgetpu_mailbox *mailbox,
*
* Caller holds mgr->mailboxes_lock.
*/
-static struct edgetpu_mailbox *edgetpu_mailbox_create_locked(
- struct edgetpu_mailbox_manager *mgr,
- uint index)
+static struct edgetpu_mailbox *
+edgetpu_mailbox_create_locked(struct edgetpu_mailbox_manager *mgr, uint index)
{
struct edgetpu_mailbox *mailbox = kzalloc(sizeof(*mailbox), GFP_ATOMIC);
@@ -97,7 +100,9 @@ static void edgetpu_mailbox_disable_ith(struct edgetpu_mailbox_manager *mgr,
static void edgetpu_vii_irq_handler(struct edgetpu_mailbox *mailbox)
{
- edgetpu_group_notify(mailbox->internal.group, EDGETPU_EVENT_RESPDATA);
+ if (mailbox->internal.group)
+ edgetpu_group_notify(mailbox->internal.group,
+ EDGETPU_EVENT_RESPDATA);
}
/*
@@ -206,7 +211,6 @@ void edgetpu_mailbox_reset(struct edgetpu_mailbox *mailbox)
/* Sets the priority of @mailbox. */
void edgetpu_mailbox_set_priority(struct edgetpu_mailbox *mailbox, u32 priority)
{
- /* TODO(b/137343013): check whether priority is valid */
EDGETPU_MAILBOX_CONTEXT_WRITE(mailbox, priority, priority);
}
@@ -238,8 +242,10 @@ edgetpu_mailbox_vii_add(struct edgetpu_mailbox_manager *mgr, uint id)
mailbox = ERR_PTR(-EBUSY);
} else {
mailbox = edgetpu_mailbox_create_locked(mgr, id);
- if (!IS_ERR(mailbox))
+ if (!IS_ERR(mailbox)) {
mgr->mailboxes[id] = mailbox;
+ mailbox->handle_irq = edgetpu_vii_irq_handler;
+ }
}
write_unlock_irqrestore(&mgr->mailboxes_lock, flags);
return mailbox;
@@ -457,7 +463,7 @@ int edgetpu_mailbox_init_vii(struct edgetpu_vii *vii,
__func__, mailbox->mailbox_id, vii->resp_queue_mem.tpu_addr,
&vii->resp_queue_mem.dma_addr);
mailbox->internal.group = edgetpu_device_group_get(group);
- mailbox->handle_irq = edgetpu_vii_irq_handler;
+ vii->etdev = group->etdev;
vii->mailbox = mailbox;
EDGETPU_MAILBOX_CONTEXT_WRITE(mailbox, context_enable, 1);
return 0;
@@ -466,17 +472,15 @@ int edgetpu_mailbox_init_vii(struct edgetpu_vii *vii,
void edgetpu_mailbox_remove_vii(struct edgetpu_vii *vii)
{
struct edgetpu_dev *etdev;
- struct edgetpu_mailbox_manager *mgr;
- if (!vii->mailbox)
- return;
- etdev = vii->mailbox->internal.group->etdev;
- mgr = etdev->mailbox_manager;
+ etdev = vii->etdev;
edgetpu_mailbox_free_queue(etdev, vii->mailbox, &vii->cmd_queue_mem);
edgetpu_mailbox_free_queue(etdev, vii->mailbox, &vii->resp_queue_mem);
- edgetpu_device_group_put(vii->mailbox->internal.group);
- edgetpu_mailbox_remove(mgr, vii->mailbox);
- vii->mailbox = NULL;
+ if (vii->mailbox) {
+ edgetpu_device_group_put(vii->mailbox->internal.group);
+ edgetpu_mailbox_remove(etdev->mailbox_manager, vii->mailbox);
+ vii->mailbox = NULL;
+ }
}
/*
@@ -666,62 +670,42 @@ void edgetpu_mailbox_reset_vii(struct edgetpu_mailbox_manager *mgr)
write_unlock_irqrestore(&mgr->mailboxes_lock, flags);
}
-static int edgetpu_mailbox_reinit_vii(struct edgetpu_device_group *group)
+void edgetpu_mailbox_reinit_vii(struct edgetpu_device_group *group)
{
int cmd_queue_size, resp_queue_size;
struct edgetpu_mailbox *mailbox = group->vii.mailbox;
struct edgetpu_mailbox_attr *attr = &group->mbox_attr;
- int ret;
+ /*
+ * Sizes here should never be invalid since they are checked in
+ * edgetpu_mailbox_init_vii().
+ */
cmd_queue_size = convert_runtime_queue_size_to_fw(attr->cmd_queue_size,
attr->sizeof_cmd);
- if (cmd_queue_size < 0)
- return cmd_queue_size;
-
resp_queue_size = convert_runtime_queue_size_to_fw(
attr->resp_queue_size, attr->sizeof_resp);
- if (resp_queue_size < 0)
- return resp_queue_size;
etdev_dbg(group->etdev, "Restoring vii. workload_id=%u mbox_id=%u\n",
- group->workload_id, group->vii.mailbox->mailbox_id);
+ group->workload_id, mailbox->mailbox_id);
etdev_dbg(group->etdev, "Priority: %d\n", attr->priority);
etdev_dbg(group->etdev, "Tail doorbell %s",
attr->cmdq_tail_doorbell ? "enabled" : "disabled");
etdev_dbg(group->etdev, "cmd queue: addr=%llX size=%u\n",
- group->vii.cmd_queue_mem.tpu_addr,
- cmd_queue_size);
+ group->vii.cmd_queue_mem.tpu_addr, cmd_queue_size);
etdev_dbg(group->etdev, "resp queue: addr=%llX size=%u\n",
- group->vii.resp_queue_mem.tpu_addr,
- resp_queue_size);
+ group->vii.resp_queue_mem.tpu_addr, resp_queue_size);
edgetpu_mailbox_set_priority(mailbox, attr->priority);
EDGETPU_MAILBOX_CONTEXT_WRITE(mailbox, cmd_queue_tail_doorbell_enable,
attr->cmdq_tail_doorbell);
-
- ret = edgetpu_mailbox_set_queue(mailbox, MAILBOX_CMD_QUEUE,
- group->vii.cmd_queue_mem.tpu_addr,
- cmd_queue_size);
- if (ret) {
- etdev_warn(group->etdev,
- "%s: Restoring command queue failed: %d\n", __func__,
- ret);
- return ret;
- }
-
- ret = edgetpu_mailbox_set_queue(mailbox, MAILBOX_RESP_QUEUE,
- group->vii.resp_queue_mem.tpu_addr,
- resp_queue_size);
- if (ret) {
- etdev_warn(group->etdev,
- "%s: Restoring response queue failed: %d\n",
- __func__, ret);
- return ret;
- }
-
+ edgetpu_mailbox_set_queue(mailbox, MAILBOX_CMD_QUEUE,
+ group->vii.cmd_queue_mem.tpu_addr,
+ cmd_queue_size);
+ edgetpu_mailbox_set_queue(mailbox, MAILBOX_RESP_QUEUE,
+ group->vii.resp_queue_mem.tpu_addr,
+ resp_queue_size);
EDGETPU_MAILBOX_CONTEXT_WRITE(mailbox, context_enable, 1);
- return 0;
}
void edgetpu_mailbox_restore_active_vii_queues(struct edgetpu_dev *etdev)
@@ -733,7 +717,7 @@ void edgetpu_mailbox_restore_active_vii_queues(struct edgetpu_dev *etdev)
mutex_lock(&etdev->groups_lock);
for (i = 0; i < EDGETPU_NGROUPS; i++) {
group = etdev->groups[i];
- if (group) {
+ if (group && !edgetpu_group_mailbox_detached_locked(group)) {
edgetpu_mailbox_reinit_vii(group);
if (edgetpu_device_group_is_finalized(group))
mailbox_ids |=
diff --git a/drivers/edgetpu/edgetpu-mailbox.h b/drivers/edgetpu/edgetpu-mailbox.h
index 56b35bc..238915f 100644
--- a/drivers/edgetpu/edgetpu-mailbox.h
+++ b/drivers/edgetpu/edgetpu-mailbox.h
@@ -66,8 +66,12 @@ struct edgetpu_mailbox {
typedef struct edgetpu_coherent_mem edgetpu_queue_mem;
struct edgetpu_vii {
- /* The mailbox this VII uses, can be NULL for an uninitialized VII. */
+ /*
+ * The mailbox this VII uses, can be NULL when uninitialized or mailbox
+ * detached.
+ */
struct edgetpu_mailbox *mailbox;
+ struct edgetpu_dev *etdev;
edgetpu_queue_mem cmd_queue_mem;
edgetpu_queue_mem resp_queue_mem;
};
@@ -234,18 +238,19 @@ void edgetpu_mailbox_free_queue(struct edgetpu_dev *etdev,
edgetpu_queue_mem *mem);
/*
+ * Re-programs the CSRs of queue addresses, context, priority etc. to @group's
+ * VII mailbox.
+ *
+ * Caller holds @group->lock and ensures @group has mailbox attached.
+ */
+void edgetpu_mailbox_reinit_vii(struct edgetpu_device_group *group);
+
+/*
* Re-configure VII mailbox queues which have an active client, re-using
* existing buffers
*/
void edgetpu_mailbox_restore_active_vii_queues(struct edgetpu_dev *etdev);
-/* Return context ID for mailbox. */
-static inline enum edgetpu_context_id
-edgetpu_mailbox_context_id(struct edgetpu_mailbox *mailbox)
-{
- return EDGETPU_CONTEXT_VII_BASE + mailbox->mailbox_id - 1;
-}
-
/* utility functions for P2P */
int edgetpu_mailbox_p2p_batch(struct edgetpu_mailbox_manager *mgr, uint n,
diff --git a/drivers/edgetpu/edgetpu-mapping.h b/drivers/edgetpu/edgetpu-mapping.h
index d27e016..35906b0 100644
--- a/drivers/edgetpu/edgetpu-mapping.h
+++ b/drivers/edgetpu/edgetpu-mapping.h
@@ -34,6 +34,19 @@ struct edgetpu_mapping {
u64 host_address;
u32 die_index; /* this mapping is mapped on the @die_index-th die */
tpu_addr_t device_address;
+ /*
+ * The size used for allocating @alloc_iova in bytes. This field may be
+ * set by edgetpu_mmu_map().
+ */
+ size_t alloc_size;
+ /*
+ * This might be different from @device_address since edgetpu_mmu_map()
+ * may allocate more space than the size requested in the reason of
+ * alignment. This field and @alloc_size are expected to be used in
+ * edgetpu_mmu_unmap() and/or to reserve the IOVA space before calling
+ * edgetpu_mmu_map_iova_sgt().
+ */
+ tpu_addr_t alloc_iova;
edgetpu_map_flag_t flags; /* the flag passed by the runtime */
/* DMA attributes to be performed for dma_(un)map calls. */
unsigned long dma_attrs;
diff --git a/drivers/edgetpu/edgetpu-mmu.h b/drivers/edgetpu/edgetpu-mmu.h
index 8175aa1..8f453b2 100644
--- a/drivers/edgetpu/edgetpu-mmu.h
+++ b/drivers/edgetpu/edgetpu-mmu.h
@@ -10,6 +10,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/version.h>
#include "edgetpu-internal.h"
#include "edgetpu.h"
@@ -19,6 +20,12 @@
#include <linux/iommu-ext.h>
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
+#ifndef IOMMU_PASID_INVALID
+#define IOMMU_PASID_INVALID (-1U)
+#endif
+#endif
+
#define IS_MIRRORED(flag) (!((flag) & EDGETPU_MAP_NONMIRRORED))
/* flags for MMU operations */
@@ -120,6 +127,9 @@ void edgetpu_mmu_unmap(struct edgetpu_dev *dev, struct edgetpu_mapping *map,
* Description: Request TPU to map @iova to the pages presented by @sgt.
*
* Returns 0 on success, -errno on error.
+ *
+ * Note: Caller should use edgetpu_mmu_reserve() before calling this method if
+ * the target @iova isn't acquired from edgetpu_mmu_alloc(@etdev).
*/
int edgetpu_mmu_map_iova_sgt(struct edgetpu_dev *etdev, tpu_addr_t iova,
struct sg_table *sgt, enum dma_data_direction dir,
diff --git a/drivers/edgetpu/edgetpu-pm.h b/drivers/edgetpu/edgetpu-pm.h
index dc20efd..45d2fad 100644
--- a/drivers/edgetpu/edgetpu-pm.h
+++ b/drivers/edgetpu/edgetpu-pm.h
@@ -12,7 +12,7 @@
#define STATE_SHUTDOWN 1
#define STATE_RUN 0
-#define EDGETPU_PCHANNEL_STATE_CHANGE_TIMEOUT 15 /* 15us */
+#define EDGETPU_PCHANNEL_STATE_CHANGE_TIMEOUT 1000 /* 1 ms */
#define EDGETPU_PCHANNEL_STATE_CHANGE_RETRIES 10
struct edgetpu_pm_private;
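With EDGETPU_PCHANNEL_STATE_CHANGE_RETRIES = 10, the bumped timeout raises the worst-case p-channel wait from 10 x 15 us = 150 us to 10 x 1 ms = 10 ms. The polling loop itself is not part of this diff; a hedged sketch of the kind of loop these two constants would bound:

#define TIMEOUT_US 1000 /* EDGETPU_PCHANNEL_STATE_CHANGE_TIMEOUT */
#define RETRIES    10   /* EDGETPU_PCHANNEL_STATE_CHANGE_RETRIES */

/* Hypothetical: poll @read_state until it reports @want, bounded above. */
static int wait_state_change(int (*read_state)(void),
			     void (*delay_us)(unsigned int), int want)
{
	int i;

	for (i = 0; i < RETRIES; i++) {
		if (read_state() == want)
			return 0;
		delay_us(TIMEOUT_US);
	}
	return -1; /* gave up after RETRIES * TIMEOUT_US microseconds */
}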
diff --git a/drivers/edgetpu/edgetpu-telemetry.c b/drivers/edgetpu/edgetpu-telemetry.c
index 928c3c9..68351a8 100644
--- a/drivers/edgetpu/edgetpu-telemetry.c
+++ b/drivers/edgetpu/edgetpu-telemetry.c
@@ -4,22 +4,23 @@
*
* Copyright (C) 2019-2020 Google, Inc.
*/
-#ifdef CONFIG_X86
-#include <linux/printk.h> // pr_warn used by set_memory.h
-#include <asm/pgtable_types.h>
-#include <asm/set_memory.h>
-#endif
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "edgetpu-internal.h"
#include "edgetpu-iremap-pool.h"
#include "edgetpu-mmu.h"
#include "edgetpu-telemetry.h"
#include "edgetpu.h"
+/* When log data arrives, recheck for more log data after this delay. */
+#define TELEMETRY_LOG_RECHECK_DELAY 200 /* ms */
+
static struct edgetpu_telemetry *
select_telemetry(struct edgetpu_telemetry_ctx *ctx,
enum edgetpu_telemetry_type type)
@@ -36,88 +37,6 @@ select_telemetry(struct edgetpu_telemetry_ctx *ctx,
}
}
-static int telemetry_init(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *tel, const char *name,
- struct edgetpu_coherent_mem *mem)
-{
- const size_t size = EDGETPU_TELEMETRY_BUFFER_SIZE;
- const u32 flags = EDGETPU_MMU_DIE | EDGETPU_MMU_32 | EDGETPU_MMU_HOST;
- void *vaddr;
- dma_addr_t dma_addr;
- tpu_addr_t tpu_addr;
-
- if (mem) {
- tel->coherent_mem = *mem;
- vaddr = mem->vaddr;
- tel->caller_mem = true;
- } else {
- vaddr = dmam_alloc_coherent(etdev->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
-#ifdef CONFIG_X86
- set_memory_uc((unsigned long)vaddr, size >> PAGE_SHIFT);
-#endif
- tpu_addr = edgetpu_mmu_tpu_map(etdev, dma_addr, size,
- DMA_BIDIRECTIONAL,
- EDGETPU_CONTEXT_KCI, flags);
- if (!tpu_addr) {
- dev_err(etdev->dev,
- "%s: failed to map buffer for '%s'\n",
- etdev->dev_name, name);
- return -ENOSPC;
- }
- tel->coherent_mem.vaddr = vaddr;
- tel->coherent_mem.dma_addr = dma_addr;
- tel->coherent_mem.tpu_addr = tpu_addr;
- tel->coherent_mem.size = size;
- tel->caller_mem = false;
- }
-
- rwlock_init(&tel->ctx_mem_lock);
- tel->name = name;
-
- tel->header = (struct edgetpu_telemetry_header *)vaddr;
- tel->header->head = 0;
- tel->header->tail = 0;
- tel->header->entries_dropped = 0;
-
- tel->ctx = NULL;
-
- spin_lock_init(&tel->state_lock);
- tel->state = EDGETPU_TELEMETRY_ENABLED;
- tel->inited = true;
-
- return 0;
-}
-
-static void telemetry_exit(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *tel)
-{
- ulong flags;
-
- if (!tel->inited)
- return;
- spin_lock_irqsave(&tel->state_lock, flags);
- /* Prevent racing with the IRQ handler */
- tel->state = EDGETPU_TELEMETRY_INVALID;
- spin_unlock_irqrestore(&tel->state_lock, flags);
-
- if (tel->coherent_mem.tpu_addr && !tel->caller_mem) {
- edgetpu_mmu_tpu_unmap(etdev, tel->coherent_mem.tpu_addr,
- tel->coherent_mem.size,
- EDGETPU_CONTEXT_KCI);
- tel->coherent_mem.tpu_addr = 0;
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)tel->coherent_mem.vaddr,
- tel->coherent_mem.size >> PAGE_SHIFT);
-#endif
- }
- if (tel->ctx)
- eventfd_ctx_put(tel->ctx);
- tel->ctx = NULL;
-}
-
static int telemetry_kci(struct edgetpu_dev *etdev,
struct edgetpu_telemetry *tel,
int (*send_kci)(struct edgetpu_kci *, u64, u32))
@@ -201,9 +120,9 @@ static void copy_with_wrap(struct edgetpu_telemetry_header *header, void *dest,
}
/* Log messages from TPU CPU to dmesg */
-static void edgetpu_fw_log(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *log)
+static void edgetpu_fw_log(struct edgetpu_telemetry *log)
{
+ struct edgetpu_dev *etdev = log->etdev;
struct edgetpu_telemetry_header *header = log->header;
struct edgetpu_log_entry_header entry;
u8 *start;
@@ -255,23 +174,55 @@ static void edgetpu_fw_log(struct edgetpu_dev *etdev,
}
/* Consumes the queue buffer. */
-static void edgetpu_fw_trace(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *trace)
+static void edgetpu_fw_trace(struct edgetpu_telemetry *trace)
{
struct edgetpu_telemetry_header *header = trace->header;
header->head = header->tail;
}
-/*
- * If the buffer queue is not empty,
- * - signals the event context.
- * - calls @fallback if event is not set.
- */
+/* Worker for processing log/trace buffers. */
+
+static void telemetry_worker(struct work_struct *work)
+{
+ struct edgetpu_telemetry *tel =
+ container_of(work, struct edgetpu_telemetry, work);
+ u32 prev_head;
+ ulong flags;
+
+ /*
+ * Loop while telemetry enabled, there is data to be consumed,
+ * and the previous iteration made progress. If another IRQ arrives
+ * just after the last head != tail check we should get another worker
+ * schedule.
+ */
+ do {
+ spin_lock_irqsave(&tel->state_lock, flags);
+ if (tel->state != EDGETPU_TELEMETRY_ENABLED) {
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ return;
+ }
+
+ prev_head = tel->header->head;
+ if (tel->header->head != tel->header->tail) {
+ read_lock(&tel->ctx_mem_lock);
+ if (tel->ctx)
+ eventfd_signal(tel->ctx, 1);
+ else
+ tel->fallback_fn(tel);
+ read_unlock(&tel->ctx_mem_lock);
+ }
+
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ msleep(TELEMETRY_LOG_RECHECK_DELAY);
+ } while (tel->header->head != tel->header->tail &&
+ tel->header->head != prev_head);
+}
+
+
+/* If the buffer queue is not empty, schedules worker. */
static void telemetry_irq_handler(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *tel,
- void (*fallback)(struct edgetpu_dev *,
- struct edgetpu_telemetry *))
+ struct edgetpu_telemetry *tel)
{
if (!tel->inited)
return;
@@ -279,12 +230,7 @@ static void telemetry_irq_handler(struct edgetpu_dev *etdev,
if (tel->state == EDGETPU_TELEMETRY_ENABLED &&
tel->header->head != tel->header->tail) {
- read_lock(&tel->ctx_mem_lock);
- if (tel->ctx)
- eventfd_signal(tel->ctx, 1);
- else
- fallback(etdev, tel);
- read_unlock(&tel->ctx_mem_lock);
+ schedule_work(&tel->work);
}
spin_unlock(&tel->state_lock);
@@ -323,6 +269,88 @@ static int telemetry_mmap_buffer(struct edgetpu_dev *etdev,
return ret;
}
+static int telemetry_init(struct edgetpu_dev *etdev,
+ struct edgetpu_telemetry *tel, const char *name,
+ struct edgetpu_coherent_mem *mem,
+ void (*fallback)(struct edgetpu_telemetry *))
+{
+ const size_t size = EDGETPU_TELEMETRY_BUFFER_SIZE;
+ const u32 flags = EDGETPU_MMU_DIE | EDGETPU_MMU_32 | EDGETPU_MMU_HOST;
+ void *vaddr;
+ dma_addr_t dma_addr;
+ tpu_addr_t tpu_addr;
+
+ if (mem) {
+ tel->coherent_mem = *mem;
+ vaddr = mem->vaddr;
+ tel->caller_mem = true;
+ } else {
+ vaddr = dmam_alloc_coherent(etdev->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+ tpu_addr = edgetpu_mmu_tpu_map(etdev, dma_addr, size,
+ DMA_BIDIRECTIONAL,
+ EDGETPU_CONTEXT_KCI, flags);
+ if (!tpu_addr) {
+ dev_err(etdev->dev,
+ "%s: failed to map buffer for '%s'\n",
+ etdev->dev_name, name);
+ return -ENOSPC;
+ }
+ tel->coherent_mem.vaddr = vaddr;
+ tel->coherent_mem.dma_addr = dma_addr;
+ tel->coherent_mem.tpu_addr = tpu_addr;
+ tel->coherent_mem.size = size;
+ tel->caller_mem = false;
+ edgetpu_x86_coherent_mem_set_uc(&tel->coherent_mem);
+ }
+
+ rwlock_init(&tel->ctx_mem_lock);
+ tel->name = name;
+ tel->etdev = etdev;
+
+ tel->header = (struct edgetpu_telemetry_header *)vaddr;
+ tel->header->head = 0;
+ tel->header->tail = 0;
+ tel->header->entries_dropped = 0;
+
+ tel->ctx = NULL;
+
+ spin_lock_init(&tel->state_lock);
+ INIT_WORK(&tel->work, telemetry_worker);
+ tel->fallback_fn = fallback;
+ tel->state = EDGETPU_TELEMETRY_ENABLED;
+ tel->inited = true;
+
+ return 0;
+}
+
+static void telemetry_exit(struct edgetpu_dev *etdev,
+ struct edgetpu_telemetry *tel)
+{
+ ulong flags;
+
+ if (!tel->inited)
+ return;
+ spin_lock_irqsave(&tel->state_lock, flags);
+ /* Prevent racing with the IRQ handler or worker */
+ tel->state = EDGETPU_TELEMETRY_INVALID;
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ cancel_work_sync(&tel->work);
+
+ if (tel->coherent_mem.tpu_addr && !tel->caller_mem) {
+ edgetpu_mmu_tpu_unmap(etdev, tel->coherent_mem.tpu_addr,
+ tel->coherent_mem.size,
+ EDGETPU_CONTEXT_KCI);
+ tel->coherent_mem.tpu_addr = 0;
+ edgetpu_x86_coherent_mem_set_wb(&tel->coherent_mem);
+ }
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+}
+
int edgetpu_telemetry_init(struct edgetpu_dev *etdev,
struct edgetpu_coherent_mem *log_mem,
struct edgetpu_coherent_mem *trace_mem)
@@ -332,12 +360,12 @@ int edgetpu_telemetry_init(struct edgetpu_dev *etdev,
if (!etdev->telemetry)
return -ENODEV;
ret = telemetry_init(etdev, &etdev->telemetry->log, "telemetry_log",
- log_mem);
+ log_mem, edgetpu_fw_log);
if (ret)
return ret;
#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
ret = telemetry_init(etdev, &etdev->telemetry->trace, "telemetry_trace",
- trace_mem);
+ trace_mem, edgetpu_fw_trace);
if (ret) {
telemetry_exit(etdev, &etdev->telemetry->log);
return ret;
@@ -393,9 +421,8 @@ void edgetpu_telemetry_irq_handler(struct edgetpu_dev *etdev)
{
if (!etdev->telemetry)
return;
- telemetry_irq_handler(etdev, &etdev->telemetry->log, edgetpu_fw_log);
- telemetry_irq_handler(etdev, &etdev->telemetry->trace,
- edgetpu_fw_trace);
+ telemetry_irq_handler(etdev, &etdev->telemetry->log);
+ telemetry_irq_handler(etdev, &etdev->telemetry->trace);
}
void edgetpu_telemetry_mappings_show(struct edgetpu_dev *etdev,
diff --git a/drivers/edgetpu/edgetpu-telemetry.h b/drivers/edgetpu/edgetpu-telemetry.h
index 879fcb6..308c8ea 100644
--- a/drivers/edgetpu/edgetpu-telemetry.h
+++ b/drivers/edgetpu/edgetpu-telemetry.h
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "edgetpu-internal.h"
#include "edgetpu-kci.h"
@@ -57,6 +58,8 @@ struct edgetpu_log_entry_header {
} __packed;
struct edgetpu_telemetry {
+ struct edgetpu_dev *etdev;
+
/*
* State transitioning is to prevent racing in IRQ handlers. e.g. the
* interrupt comes when the kernel is releasing buffers.
@@ -77,6 +80,11 @@ struct edgetpu_telemetry {
rwlock_t ctx_mem_lock; /* protects ctx and coherent_mem */
const char *name; /* for debugging */
bool inited; /* whether telemetry_init() succeeded */
+
+ /* Worker for handling data. */
+ struct work_struct work;
+ /* Fallback function to call for default log/trace handling. */
+ void (*fallback_fn)(struct edgetpu_telemetry *tel);
};
struct edgetpu_telemetry_ctx {
diff --git a/drivers/edgetpu/edgetpu.h b/drivers/edgetpu/edgetpu.h
index 7c07e39..b494224 100644
--- a/drivers/edgetpu/edgetpu.h
+++ b/drivers/edgetpu/edgetpu.h
@@ -122,6 +122,11 @@ struct edgetpu_event_register {
#define EDGETPU_SET_EVENTFD \
_IOW(EDGETPU_IOCTL_BASE, 5, struct edgetpu_event_register)
+/*
+ * @priority with this bit means the mailbox could be released when wakelock is
+ * released.
+ */
+#define EDGETPU_PRIORITY_DETACHABLE (1u << 3)
struct edgetpu_mailbox_attr {
/*
* There are limitations on these size fields, see the error cases in
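A runtime opts in per group by setting the new bit in the priority field when creating the group; a hedged userspace fragment (only the priority field and flag come from this header, the rest of the setup is assumed):

struct edgetpu_mailbox_attr attr = { 0 };

/* Allow the driver to release this group's mailbox while no wakelock
 * is held; it is re-acquired on the next wakelock acquire. */
attr.priority |= EDGETPU_PRIORITY_DETACHABLE;
/* ... fill in queue sizes etc., then pass attr to group creation ... */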