summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNeela Chithirala <chithiralan@google.com>2022-01-17 04:41:54 +0000
committerNeela Chithirala <chithiralan@google.com>2022-02-03 06:27:20 +0000
commit3ccb2479717de3089dbbcb894ddd045b2ddc256c (patch)
treea577f284ff42d11b1fcfb7c338a7f0b59b10672a
parente14069f1739b05c7a7f60ae73c8ce14b91ef12e0 (diff)
downloadgs201-3ccb2479717de3089dbbcb894ddd045b2ddc256c.tar.gz
Merge branch 'gs201-release' to android13-gs-pixel-5.10
* gs201-release: gxp: Fix multicore VDs not shutting down clean Bug: 215303765 gxp: Rework VD locking and remove mailbox locking Bug: 189018271 gxp: initial commit for thermal driver Bug: 177217526 gxp: Add wakelock interface and make debugfs wakelock aware Bug: 215192870 gxp: Hook-up pm ops for driver suspend/resume Bug: 204924965 gxp: Dynamically power BLK_AUR on and off Bug: 204924965 gxp: support GXP_PLATFORM=GEM5 Bug: 204942713 gxp: Remove delay waiting for FW mailbox init Bug: 207037428 gxp: Fix infrequent crash during mailbox release gxp: Release FW file on firmware loading errors gxp: return GXP_RESP_CANCELLED if timeout occurs Bug: 207432733 gxp: Remove initial 10ms delay when disabling telemetry gxp: Cast telemetry buffer IOVAs to u32s before use gxp: check sscoredump by CONFIG_SUBSYSTEM_COREDUMP gxp: Fix double-lock hang in gxp_telemetry_vma_close gxp: Log driver git commit hash on probe Bug: 206744969 gxp: Add ioctl for reading the TOP global counter gxp: Implement eventfd signalling for telemetry gxp: Notify running cores of telemetry state changes gxp: Add notification interface Signed-off-by: Neela Chithirala <chithiralan@google.com> Change-Id: Ic7cd7b81ee643371c600ac208ae33d6344ed7f1b
-rw-r--r--Makefile26
-rw-r--r--gxp-config.h4
-rw-r--r--gxp-csrs.h2
-rw-r--r--gxp-debug-dump.c38
-rw-r--r--gxp-debug-dump.h9
-rw-r--r--gxp-debugfs.c115
-rw-r--r--gxp-dma-iommu-gem5.c499
-rw-r--r--gxp-dma-iommu.c54
-rw-r--r--gxp-dma-rmem.c10
-rw-r--r--gxp-dma.h27
-rw-r--r--gxp-firmware-data.c37
-rw-r--r--gxp-firmware-data.h24
-rw-r--r--gxp-firmware.c64
-rw-r--r--gxp-hw-mailbox-driver.c22
-rw-r--r--gxp-internal.h21
-rw-r--r--gxp-lpm.c32
-rw-r--r--gxp-lpm.h6
-rw-r--r--gxp-mailbox.c95
-rw-r--r--gxp-mailbox.h26
-rw-r--r--gxp-mb-notification.c66
-rw-r--r--gxp-notification.h95
-rw-r--r--gxp-platform.c167
-rw-r--r--gxp-pm.c39
-rw-r--r--gxp-sw-mailbox-driver.c42
-rw-r--r--gxp-telemetry.c266
-rw-r--r--gxp-telemetry.h56
-rw-r--r--gxp-thermal.c328
-rw-r--r--gxp-thermal.h44
-rw-r--r--gxp-vd.c42
-rw-r--r--gxp-wakelock.c128
-rw-r--r--gxp-wakelock.h79
-rw-r--r--gxp.h28
32 files changed, 2241 insertions, 250 deletions
diff --git a/Makefile b/Makefile
index 5a4b17e..97d393e 100644
--- a/Makefile
+++ b/Makefile
@@ -15,15 +15,27 @@ gxp-objs += \
gxp-lpm.o \
gxp-mailbox.o \
gxp-mapping.o \
+ gxp-mb-notification.o \
gxp-platform.o \
gxp-range-alloc.o \
gxp-pm.o \
gxp-telemetry.o \
- gxp-vd.o
+ gxp-thermal.o \
+ gxp-vd.o \
+ gxp-wakelock.o
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
M ?= $(shell pwd)
+# Obtain the current git commit hash for logging on probe
+GIT_PATH=$(shell cd $(KERNEL_SRC); readlink -e $(M))
+ifeq ($(shell git --git-dir=$(GIT_PATH)/.git rev-parse --is-inside-work-tree),true)
+ GIT_REPO_STATE=$(shell (git --git-dir=$(GIT_PATH)/.git --work-tree=$(GIT_PATH) status --porcelain | grep -q .) && echo -dirty)
+ ccflags-y += -DGIT_REPO_TAG=\"$(shell git --git-dir=$(GIT_PATH)/.git rev-parse --short HEAD)$(GIT_REPO_STATE)\"
+else
+ ccflags-y += -DGIT_REPO_TAG=\"Not\ a\ git\ repository\"
+endif
+
# If building via make directly, specify target platform by adding
# "GXP_PLATFORM=<target>"
# With one of the following values:
@@ -33,6 +45,11 @@ M ?= $(shell pwd)
# Defaults to building for CLOUDRIPPER if not otherwise specified.
GXP_PLATFORM ?= CLOUDRIPPER
+# To test against the build closest to production mode, choose CLOUDRIPPER.
+ifdef CONFIG_GXP_TEST
+ GXP_PLATFORM = CLOUDRIPPER
+endif
+
# Default to using the HW mailbox and SysMMU
GXP_SW_MAILBOX ?= 0
GXP_HAS_SYSMMU ?= 1
@@ -48,7 +65,12 @@ endif
# Setup which version of the gxp-dma interface is used.
ifeq ($(GXP_HAS_SYSMMU),1)
ccflags-y += -DCONFIG_GXP_HAS_SYSMMU
- gxp-objs += gxp-dma-iommu.o
+ # For gem5, need to adopt dma interface without aux domain.
+ ifeq ($(GXP_PLATFORM), GEM5)
+ gxp-objs += gxp-dma-iommu-gem5.o
+ else
+ gxp-objs += gxp-dma-iommu.o
+ endif
else
gxp-objs += gxp-dma-rmem.o
endif
diff --git a/gxp-config.h b/gxp-config.h
index cc06d1f..d17b071 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -9,7 +9,11 @@
#define __GXP_CONFIG_H__
#define GXP_DRIVER_NAME "gxp_platform"
+#ifndef CONFIG_GXP_GEM5
#define GXP_NUM_CORES 4
+#else
+#define GXP_NUM_CORES 1
+#endif
#if defined(CONFIG_GXP_ZEBU) || defined(CONFIG_GXP_IP_ZEBU)
#define GXP_TIME_DELAY_FACTOR 20
diff --git a/gxp-csrs.h b/gxp-csrs.h
index f1d710b..d077b9e 100644
--- a/gxp-csrs.h
+++ b/gxp-csrs.h
@@ -43,6 +43,7 @@ enum gxp_csrs {
};
#define GXP_REG_COMMON_INT_MASK_0_DOORBELLS_MASK 0xFFFFFFFF
+#define GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT 16
enum gxp_core_csrs {
GXP_REG_INST_BPM = 0x0000,
@@ -50,6 +51,7 @@ enum gxp_core_csrs {
GXP_REG_PROCESSOR_ID = 0x4004,
GXP_REG_ALT_RESET_VECTOR = 0x4008,
GXP_REG_COMMON_INT_MASK_0 = 0x4010,
+ GXP_REG_ETM_PWRCTL = 0xB020,
};
#define DOORBELL_COUNT 32
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index ea0339d..5c7ae61 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -13,7 +13,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif
@@ -21,7 +21,6 @@
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
-#include "gxp-mailbox.h"
#include "gxp-tmp.h"
#define GXP_COREDUMP_PENDING 0xF
@@ -251,7 +250,7 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp,
struct gxp_core_dump_header *core_dump_header;
struct gxp_core_header *core_header;
int i;
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
struct sscd_platform_data *pdata =
(struct sscd_platform_data *)gxp->debug_dump_mgr->sscd_pdata;
struct sscd_segment *segs;
@@ -289,7 +288,7 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp,
data_addr += segs[seg_idx].size;
seg_idx++;
}
-#endif // CONFIG_ANDROID
+#endif // CONFIG_SUBSYSTEM_COREDUMP
/* Core */
for (i = 0; i < GXP_NUM_CORES; i++) {
@@ -304,7 +303,7 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp,
goto out;
}
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
/* Core Header */
segs[seg_idx].addr = core_header;
segs[seg_idx].size = sizeof(struct gxp_core_header);
@@ -347,7 +346,7 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp,
*/
msleep(1000);
mutex_unlock(&gxp->debug_dump_mgr->sscd_lock);
-#endif // CONFIG_ANDROID
+#endif // CONFIG_SUBSYSTEM_COREDUMP
/* This bit signals that core dump has been processed */
core_header->dump_available = 0;
@@ -357,7 +356,7 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp,
}
out:
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
kfree(segs);
#endif
return;
@@ -430,10 +429,11 @@ static void gxp_wait_kernel_init_dump_work(struct work_struct *work)
void gxp_debug_dump_process_dump(struct work_struct *work)
{
- struct gxp_mailbox *mailbox = container_of(work, struct gxp_mailbox,
- debug_dump_work);
- uint core_id = mailbox->core_id;
- struct gxp_dev *gxp = mailbox->gxp;
+ struct gxp_debug_dump_work *debug_dump_work =
+ container_of(work, struct gxp_debug_dump_work, work);
+
+ uint core_id = debug_dump_work->core_id;
+ struct gxp_dev *gxp = debug_dump_work->gxp;
struct gxp_debug_dump_manager *mgr;
struct gxp_core_dump *core_dump;
struct gxp_core_dump_header *core_dump_header;
@@ -473,6 +473,17 @@ void gxp_debug_dump_process_dump(struct work_struct *work)
}
}
+struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
+ uint core)
+{
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+
+ if (!mgr)
+ return NULL;
+
+ return &mgr->debug_dump_works[core].work;
+}
+
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
struct resource r;
@@ -513,6 +524,11 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
for (core = 0; core < GXP_NUM_CORES; core++) {
core_dump_header = &mgr->core_dump->core_dump_header[core];
core_dump_header->core_header.dump_available = 0;
+
+ mgr->debug_dump_works[core].gxp = gxp;
+ mgr->debug_dump_works[core].core_id = core;
+ INIT_WORK(&mgr->debug_dump_works[core].work,
+ gxp_debug_dump_process_dump);
}
/* No need for a DMA handle since the carveout is coherent */
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index cd6bc23..013f27c 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -141,8 +141,15 @@ struct gxp_core_dump {
uint32_t dump_data[];
};
+struct gxp_debug_dump_work {
+ struct work_struct work;
+ struct gxp_dev *gxp;
+ uint core_id;
+};
+
struct gxp_debug_dump_manager {
struct gxp_dev *gxp;
+ struct gxp_debug_dump_work debug_dump_works[GXP_NUM_CORES];
struct gxp_core_dump *core_dump; /* start of the core dump */
void *sscd_dev;
void *sscd_pdata;
@@ -160,5 +167,7 @@ struct gxp_debug_dump_manager {
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata);
void gxp_debug_dump_exit(struct gxp_dev *gxp);
void gxp_debug_dump_process_dump(struct work_struct *work);
+struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
+ uint core);
#endif /* __GXP_DEBUG_DUMP_H__ */
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 88b3190..c9ba69c 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -15,6 +15,7 @@
#include "gxp-mailbox.h"
#include "gxp-telemetry.h"
#include "gxp-vd.h"
+#include "gxp-wakelock.h"
static int gxp_debugfs_lpm_test(void *data, u64 val)
{
@@ -55,7 +56,9 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
cmd.buffer_descriptor.size = 0;
cmd.buffer_descriptor.flags = 0;
+ down_read(&gxp->vd_semaphore);
gxp_mailbox_execute_cmd(gxp->mailbox_mgr->mailboxes[core], &cmd, &resp);
+ up_read(&gxp->vd_semaphore);
dev_info(gxp->dev,
"Mailbox Command Sent: cmd.code=%d, resp.status=%d, resp.retval=%d\n",
@@ -93,7 +96,9 @@ static int gxp_debugfs_pingpong(void *data, u64 val)
cmd.buffer_descriptor.size = 0;
cmd.buffer_descriptor.flags = (u32) val;
+ down_read(&gxp->vd_semaphore);
gxp_mailbox_execute_cmd(gxp->mailbox_mgr->mailboxes[core], &cmd, &resp);
+ up_read(&gxp->vd_semaphore);
dev_info(
gxp->dev,
@@ -107,14 +112,21 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_pingpong_fops, NULL, gxp_debugfs_pingpong,
static int gxp_firmware_run_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_client *client_to_delete;
int ret = 0;
+ down_write(&gxp->vd_semaphore);
+
if (val) {
if (gxp->debugfs_client) {
dev_err(gxp->dev, "Firmware already running!\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
+ /* Cannot run firmware without a wakelock */
+ gxp_wakelock_acquire(gxp);
+
/*
* Cleanup any bad state or corruption the device might've
* caused
@@ -127,7 +139,8 @@ static int gxp_firmware_run_set(void *data, u64 val)
dev_err(gxp->dev, "Failed to create client\n");
ret = PTR_ERR(gxp->debugfs_client);
gxp->debugfs_client = NULL;
- return ret;
+ gxp_wakelock_release(gxp);
+ goto out;
}
ret = gxp_vd_allocate(gxp->debugfs_client, GXP_NUM_CORES);
@@ -135,17 +148,30 @@ static int gxp_firmware_run_set(void *data, u64 val)
dev_err(gxp->dev, "Failed to allocate VD\n");
gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
- return ret;
+ gxp_wakelock_release(gxp);
+ goto out;
}
} else {
if (!gxp->debugfs_client) {
dev_err(gxp->dev, "Firmware not running!\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- gxp_client_destroy(gxp->debugfs_client);
+ client_to_delete = gxp->debugfs_client;
gxp->debugfs_client = NULL;
+
+ up_write(&gxp->vd_semaphore);
+
+ gxp_client_destroy(client_to_delete);
+ gxp_wakelock_release(gxp);
+
+ /* Return here, since vd_semaphore has already been unlocked */
+ return ret;
}
+out:
+ up_write(&gxp->vd_semaphore);
+
return ret;
}
@@ -160,11 +186,54 @@ static int gxp_firmware_run_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(gxp_firmware_run_fops, gxp_firmware_run_get,
gxp_firmware_run_set, "%llx\n");
+static int gxp_wakelock_set(void *data, u64 val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ int ret = 0;
+
+ if (val > 0) {
+ /* Wakelock Acquire */
+ if (gxp->debugfs_wakelock_held) {
+ dev_warn(gxp->dev,
+ "Debugfs wakelock is already held.\n");
+ return -EBUSY;
+ }
+
+ ret = gxp_wakelock_acquire(gxp);
+ if (ret)
+ dev_err(gxp->dev,
+ "Failed to acquire debugfs wakelock ret=%d\n",
+ ret);
+ else
+ gxp->debugfs_wakelock_held = true;
+ } else {
+ /* Wakelock Release */
+ if (!gxp->debugfs_wakelock_held) {
+ dev_warn(gxp->dev, "Debugfs wakelock not held.\n");
+ return -EIO;
+ }
+
+ gxp_wakelock_release(gxp);
+ gxp->debugfs_wakelock_held = false;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gxp_wakelock_fops, NULL, gxp_wakelock_set, "%llx\n");
+
static int gxp_blk_powerstate_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
int ret = 0;
+ if (gxp_pm_get_blk_state(gxp) == AUR_OFF) {
+ dev_warn(
+ gxp->dev,
+ "Cannot set block power state when the block is off. Obtain a wakelock to power it on.\n");
+ return -ENODEV;
+ }
+
if (val >= AUR_DVFS_MIN_STATE) {
ret = gxp_pm_blk_set_state_acpm(gxp, val);
} else {
@@ -178,6 +247,13 @@ static int gxp_blk_powerstate_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
+ if (gxp_pm_get_blk_state(gxp) == AUR_OFF) {
+ dev_warn(
+ gxp->dev,
+ "Cannot get block power state when the block is off.\n");
+ return -ENODEV;
+ }
+
*val = gxp_pm_blk_get_state_acpm(gxp);
return 0;
}
@@ -252,12 +328,37 @@ static int gxp_log_buff_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_buff_fops, gxp_log_buff_get, gxp_log_buff_set,
"%llu\n");
+static int gxp_log_eventfd_signal_set(void *data, u64 val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ int ret = 0;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ if (!gxp->telemetry_mgr->logging_efd) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = eventfd_signal(gxp->telemetry_mgr->logging_efd, 1);
+
+out:
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_eventfd_signal_fops, NULL,
+ gxp_log_eventfd_signal_set, "%llu\n");
+
void gxp_create_debugfs(struct gxp_dev *gxp)
{
gxp->d_entry = debugfs_create_dir("gxp", NULL);
if (IS_ERR_OR_NULL(gxp->d_entry))
return;
+ gxp->debugfs_wakelock_held = false;
+
debugfs_create_file("lpm_test", 0200, gxp->d_entry, gxp,
&gxp_lpm_test_fops);
debugfs_create_file("mailbox", 0200, gxp->d_entry, gxp,
@@ -266,11 +367,15 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
&gxp_pingpong_fops);
debugfs_create_file("firmware_run", 0600, gxp->d_entry, gxp,
&gxp_firmware_run_fops);
+ debugfs_create_file("wakelock", 0200, gxp->d_entry, gxp,
+ &gxp_wakelock_fops);
debugfs_create_file("blk_powerstate", 0600, gxp->d_entry, gxp,
&gxp_blk_powerstate_fops);
debugfs_create_file("coredump", 0200, gxp->d_entry, gxp,
&gxp_coredump_fops);
debugfs_create_file("log", 0600, gxp->d_entry, gxp, &gxp_log_buff_fops);
+ debugfs_create_file("log_eventfd", 0200, gxp->d_entry, gxp,
+ &gxp_log_eventfd_signal_fops);
}
void gxp_remove_debugfs(struct gxp_dev *gxp)
diff --git a/gxp-dma-iommu-gem5.c b/gxp-dma-iommu-gem5.c
new file mode 100644
index 0000000..4c691f6
--- /dev/null
+++ b/gxp-dma-iommu-gem5.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP DMA implemented via IOMMU without AUX domain and SSMT support.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "gxp-config.h"
+#include "gxp-dma.h"
+#include "gxp-iova.h"
+#include "gxp-mapping.h"
+
+struct gxp_dma_iommu_manager {
+ struct gxp_dma_manager dma_mgr;
+ struct iommu_domain *default_domain;
+};
+
+/* Fault handler */
+
+static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)token;
+
+ switch (fault->type) {
+ case IOMMU_FAULT_DMA_UNRECOV:
+ dev_err(gxp->dev, "Unrecoverable IOMMU fault!\n");
+ break;
+ case IOMMU_FAULT_PAGE_REQ:
+ dev_err(gxp->dev, "IOMMU page request fault!\n");
+ break;
+ default:
+ dev_err(gxp->dev, "Unexpected IOMMU fault type (%d)\n",
+ fault->type);
+ return -EAGAIN;
+ }
+
+ /*
+ * Normally the iommu driver should fill out the `event` struct for
+ * unrecoverable errors, and the `prm` struct for page request faults.
+ * The SysMMU driver, instead, always fills out the `event` struct.
+ *
+ * Note that the `fetch_addr` and `perm` fields are never filled out,
+ * so we skip printing them.
+ */
+ dev_err(gxp->dev, "reason = %08X\n", fault->event.reason);
+ dev_err(gxp->dev, "flags = %08X\n", fault->event.flags);
+ dev_err(gxp->dev, "pasid = %08X\n", fault->event.pasid);
+ dev_err(gxp->dev, "addr = %llX\n", fault->event.addr);
+
+ // Tell the IOMMU driver to carry on
+ return -EAGAIN;
+}
+
+/* gxp-dma.h Interface */
+
+int gxp_dma_init(struct gxp_dev *gxp)
+{
+ struct gxp_dma_iommu_manager *mgr;
+ int ret;
+
+ /* GXP can only address 32-bit IOVAs */
+ ret = dma_set_mask_and_coherent(gxp->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(gxp->dev, "Failed to set DMA mask\n");
+ return ret;
+ }
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->default_domain = iommu_get_domain_for_dev(gxp->dev);
+ if (!mgr->default_domain) {
+ dev_err(gxp->dev, "Failed to find default IOMMU domain\n");
+ return -EIO;
+ }
+
+ if (iommu_register_device_fault_handler(gxp->dev, sysmmu_fault_handler,
+ gxp)) {
+ dev_err(gxp->dev, "Failed to register iommu fault handler\n");
+ return -EIO;
+ }
+
+ gxp->dma_mgr = &(mgr->dma_mgr);
+
+ return 0;
+}
+
+void gxp_dma_exit(struct gxp_dev *gxp)
+{
+ if (iommu_unregister_device_fault_handler(gxp->dev))
+ dev_err(gxp->dev,
+ "Failed to unregister SysMMU fault handler\n");
+}
+
+#define SYNC_BARRIERS_SIZE 0x100000
+#define SYNC_BARRIERS_TOP_OFFSET 0x100000
+#define EXT_TPU_MBX_SIZE 0x2000
+
+/* Offset from mailbox base to the device interface that needs to be mapped */
+#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+
+int gxp_dma_map_resources(struct gxp_dev *gxp)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ unsigned int core;
+ int ret = 0;
+
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_AURORA_TOP,
+ gxp->regs.paddr, gxp->regs.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /*
+ * Firmware expects to access the sync barriers at a separate
+ * address, lower than the rest of the AURORA_TOP registers.
+ */
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_SYNC_BARRIERS,
+ gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
+ SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /*
+ * TODO(b/202213606): Map FW regions of all cores in a VD for
+ * each other at VD creation.
+ */
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_FIRMWARE(0),
+ gxp->fwbufs[0].paddr,
+ gxp->fwbufs[0].size * GXP_NUM_CORES,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_CORE_DUMP,
+ gxp->coredumpbuf.paddr, gxp->coredumpbuf.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_FW_DATA,
+ gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_MAILBOX(core),
+ gxp->mbx[core].paddr +
+ MAILBOX_DEVICE_INTERFACE_OFFSET,
+ gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /* Only map the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ ret = iommu_map(
+ mgr->default_domain,
+ GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
+ gxp->tpu_dev.mbx_paddr +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
+ gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
+ gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
+ }
+ gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
+ gxp->coredumpbuf.daddr = GXP_IOVA_CORE_DUMP;
+ gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
+
+ return ret;
+
+err:
+ /*
+ * Attempt to unmap all resources.
+ * Any resource that hadn't been mapped yet will cause `iommu_unmap()`
+ * to return immediately, so it's safe to try to unmap everything.
+ */
+ gxp_dma_unmap_resources(gxp);
+ return ret;
+}
+
+void gxp_dma_unmap_resources(struct gxp_dev *gxp)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ unsigned int core;
+
+ iommu_unmap(mgr->default_domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+ iommu_unmap(mgr->default_domain, GXP_IOVA_SYNC_BARRIERS,
+ SYNC_BARRIERS_SIZE);
+ /*
+ * TODO(b/202213606): A core should only have access to the FW
+ * of other cores if they're in the same VD, and have the FW
+ * region unmapped on VD destruction.
+ */
+ iommu_unmap(mgr->default_domain, GXP_IOVA_FIRMWARE(0),
+ gxp->fwbufs[0].size * GXP_NUM_CORES);
+ iommu_unmap(mgr->default_domain, GXP_IOVA_CORE_DUMP,
+ gxp->coredumpbuf.size);
+ iommu_unmap(mgr->default_domain, GXP_IOVA_FW_DATA, gxp->fwdatabuf.size);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ iommu_unmap(mgr->default_domain, GXP_IOVA_MAILBOX(core),
+ gxp->mbx[core].size);
+ /* Only unmap the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ iommu_unmap(mgr->default_domain,
+ GXP_IOVA_EXT_TPU_MBX +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE);
+ }
+ }
+}
+
+static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
+ struct iommu_domain *domain,
+ dma_addr_t daddr)
+{
+ struct sg_table *sgt;
+ ulong offset;
+ uint num_ents;
+ int ret;
+ struct scatterlist *next;
+ size_t size_in_page;
+ struct page *page;
+ void *va_base = ptr;
+
+ /* Calculate the number of entries needed in the table */
+ offset = offset_in_page(va_base);
+ if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 ||
+ size + offset < size))
+ return ERR_PTR(-EINVAL);
+ num_ents = (size + offset) / PAGE_SIZE;
+ if ((size + offset) % PAGE_SIZE)
+ num_ents++;
+
+ /* Allocate and setup the table for filling out */
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(sgt, num_ents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+ next = sgt->sgl;
+
+ /*
+ * Fill in the first scatterlist entry.
+ * This is the only one which may start at a non-page-aligned address.
+ */
+ size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
+ PAGE_SIZE - offset_in_page(ptr) :
+ size;
+ page = phys_to_page(iommu_iova_to_phys(domain, daddr));
+ sg_set_page(next, page, size_in_page, offset_in_page(ptr));
+ size -= size_in_page;
+ ptr += size_in_page;
+ next = sg_next(next);
+
+ while (size > 0) {
+ /*
+ * Fill in and link the next scatterlist entry.
+ * `ptr` is now page-aligned, so it is only necessary to check
+ * if this entire page is part of the buffer, or if the buffer
+ * ends part way through the page (which means this is the last
+ * entry in the list).
+ */
+ size_in_page = size > PAGE_SIZE ? PAGE_SIZE : size;
+ page = phys_to_page(iommu_iova_to_phys(
+ domain, daddr + (unsigned long long)(ptr - va_base)));
+ sg_set_page(next, page, size_in_page, 0);
+
+ size -= size_in_page;
+ ptr += size_in_page;
+ next = sg_next(next);
+ }
+
+ return sgt;
+}
+
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+ struct edgetpu_ext_mailbox_info *mbx_info)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint orig_core_list = core_list;
+ u64 queue_iova;
+ int core;
+ int ret;
+ int i = 0;
+
+ while (core_list) {
+ phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
+ phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
+
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ ret = iommu_map(mgr->default_domain, queue_iova, cmdq_pa,
+ mbx_info->cmdq_size, IOMMU_WRITE);
+ if (ret)
+ goto error;
+ ret = iommu_map(mgr->default_domain,
+ queue_iova + mbx_info->cmdq_size, respq_pa,
+ mbx_info->respq_size, IOMMU_READ);
+ if (ret) {
+ iommu_unmap(mgr->default_domain, queue_iova,
+ mbx_info->cmdq_size);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ core_list ^= orig_core_list;
+ while (core_list) {
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ iommu_unmap(mgr->default_domain, queue_iova,
+ mbx_info->cmdq_size);
+ iommu_unmap(mgr->default_domain,
+ queue_iova + mbx_info->cmdq_size,
+ mbx_info->respq_size);
+ }
+ return ret;
+}
+
+void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_tpu_mbx_desc mbx_desc)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint core_list = mbx_desc.phys_core_list;
+ u64 queue_iova;
+ int core;
+
+ while (core_list) {
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ iommu_unmap(mgr->default_domain, queue_iova,
+ mbx_desc.cmdq_size);
+ iommu_unmap(mgr->default_domain,
+ queue_iova + mbx_desc.cmdq_size,
+ mbx_desc.respq_size);
+ }
+}
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
+
+void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ uint gxp_dma_flags)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ void *buf;
+ struct sg_table *sgt;
+ dma_addr_t daddr;
+
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+ /* Allocate a coherent buffer in the default domain */
+ buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
+ if (!buf) {
+ dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
+ return NULL;
+ }
+
+ if (dma_handle)
+ *dma_handle = daddr;
+
+ return buf;
+}
+
+void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+ dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
+}
+
+dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction,
+ unsigned long attrs, uint gxp_dma_flags)
+{
+ dma_addr_t daddr;
+
+ daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
+ attrs);
+ if (dma_mapping_error(gxp->dev, daddr))
+ return DMA_MAPPING_ERROR;
+
+ return daddr;
+}
+
+void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
+}
+
+dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction,
+ unsigned long attrs, uint gxp_dma_flags)
+{
+ dma_addr_t daddr;
+
+ daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
+ attrs);
+ if (dma_mapping_error(gxp->dev, daddr))
+ return DMA_MAPPING_ERROR;
+
+ return daddr;
+}
+
+void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
+}
+
+dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
+ phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction direction,
+ unsigned long attrs, uint gxp_dma_flags)
+{
+ dma_addr_t daddr;
+
+ daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
+ if (dma_mapping_error(gxp->dev, daddr))
+ return DMA_MAPPING_ERROR;
+
+ return daddr;
+}
+
+void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
+}
+
+int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs, uint gxp_dma_flags)
+{
+ return dma_map_sg_attrs(gxp->dev, sg, nents, direction, attrs);
+}
+
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
+}
+
+void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ /* Syncing is not domain specific. Just call through to DMA API */
+ dma_sync_single_for_cpu(gxp->dev, dma_handle, size, direction);
+}
+
+void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ /* Syncing is not domain specific. Just call through to DMA API */
+ dma_sync_single_for_device(gxp->dev, dma_handle, size, direction);
+}
+
+void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ /* Syncing is not domain specific. Just call through to DMA API */
+ dma_sync_sg_for_cpu(gxp->dev, sg, nents, direction);
+}
+
+void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ /* Syncing is not domain specific. Just call through to DMA API */
+ dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
+}
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index dfb6dcf..05d76be 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -16,6 +16,7 @@
#include "gxp-dma-iommu.h"
#include "gxp-iova.h"
#include "gxp-mapping.h"
+#include "gxp-pm.h"
struct gxp_dma_iommu_manager {
struct gxp_dma_manager dma_mgr;
@@ -70,6 +71,30 @@ static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
}
+int gxp_dma_ssmt_program(struct gxp_dev *gxp)
+{
+/* SSMT is not supported in unittests */
+#ifndef CONFIG_GXP_TEST
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ unsigned int core;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ ssmt_set_vid_for_sid(mgr->idma_ssmt_base, mgr->core_vids[core],
+ IDMA_SID_FOR_CORE(core));
+ ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
+ mgr->core_vids[core],
+ INST_SID_FOR_CORE(core));
+ ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
+ mgr->core_vids[core],
+ DATA_SID_FOR_CORE(core));
+ }
+#endif
+
+ return 0;
+}
+
+
static inline int ssmt_init(struct gxp_dev *gxp,
struct gxp_dma_iommu_manager *mgr)
{
@@ -204,22 +229,6 @@ int gxp_dma_init(struct gxp_dev *gxp)
iommu_aux_get_pasid(mgr->core_domains[core], gxp->dev);
dev_notice(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
mgr->core_vids[core]);
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
- /*
- * TODO(b/194347483) SSMT programming must happen each time
- * BLK_AURORA is powered on, but currently BLK_AURORA is
- * turned on at probe and left on until removal.
- */
- ssmt_set_vid_for_sid(mgr->idma_ssmt_base, mgr->core_vids[core],
- IDMA_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
- mgr->core_vids[core],
- INST_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
- mgr->core_vids[core],
- DATA_SID_FOR_CORE(core));
-#endif
}
gxp->dma_mgr = &(mgr->dma_mgr);
@@ -247,11 +256,20 @@ void gxp_dma_exit(struct gxp_dev *gxp)
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
unsigned int core;
+ /*
+ * The SysMMU driver writes registers in the SysMMU during
+ * `iommu_aux_detach_device()`, to disable that domain's VID and flush
+ * its TLB. BLK_AUR must be powered on for these writes to succeed.
+ */
+ gxp_pm_blk_on(gxp);
+
for (core = 0; core < GXP_NUM_CORES; core++) {
iommu_aux_detach_device(mgr->core_domains[core], gxp->dev);
iommu_domain_free(mgr->core_domains[core]);
}
+ gxp_pm_blk_off(gxp);
+
if (iommu_unregister_device_fault_handler(gxp->dev))
dev_err(gxp->dev,
"Failed to unregister SysMMU fault handler\n");
@@ -445,7 +463,7 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
return sgt;
}
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
@@ -512,7 +530,7 @@ void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
mbx_desc.cmdq_size, mbx_desc.respq_size);
}
}
-#endif // CONFIG_ANDROID
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
diff --git a/gxp-dma-rmem.c b/gxp-dma-rmem.c
index 504940d..fc2a2e4 100644
--- a/gxp-dma-rmem.c
+++ b/gxp-dma-rmem.c
@@ -108,6 +108,12 @@ static void bounce_buffer_remove(struct gxp_dma_rmem_manager *mgr,
/* gxp-dma.h Interface */
+int gxp_dma_ssmt_program(struct gxp_dev *gxp)
+{
+ /* NO-OP when using reserved memory with no IOMMU */
+ return 0;
+}
+
int gxp_dma_init(struct gxp_dev *gxp)
{
struct gxp_dma_rmem_manager *mgr;
@@ -181,7 +187,7 @@ void gxp_dma_unmap_resources(struct gxp_dev *gxp)
/* no mappings to undo */
}
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
@@ -193,7 +199,7 @@ void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
{
/* no mappings to undo */
}
-#endif // CONFIG_ANDROID
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
diff --git a/gxp-dma.h b/gxp-dma.h
index d3dd81e..e8131e7 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -10,7 +10,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
#include <soc/google/tpu-ext.h>
#endif
@@ -31,6 +31,27 @@ struct gxp_dma_manager {
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
#endif
+/*
+ * TODO(b/214113464) This is a temporary interface to reprogram the SSMT every
+ * time the block is powered up. It should be replaced with a new interface for
+ * assigning a given virtual device's domain to that virtual device's physical
+ * core once VD suspend/resume is implemented.
+ */
+/**
+ * gxp_dma_ssmt_program() - Program the SSMTs to map each core to its page table.
+ * @gxp: The GXP device to program the SSMTs for
+ *
+ * Every time the DSP block is powered on, the SSMTs must be re-programmed to
+ * map traffic from each physical core to be translated via that core's
+ * assigned page table. This API must be called every time the block is powered
+ * on for DSP usage.
+ *
+ * Return:
+ * * 0 - Success
+ * * Other - Reserved
+ */
+int gxp_dma_ssmt_program(struct gxp_dev *gxp);
+
/**
* gxp_dma_init() - Initialize the GXP DMA subsystem
* @gxp: The GXP device to initialize DMA for
@@ -77,7 +98,7 @@ int gxp_dma_map_resources(struct gxp_dev *gxp);
*/
void gxp_dma_unmap_resources(struct gxp_dev *gxp);
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
/**
* gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
* @gxp: The GXP device to setup the mappings for
@@ -98,7 +119,7 @@ int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
*/
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
struct gxp_tpu_mbx_desc mbx_desc);
-#endif // CONFIG_ANDROID
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
/**
* gxp_dma_alloc_coherent() - Allocate and map a coherent buffer for a GXP core
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index f782fcd..2f0c84c 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -710,13 +710,14 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
}
int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 *buffer_addrs,
+ u32 host_status,
+ dma_addr_t *buffer_addrs,
u32 per_buffer_size)
{
struct gxp_telemetry_descriptor *descriptor =
gxp->data_mgr->telemetry_mem.host_addr;
struct telemetry_descriptor *core_descriptors;
- int i;
+ uint core;
if (type == GXP_TELEMETRY_TYPE_LOGGING)
core_descriptors = descriptor->per_core_loggers;
@@ -725,10 +726,36 @@ int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
else
return -EINVAL;
- for (i = 0; i < NUM_CORES; i++) {
- core_descriptors[i].buffer_addr = buffer_addrs[i];
- core_descriptors[i].buffer_size = per_buffer_size;
+ /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
+ for (core = 0; core < NUM_CORES; core++) {
+ if (buffer_addrs[core] > U32_MAX)
+ return -EINVAL;
+ }
+
+ for (core = 0; core < NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = (u32)buffer_addrs[core];
+ core_descriptors[core].buffer_size = per_buffer_size;
}
return 0;
}
+
+u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ struct gxp_telemetry_descriptor *descriptor =
+ gxp->data_mgr->telemetry_mem.host_addr;
+
+ if (core >= GXP_NUM_CORES)
+ return 0;
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ return descriptor->per_core_loggers[core].device_status;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ return descriptor->per_core_tracers[core].device_status;
+ default:
+ return 0;
+ }
+}
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index 1306303..e9851ed 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -58,18 +58,38 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp);
* for firmware to write to.
* @gxp: The GXP device to set buffer descriptors for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @host_status: Bitfield describing the host's telemetry status. See the
+ * bit definitions in gxp-host-device-structs.h.
* @buffer_addrs: An array containing the IOVA each physical core can access
* its logging or tracing buffer at
* @per_buffer_size: The size of each core's logging or tracing buffer in bytes
*
* `gxp_fw_data_init()` must have been called before this function.
*
+ * Caller must hold gxp->telemetry_mgr's lock.
+ *
* Return:
* 0 - Success
- * -EINVAL - Invalid @type provided
+ * -EINVAL - Invalid @type provided or @buffer_addrs are not addressable by @gxp
*/
int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 *buffer_addrs,
+ u32 host_status,
+ dma_addr_t *buffer_addrs,
u32 per_buffer_size);
+/**
+ * gxp_fw_data_get_telemetry_device_status() - Returns a bitfield describing a
+ * core's telemetry status.
+ * @gxp: The GXP device to get device telemetry status for
+ * @core: The core in @gxp to get the device telemetry status for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold gxp->telemetry_mgr's lock.
+ *
+ * Return: The bitfield describing @core's telemetry status. If @core or @type
+ * are invalid, the result will always be 0.
+ */
+u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type);
+
#endif /* __GXP_FIRMWARE_DATA_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 89c0354..1557696 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -21,6 +21,9 @@
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
+#include "gxp-notification.h"
+#include "gxp-pm.h"
+#include "gxp-telemetry.h"
#include "gxp-tmp.h"
/* TODO (b/176984045): Clean up gxp-firmware.c */
@@ -125,6 +128,9 @@ static int elf_load_segments(struct gxp_dev *gxp, const struct firmware *fw,
return ret;
}
+/* Forward declaration for usage inside gxp_firmware_load(..). */
+static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
+
static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
{
u32 reset_vec, offset;
@@ -163,14 +169,15 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
gxp->fwbufs[core].size, MEMREMAP_WC);
if (!(gxp->fwbufs[core].vaddr)) {
dev_err(gxp->dev, "FW buf memremap failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_firmware_unload;
}
/* Load firmware to System RAM */
ret = elf_load_segments(gxp, fw[core], core);
if (ret) {
dev_err(gxp->dev, "Unable to load elf file\n");
- return ret;
+ goto out_firmware_unload;
}
memset(gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF, 0,
@@ -203,6 +210,10 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
gxp_bpm_configure(gxp, core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
return 0;
+
+out_firmware_unload:
+ gxp_firmware_unload(gxp, core);
+ return ret;
}
static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
@@ -270,6 +281,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
dev_notice(gxp->dev, "Core %u is alive!\n", core);
}
+#ifndef CONFIG_GXP_GEM5
/*
* Currently, the hello_world FW reads the INT_MASK0 register
* (written by the driver) to validate TOP access. The value
@@ -277,6 +289,9 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* the scratchpad space, which must be compared to the value
* written in the INT_MASK0 register by the driver for
* confirmation.
+ * On Gem5, FW will start early when lpm is up. This behavior will
+ * affect the order of reading/writing INT_MASK0, so ignore this
+ * handshaking in Gem5.
*/
/* TODO (b/182528386): Fix handshake for verifying TOP access */
offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
@@ -291,6 +306,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
dev_notice(gxp->dev, "TOP access from core %u successful!\n",
core);
}
+#endif // !CONFIG_GXP_GEM5
/* Stop bus performance monitors */
gxp_bpm_stop(gxp, core);
@@ -324,6 +340,9 @@ int gxp_fw_init(struct gxp_dev *gxp)
aurora_base = gxp->regs.vaddr;
+ /* Power on BLK_AUR to read the revision and processor ID registers */
+ gxp_pm_blk_on(gxp);
+
ver = gxp_read_32(gxp, GXP_REG_AURORA_REVISION);
dev_notice(gxp->dev, "Aurora version: 0x%x\n", ver);
@@ -333,6 +352,9 @@ int gxp_fw_init(struct gxp_dev *gxp)
core, proc_id);
}
+ /* Shut BLK_AUR down again to avoid interfering with power management */
+ gxp_pm_blk_off(gxp);
+
ret = gxp_acquire_rmem_resource(gxp, &r, "gxp-fw-region");
if (ret) {
dev_err(gxp->dev,
@@ -365,18 +387,23 @@ int gxp_fw_init(struct gxp_dev *gxp)
*/
gxp->firmware_running = 0;
- gxp_lpm_init(gxp);
return 0;
}
void gxp_fw_destroy(struct gxp_dev *gxp)
{
- gxp_lpm_destroy(gxp);
+ /* NO-OP for now. */
+ /*
+ * TODO(b/214124218): Revisit if the firmware subsystem still needs a
+ * "destroy" method now that power management is decoupled from the
+ * firmware subsystem's lifecycle.
+ */
}
int gxp_firmware_run(struct gxp_dev *gxp, uint core)
{
int ret = 0;
+ struct work_struct *work;
if (gxp->firmware_running & BIT(core)) {
dev_err(gxp->dev, "Firmware is already running on core %u\n",
@@ -390,7 +417,8 @@ int gxp_firmware_run(struct gxp_dev *gxp, uint core)
return ret;
}
- ret = gxp_lpm_up(gxp, core);
+ gxp_doorbell_set_listening_core(gxp, CORE_WAKEUP_DOORBELL, core);
+ ret = gxp_pm_core_on(gxp, core);
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
goto out_firmware_unload;
@@ -400,7 +428,7 @@ int gxp_firmware_run(struct gxp_dev *gxp, uint core)
if (ret) {
dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
core);
- /* TODO (b/176984045): Undo gxp_lpm_up() */
+ gxp_pm_core_off(gxp, core);
goto out_firmware_unload;
}
@@ -420,15 +448,16 @@ int gxp_firmware_run(struct gxp_dev *gxp, uint core)
goto out_firmware_unload;
}
- gxp_mailbox_register_debug_handler(gxp->mailbox_mgr->mailboxes[core],
- gxp_debug_dump_process_dump,
- GXP_DEBUG_DUMP_INT_MASK);
+ work = gxp_debug_dump_get_notification_handler(gxp, core);
+ if (work)
+ gxp_notification_register_handler(
+ gxp, core, HOST_NOTIF_DEBUG_DUMP_READY, work);
+
+ work = gxp_telemetry_get_notification_handler(gxp, core);
+ if (work)
+ gxp_notification_register_handler(
+ gxp, core, HOST_NOTIF_TELEMETRY_STATUS, work);
- /*
- * Wait until the FW consumes the new mailbox register values before
- * allowing messages to be sent thus manipulating the mailbox pointers.
- */
- msleep(25 * GXP_TIME_DELAY_FACTOR);
gxp->firmware_running |= BIT(core);
return ret;
@@ -445,10 +474,15 @@ void gxp_firmware_stop(struct gxp_dev *gxp, uint core)
gxp->firmware_running &= ~BIT(core);
+ gxp_notification_unregister_handler(gxp, core,
+ HOST_NOTIF_DEBUG_DUMP_READY);
+ gxp_notification_unregister_handler(gxp, core,
+ HOST_NOTIF_TELEMETRY_STATUS);
+
gxp_mailbox_release(gxp->mailbox_mgr,
gxp->mailbox_mgr->mailboxes[core]);
dev_notice(gxp->dev, "Mailbox %u released\n", core);
- gxp_lpm_down(gxp, core);
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
diff --git a/gxp-hw-mailbox-driver.c b/gxp-hw-mailbox-driver.c
index 3019897..4858585 100644
--- a/gxp-hw-mailbox-driver.c
+++ b/gxp-hw-mailbox-driver.c
@@ -45,6 +45,8 @@ static irqreturn_t mailbox_irq_handler(int irq, void *arg)
{
u32 masked_status;
struct gxp_mailbox *mailbox = (struct gxp_mailbox *) arg;
+ struct work_struct **handlers = mailbox->interrupt_handlers;
+ u32 next_int;
/* Contains only the non-masked, pending interrupt bits */
masked_status = gxp_mailbox_get_host_mask_status(mailbox);
@@ -57,15 +59,17 @@ static irqreturn_t mailbox_irq_handler(int irq, void *arg)
masked_status &= ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
}
- if (masked_status & mailbox->debug_dump_int_mask) {
- mailbox->handle_debug_dump_irq(mailbox);
- masked_status &= ~mailbox->debug_dump_int_mask;
- }
+ while ((next_int = ffs(masked_status))) {
+ next_int--; /* ffs returns 1-based indices */
+ masked_status &= ~BIT(next_int);
- if (masked_status)
- pr_err_ratelimited(
- "mailbox%d: received unknown interrupt bits 0x%x\n",
- mailbox->core_id, masked_status);
+ if (handlers[next_int])
+ schedule_work(handlers[next_int]);
+ else
+ pr_err_ratelimited(
+ "mailbox%d: received unknown interrupt bit 0x%X\n",
+ mailbox->core_id, next_int);
+ }
return IRQ_HANDLED;
}
@@ -109,13 +113,11 @@ static void unregister_irq(struct gxp_mailbox *mailbox)
void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
{
register_irq(mailbox);
- return;
}
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
{
unregister_irq(mailbox);
- return;
}
void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
diff --git a/gxp-internal.h b/gxp-internal.h
index 0034800..c5ee20d 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -63,6 +63,8 @@ struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
struct gxp_telemetry_manager;
+struct gxp_thermal_manager;
+struct gxp_wakelock_manager;
struct gxp_dev {
struct device *dev; /* platform bus device */
@@ -85,13 +87,30 @@ struct gxp_dev {
struct gxp_debug_dump_manager *debug_dump_mgr;
struct gxp_mapping_root *mappings; /* tree of user mappings */
u32 firmware_running; /* firmware status bitmap */
- struct mutex vd_lock; /* synchronizes vd operations */
+ /*
+ * Reader/writer lock protecting usage of virtual cores assigned to
+ * physical cores.
+ * A writer is any function creating or destroying a virtual core, or
+ * running or stopping one on a physical core.
+ * A reader is any function making use of or interacting with a virtual
+ * core without starting or stopping it on a physical core.
+ */
+ /*
+ * TODO(b/216862052) vd_semaphore also currently protects client state.
+ * A separate per-client lock should be introduced
+ * instead, as part of support for creating VDs
+ * without running them on physical cores.
+ */
+ struct rw_semaphore vd_semaphore;
struct gxp_client *core_to_client[GXP_NUM_CORES];
struct gxp_client *debugfs_client;
+ bool debugfs_wakelock_held;
+ struct gxp_thermal_manager *thermal_mgr;
struct gxp_dma_manager *dma_mgr;
struct gxp_fw_data_manager *data_mgr;
struct gxp_tpu_dev tpu_dev;
struct gxp_telemetry_manager *telemetry_mgr;
+ struct gxp_wakelock_manager *wakelock_mgr;
};
/* GXP device IO functions */
diff --git a/gxp-lpm.c b/gxp-lpm.c
index ff42320..aaf37e1 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -152,32 +152,6 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
void gxp_lpm_init(struct gxp_dev *gxp)
{
- u32 val;
-
- /*
- * Some LPM signals are not looped back in the current FPGA
- * implementation, causing the PSM to time out waiting for a handshake
- * signal from the host.
- * TODO: This is to be fixed in the next version of FPGA build
- * WORKAROUND: Patch LPM instruction to bypass the timeout for now
- * FIXME: The patch is only for ML2.5 build, and is incompatible to
- * other builds
- */
- val = lpm_read_32(gxp, LPM_INSTRUCTION_OFFSET);
- val &= (~LPM_INSTRUCTION_MASK);
- lpm_write_32(gxp, LPM_INSTRUCTION_OFFSET, val);
-
- /* Local Access Path should not be enabled */
-#if 0
- /*
- * Enable CNOC to DNOC path in Provino for direct TOP access from Q7
- * cores.
- */
- val = gxp_read_32(gxp, PROVINO_IXBAR1_ARL_CTRL);
- val |= PROVINO_IXBAR1_ARL_EN;
- gxp_write_32(gxp, PROVINO_IXBAR1_ARL_CTRL, val);
-#endif
-
/* Enable Top PSM */
dev_notice(gxp->dev, "Enabling Top PSM...\n");
if (psm_enable(gxp, LPM_TOP_PSM)) {
@@ -237,8 +211,12 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core)
gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL);
msleep(25 * GXP_TIME_DELAY_FACTOR);
- /* Reset doorbell mask */
+ /*
+ * Clear the core's interrupt mask and the wakeup doorbell to ensure
+ * the core will not wake unexpectedly.
+ */
gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, 0);
+ gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL);
/* Ensure core is in PS2 */
set_state(gxp, core, LPM_PG_W_RET_STATE);
diff --git a/gxp-lpm.h b/gxp-lpm.h
index 8ffbb0e..a1b891a 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -30,7 +30,11 @@ enum lpm_state {
#define LPM_INSTRUCTION_OFFSET 0x00000944
#define LPM_INSTRUCTION_MASK 0x03000000
-#define LPM_TOP_PSM 4
+/*
+ * The TOP PSM comes immediately after the last PSM of core, so define its PSM
+ * number in terms of the number of cores.
+ */
+#define LPM_TOP_PSM GXP_NUM_CORES
#define LPM_HW_MODE 0
#define LPM_SW_PSM_MODE 1
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 549a6e1..ad3c688 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -187,8 +187,6 @@ struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
if (!mgr->mailboxes)
return ERR_PTR(-ENOMEM);
- rwlock_init(&mgr->mailboxes_lock);
-
return mgr;
}
@@ -384,12 +382,6 @@ static inline void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
queue_work(mailbox->response_wq, &mailbox->response_work);
}
-static inline void
-gxp_mailbox_handle_debug_dump_irq(struct gxp_mailbox *mailbox)
-{
- schedule_work(&mailbox->debug_dump_work);
-}
-
#define _RESPONSE_WORKQUEUE_NAME(_x_) "gxp_responses_" #_x_
#define RESPONSE_WORKQUEUE_NAME(_x_) _RESPONSE_WORKQUEUE_NAME(_x_)
static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
@@ -500,52 +492,20 @@ struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
u8 core_id)
{
struct gxp_mailbox *mailbox;
- unsigned long flags;
- /* Allocate a mailbox before locking */
mailbox = create_mailbox(mgr, core_id);
if (IS_ERR(mailbox))
return mailbox;
- write_lock_irqsave(&mgr->mailboxes_lock, flags);
-
- if (mgr->mailboxes[core_id])
- goto busy;
- else
- mgr->mailboxes[core_id] = mailbox;
-
- write_unlock_irqrestore(&mgr->mailboxes_lock, flags);
-
- /* Once we've confirmed the mailbox will be used, enable it */
enable_mailbox(mailbox);
return mailbox;
-
-busy:
- write_unlock_irqrestore(&mgr->mailboxes_lock, flags);
-
- gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, BIT(mailbox->core_id),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
- destroy_workqueue(mailbox->response_wq);
- kfree(mailbox);
-
- return ERR_PTR(-EBUSY);
}
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_mailbox *mailbox)
{
- unsigned long flags;
+ int i;
if (!mailbox) {
dev_err(mgr->gxp->dev,
@@ -553,21 +513,31 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
return;
}
- /* Halt the mailbox driver */
+ /*
+ * Halt the mailbox driver.
+ * This must happen before the mailbox itself is cleaned-up/released
+ * to make sure the mailbox does not disappear out from under the
+ * mailbox driver. This also halts all incoming responses/interrupts.
+ */
gxp_mailbox_driver_exit(mailbox);
- /* TODO(b/189018271) Mailbox locking is broken */
- write_lock_irqsave(&mgr->mailboxes_lock, flags);
-
/* Halt and flush any traffic */
cancel_work_sync(&mailbox->response_work);
- cancel_work_sync(&mailbox->debug_dump_work);
+ for (i = 0; i < GXP_MAILBOX_INT_BIT_COUNT; i++) {
+ if (mailbox->interrupt_handlers[i])
+ cancel_work_sync(mailbox->interrupt_handlers[i]);
+ }
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
- mgr->mailboxes[mailbox->core_id] = NULL;
- write_unlock_irqrestore(&mgr->mailboxes_lock, flags);
+ /*
+ * At this point all users of the mailbox have been halted or are
+ * waiting on gxp->vd_semaphore, which this function's caller has
+ * locked for writing.
+ * It is now safe to clear the manager's mailbox pointer.
+ */
+ mgr->mailboxes[mailbox->core_id] = NULL;
/* Clean up resources */
gxp_dma_free_coherent(
@@ -756,6 +726,7 @@ static void async_cmd_timeout_work(struct work_struct *work)
*/
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
if (async_resp->dest_queue) {
+ async_resp->resp.status = GXP_RESP_CANCELLED;
list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
wake_up(async_resp->dest_queue_waitq);
@@ -799,13 +770,27 @@ err_free_resp:
return ret;
}
-void gxp_mailbox_register_debug_handler(struct gxp_mailbox *mailbox,
- void (*debug_dump_process)
- (struct work_struct *work),
- u32 debug_dump_int_mask)
+int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
+ u32 int_bit,
+ struct work_struct *handler)
{
- mailbox->handle_debug_dump_irq = gxp_mailbox_handle_debug_dump_irq;
- mailbox->debug_dump_int_mask = debug_dump_int_mask;
+ /* Bit 0 is reserved for incoming mailbox responses */
+ if (int_bit == 0 || int_bit >= GXP_MAILBOX_INT_BIT_COUNT)
+ return -EINVAL;
+
+ mailbox->interrupt_handlers[int_bit] = handler;
+
+ return 0;
+}
- INIT_WORK(&mailbox->debug_dump_work, debug_dump_process);
+int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
+ u32 int_bit)
+{
+ /* Bit 0 is reserved for incoming mailbox responses */
+ if (int_bit == 0 || int_bit >= GXP_MAILBOX_INT_BIT_COUNT)
+ return -EINVAL;
+
+ mailbox->interrupt_handlers[int_bit] = NULL;
+
+ return 0;
}
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index d2cfee4..06c9718 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -102,6 +102,8 @@ struct gxp_mailbox_descriptor {
u32 resp_queue_size;
};
+#define GXP_MAILBOX_INT_BIT_COUNT 16
+
struct gxp_mailbox {
uint core_id;
struct gxp_dev *gxp;
@@ -109,9 +111,11 @@ struct gxp_mailbox {
void __iomem *data_reg_base;
void (*handle_irq)(struct gxp_mailbox *mailbox);
- void (*handle_debug_dump_irq)(struct gxp_mailbox *mailbox);
+ struct work_struct *interrupt_handlers[GXP_MAILBOX_INT_BIT_COUNT];
unsigned int interrupt_virq;
- u32 debug_dump_int_mask;
+ struct task_struct *to_host_poll_task;
+ /* Protects to_host_poll_task while it holds a sync barrier */
+ struct mutex polling_lock;
u64 cur_seq;
@@ -137,8 +141,6 @@ struct gxp_mailbox {
wait_queue_head_t wait_list_waitq;
struct workqueue_struct *response_wq;
struct work_struct response_work;
- struct work_struct debug_dump_work;
- struct task_struct *to_host_poll_task;
};
typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
@@ -146,7 +148,6 @@ typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
struct gxp_mailbox_manager {
struct gxp_dev *gxp;
u8 num_cores;
- rwlock_t mailboxes_lock;
struct gxp_mailbox **mailboxes;
get_mailbox_base_t get_mailbox_csr_base;
get_mailbox_base_t get_mailbox_data_base;
@@ -157,6 +158,11 @@ struct gxp_mailbox_manager {
struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
uint num_cores);
+/*
+ * The following functions all require their caller have locked
+ * gxp->vd_semaphore for reading.
+ */
+
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
u8 core_id);
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
@@ -173,9 +179,11 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq);
-void gxp_mailbox_register_debug_handler(struct gxp_mailbox *mailbox,
- void (*debug_dump_process)
- (struct work_struct *work),
- u32 debug_dump_int_mask);
+int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
+ u32 int_bit,
+ struct work_struct *handler);
+
+int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
+ u32 int_bit);
#endif /* __GXP_MAILBOX_H__ */
diff --git a/gxp-mb-notification.c b/gxp-mb-notification.c
new file mode 100644
index 0000000..3198984
--- /dev/null
+++ b/gxp-mb-notification.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP notification implementation on top of the mailbox driver.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/bitops.h>
+
+#include "gxp-mailbox.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-notification.h"
+
+int gxp_notification_register_handler(struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_host_type type,
+ struct work_struct *handler)
+{
+ struct gxp_mailbox *mailbox;
+
+ if (core >= GXP_NUM_CORES || type >= HOST_NOTIF_MAX)
+ return -EINVAL;
+
+ mailbox = gxp->mailbox_mgr->mailboxes[core];
+ if (!mailbox)
+ return -ENODEV;
+
+ return gxp_mailbox_register_interrupt_handler(mailbox, type, handler);
+}
+
+int gxp_notification_unregister_handler(struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_host_type type)
+{
+ struct gxp_mailbox *mailbox;
+
+ if (core >= GXP_NUM_CORES || type >= HOST_NOTIF_MAX)
+ return -EINVAL;
+
+ mailbox = gxp->mailbox_mgr->mailboxes[core];
+ if (!mailbox)
+ return -ENODEV;
+
+ return gxp_mailbox_unregister_interrupt_handler(mailbox, type);
+}
+
+int gxp_notification_send(struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_core_type type)
+{
+ struct gxp_mailbox *mailbox;
+
+ /*
+ * The mailbox submodule handles outgoing command interrupts directly
+ * so `CORE_NOTIF_MAILBOX_COMMAND` is not a valid input for this
+ * implementation.
+ */
+ if (core >= GXP_NUM_CORES || type == CORE_NOTIF_MAILBOX_COMMAND ||
+ type >= GXP_MAILBOX_INT_BIT_COUNT || type >= CORE_NOTIF_MAX)
+ return -EINVAL;
+
+ mailbox = gxp->mailbox_mgr->mailboxes[core];
+ if (!mailbox)
+ return -ENODEV;
+
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(type));
+
+ return 0;
+}
diff --git a/gxp-notification.h b/gxp-notification.h
new file mode 100644
index 0000000..1c18a30
--- /dev/null
+++ b/gxp-notification.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP notification interface.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+#ifndef __GXP_NOTIFICATION_H__
+#define __GXP_NOTIFICATION_H__
+
+#include <linux/workqueue.h>
+
+#include "gxp-internal.h"
+
+enum gxp_notification_to_host_type {
+ HOST_NOTIF_MAILBOX_RESPONSE = 0,
+ HOST_NOTIF_DEBUG_DUMP_READY = 1,
+ HOST_NOTIF_TELEMETRY_STATUS = 2,
+ HOST_NOTIF_MAX
+};
+
+enum gxp_notification_to_core_type {
+ CORE_NOTIF_MAILBOX_COMMAND = 0,
+ CORE_NOTIF_GENERATE_DEBUG_DUMP = 1,
+ CORE_NOTIF_TELEMETRY_STATUS = 2,
+ CORE_NOTIF_MAX
+};
+
+/**
+ * gxp_notification_register_handler() - Register a work_struct to be called
+ * when the specified @core sends a
+ * notification of the specified @type.
+ * @gxp: The GXP device to register the handler for
+ * @core: The core inside the GXP device to receive notifications from
+ * @type: The `gxp_notification_to_host_type` of notification to handle
+ * @handler: A callback to be invoked via `schedule_work()` when a notification
+ * of @type arrives.
+ *
+ * This function requires the specified @core has its firmware loaded and
+ * initialized before this function is called.
+ *
+ * If the callback requires additional context, such as the core number or a
+ * pointer to @gxp, the caller should allocate @handler as part of wrapper
+ * struct containing any context, then obtain that wrapping struct with
+ * `container_of()` inside the handler's callback.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The specified @core or @type is not valid
+ * * -ENODEV - The specified @core is not running firmware
+ */
+int gxp_notification_register_handler(struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_host_type type,
+ struct work_struct *handler);
+
+/**
+ * gxp_notification_unregister_handler() - Unregister the work for handling
+ * notifications of type @type from core
+ * @core.
+ * @gxp: The GXP device to unregister the handler for
+ * @core: The core inside the GXP device to remove the notifications handler for
+ * @type: The `gxp_notification_to_host_type` of notification to unregister
+ *
+ * This function requires the specified @core has its firmware loaded and
+ * initialized before this function is called.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The specified @core or @type is not valid
+ * * -ENODEV - The specified @core is not running firmware
+ */
+int gxp_notification_unregister_handler(
+ struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_host_type type);
+
+/**
+ * gxp_notification_send() - Send a notification of @type to @core.
+ * @gxp: The GXP device to send a notification to
+ * @core: The core inside the GXP device to route the notification to
+ * @type: The `gxp_notification_to_core_type` of notification to send
+ *
+ * This function requires the specified @core has its firmware loaded and
+ * initialized before this function is called.
+ *
+ * The caller must also hold gxp->vd_semaphore for reading, to ensure firmware
+ * continues running until this call completes.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The specified @core or @type is not valid
+ * * -ENODEV - The specified @core is not running firmware
+ */
+int gxp_notification_send(struct gxp_dev *gxp, uint core,
+ enum gxp_notification_to_core_type type);
+
+#endif /* __GXP_NOTIFICATION_H__ */
diff --git a/gxp-platform.c b/gxp-platform.c
index df7ab49..96a3a76 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -5,7 +5,7 @@
* Copyright (C) 2021 Google LLC
*/
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif
@@ -22,7 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
#include <soc/google/tpu-ext.h>
#endif
@@ -38,9 +38,11 @@
#include "gxp-mapping.h"
#include "gxp-pm.h"
#include "gxp-telemetry.h"
+#include "gxp-thermal.h"
#include "gxp-vd.h"
+#include "gxp-wakelock.h"
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static struct sscd_platform_data gxp_sscd_pdata;
static void gxp_sscd_release(struct device *dev)
@@ -57,31 +59,51 @@ static struct platform_device gxp_sscd_dev = {
.release = gxp_sscd_release,
},
};
-#endif // CONFIG_ANDROID
+#endif // CONFIG_SUBSYSTEM_COREDUMP
static int gxp_open(struct inode *inode, struct file *file)
{
struct gxp_client *client;
struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
misc_dev);
+ int ret = 0;
client = gxp_client_create(gxp);
if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
- return 0;
+
+ ret = gxp_wakelock_acquire(gxp);
+ if (ret) {
+ gxp_client_destroy(client);
+ file->private_data = NULL;
+ }
+
+ return ret;
}
static int gxp_release(struct inode *inode, struct file *file)
{
struct gxp_client *client = file->private_data;
+ struct gxp_dev *gxp;
+
+ /*
+ * If open failed and no client was created then no clean-up is needed.
+ */
+ if (!client)
+ return 0;
+
+ gxp = client->gxp;
/*
* TODO (b/184572070): Unmap buffers and drop mailbox responses
* belonging to the client
*/
gxp_client_destroy(client);
+
+ gxp_wakelock_release(gxp);
+
return 0;
}
@@ -263,10 +285,12 @@ static int gxp_mailbox_command(struct gxp_client *client,
cmd.priority = 0; /* currently unused */
cmd.buffer_descriptor = buffer;
+ down_read(&gxp->vd_semaphore);
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
&gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
&gxp->mailbox_resp_waitqs[phys_core]);
+ up_read(&gxp->vd_semaphore);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
ret);
@@ -411,15 +435,15 @@ static int gxp_allocate_vd(struct gxp_client *client,
return -EINVAL;
}
- mutex_lock(&gxp->vd_lock);
+ down_write(&gxp->vd_semaphore);
if (client->vd_allocated) {
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
dev_err(gxp->dev, "Virtual device was already allocated for client\n");
return -EINVAL;
}
ret = gxp_vd_allocate(client, ibuf.core_count);
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
return ret;
}
@@ -607,7 +631,7 @@ static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
struct gxp_tpu_mbx_queue_ioctl __user *argp)
{
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
struct gxp_dev *gxp = client->gxp;
struct edgetpu_ext_mailbox_info *mbx_info;
struct gxp_tpu_mbx_queue_ioctl ibuf;
@@ -643,7 +667,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (!mbx_info)
return -ENOMEM;
- mutex_lock(&gxp->vd_lock);
+ down_write(&gxp->vd_semaphore);
if (client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: Mappings already exist for TPU mailboxes\n",
@@ -684,7 +708,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
client->tpu_mbx_allocated = true;
error:
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
kfree(mbx_info);
return ret;
@@ -696,7 +720,7 @@ error:
static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
struct gxp_tpu_mbx_queue_ioctl __user *argp)
{
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
struct gxp_dev *gxp = client->gxp;
struct gxp_tpu_mbx_queue_ioctl ibuf;
struct edgetpu_ext_client_info gxp_tpu_info;
@@ -705,7 +729,7 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- mutex_lock(&gxp->vd_lock);
+ down_write(&gxp->vd_semaphore);
if (!client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: No mappings exist for TPU mailboxes\n",
@@ -729,7 +753,7 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
client->tpu_mbx_allocated = false;
out:
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
return ret;
#else
@@ -737,6 +761,59 @@ out:
#endif
}
+static int gxp_register_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_telemetry_register_eventfd(gxp, ibuf.type, ibuf.eventfd);
+}
+
+static int gxp_unregister_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_telemetry_unregister_eventfd(gxp, ibuf.type);
+}
+
+static int gxp_read_global_counter(struct gxp_client *client,
+ __u64 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u32 high_first, high_second, low;
+ u64 counter_val;
+
+ high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ /*
+ * Check if the lower 32 bits could have wrapped in-between reading
+ * the high and low bit registers by validating the higher 32 bits
+ * haven't changed.
+ */
+ high_second = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ if (high_first != high_second)
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ counter_val = ((u64)high_second << 32) | low;
+
+ if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -789,6 +866,15 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_UNMAP_TPU_MBX_QUEUE:
ret = gxp_unmap_tpu_mbx_queue(client, argp);
break;
+ case GXP_REGISTER_TELEMETRY_EVENTFD:
+ ret = gxp_register_telemetry_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_telemetry_eventfd(client, argp);
+ break;
+ case GXP_READ_GLOBAL_COUNTER:
+ ret = gxp_read_global_counter(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -838,6 +924,8 @@ static int gxp_platform_probe(struct platform_device *pdev)
int i __maybe_unused;
bool tpu_found __maybe_unused;
+ dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+
gxp = devm_kzalloc(dev, sizeof(*gxp), GFP_KERNEL);
if (!gxp)
return -ENOMEM;
@@ -849,6 +937,8 @@ static int gxp_platform_probe(struct platform_device *pdev)
gxp->misc_dev.name = "gxp";
gxp->misc_dev.fops = &gxp_fops;
+ gxp_wakelock_init(gxp);
+
ret = misc_register(&gxp->misc_dev);
if (ret) {
dev_err(dev, "Failed to register misc device (ret = %d)\n",
@@ -873,14 +963,11 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err;
}
-#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = gxp_pm_init(gxp);
if (ret) {
- dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
+ dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
goto err;
}
-#endif
#ifndef CONFIG_GXP_USE_SW_MAILBOX
for (i = 0; i < GXP_NUM_CORES; i++) {
@@ -951,11 +1038,11 @@ static int gxp_platform_probe(struct platform_device *pdev)
}
spin_lock_init(&gxp->mailbox_resps_lock);
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
#else
ret = gxp_debug_dump_init(gxp, NULL, NULL);
-#endif // !CONFIG_ANDROID
+#endif // !CONFIG_SUBSYSTEM_COREDUMP
if (ret) {
dev_err(dev, "Failed to initialize debug dump\n");
gxp_debug_dump_exit(gxp);
@@ -986,6 +1073,9 @@ static int gxp_platform_probe(struct platform_device *pdev)
gxp_telemetry_init(gxp);
gxp_create_debugfs(gxp);
gxp_pm_init(gxp);
+ gxp->thermal_mgr = gxp_thermal_init(gxp);
+ if (!gxp->thermal_mgr)
+ dev_err(dev, "Failed to init thermal driver\n");
dev_dbg(dev, "Probe finished\n");
return 0;
@@ -1022,18 +1112,35 @@ static int gxp_platform_remove(struct platform_device *pdev)
#endif
misc_deregister(&gxp->misc_dev);
-#ifdef CONFIG_GXP_CLOUDRIPPER
- // Request to power off BLK_AUR
- gxp_pm_blk_off(gxp);
- pm_runtime_disable(dev);
gxp_pm_destroy(gxp);
-#endif
devm_kfree(dev, (void *)gxp);
return 0;
}
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+
+static int gxp_platform_suspend(struct device *dev)
+{
+ struct gxp_dev *gxp = dev_get_drvdata(dev);
+
+ return gxp_wakelock_suspend(gxp);
+}
+
+static int gxp_platform_resume(struct device *dev)
+{
+ struct gxp_dev *gxp = dev_get_drvdata(dev);
+
+ return gxp_wakelock_resume(gxp);
+}
+
+static const struct dev_pm_ops gxp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gxp_platform_suspend, gxp_platform_resume)
+};
+
+#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
+
#ifdef CONFIG_OF
static const struct of_device_id gxp_of_match[] = {
{ .compatible = "google,gxp", },
@@ -1057,12 +1164,15 @@ static struct platform_driver gxp_platform_driver = {
.name = GXP_DRIVER_NAME,
.of_match_table = of_match_ptr(gxp_of_match),
.acpi_match_table = ACPI_PTR(gxp_acpi_match),
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+ .pm = &gxp_pm_ops,
+#endif
},
};
static int __init gxp_platform_init(void)
{
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
/* Registers SSCD platform device */
if (platform_device_register(&gxp_sscd_dev))
pr_err("Unable to register SSCD platform device\n");
@@ -1073,12 +1183,13 @@ static int __init gxp_platform_init(void)
static void __exit gxp_platform_exit(void)
{
platform_driver_unregister(&gxp_platform_driver);
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
platform_device_unregister(&gxp_sscd_dev);
#endif
}
MODULE_DESCRIPTION("Google GXP platform driver");
MODULE_LICENSE("GPL v2");
+MODULE_INFO(gitinfo, GIT_REPO_TAG);
module_init(gxp_platform_init);
module_exit(gxp_platform_exit);
diff --git a/gxp-pm.c b/gxp-pm.c
index cd478fb..697af81 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -31,10 +31,15 @@ static int gxp_pm_blkpwr_up(struct gxp_dev *gxp)
{
int ret = 0;
-#ifdef CONFIG_GXP_CLOUDRIPPER
- ret = pm_runtime_get_sync(gxp->dev);
+#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
+ /*
+ * This function is equivalent to pm_runtime_get_sync, but will prevent
+ * the pm_runtime refcount from increasing if the call fails. It also
+ * only returns either 0 for success or an errno on failure.
+ */
+ ret = pm_runtime_resume_and_get(gxp->dev);
if (ret)
- dev_err(gxp->dev, "%s: pm_runtime_get_sync returned %d\n",
+ dev_err(gxp->dev, "%s: pm_runtime_resume_and_get returned %d\n",
__func__, ret);
#endif
return ret;
@@ -44,7 +49,7 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
{
int ret = 0;
-#ifdef CONFIG_GXP_CLOUDRIPPER
+#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
/*
* Need to put TOP LPM into active state before blk off
* b/189396709
@@ -53,6 +58,12 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
ret = pm_runtime_put_sync(gxp->dev);
if (ret)
+ /*
+ * pm_runtime_put_sync() returns the device's usage counter.
+ * Negative values indicate an error, while any positive values
+ * indicate the device is still in use somewhere. The only
+ * expected value here is 0, indicating no remaining users.
+ */
dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
__func__, ret);
#endif
@@ -63,7 +74,7 @@ int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
{
int ret = 0;
-#ifdef CONFIG_GXP_CLOUDRIPPER
+#if defined(CONFIG_GXP_CLOUDRIPPER)
ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, state);
dev_dbg(gxp->dev, "%s: state %lu, ret %d\n", __func__, state, ret);
#endif
@@ -74,7 +85,7 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
{
int ret = 0;
-#ifdef CONFIG_GXP_CLOUDRIPPER
+#if defined(CONFIG_GXP_CLOUDRIPPER)
ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
#endif
@@ -96,6 +107,10 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
}
+
+ /* Startup TOP's PSM */
+ gxp_lpm_init(gxp);
+
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
@@ -116,6 +131,9 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return -EBUSY;
}
+ /* Shutdown TOP's PSM */
+ gxp_lpm_destroy(gxp);
+
ret = gxp_pm_blkpwr_down(gxp);
if (!ret)
gxp->power_mgr->curr_state = AUR_OFF;
@@ -270,6 +288,11 @@ int gxp_pm_init(struct gxp_dev *gxp)
mgr->pwr_state_req[i] = AUR_OFF;
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
+
+#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
+ pm_runtime_enable(gxp->dev);
+#endif
+
return 0;
}
@@ -277,6 +300,10 @@ int gxp_pm_destroy(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
+ pm_runtime_disable(gxp->dev);
+#endif
+
mgr = gxp->power_mgr;
mutex_destroy(&mgr->pm_lock);
return 0;
diff --git a/gxp-sw-mailbox-driver.c b/gxp-sw-mailbox-driver.c
index 43daf21..cd67358 100644
--- a/gxp-sw-mailbox-driver.c
+++ b/gxp-sw-mailbox-driver.c
@@ -5,6 +5,7 @@
* Copyright (C) 2020 Google LLC
*/
+#include <linux/bitops.h>
#include <linux/kthread.h>
#include "gxp-tmp.h"
@@ -291,12 +292,17 @@ static void data_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
/* IRQ Handling */
+#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
+
static int poll_int_thread(void *data)
{
- u32 status_value, mask_value, masked_status_value;
+ u32 status_value, mask_value, masked_status_value, next_int;
struct gxp_mailbox *mailbox = (struct gxp_mailbox *)data;
+ struct work_struct **handlers = mailbox->interrupt_handlers;
while (!kthread_should_stop()) {
+ mutex_lock(&mailbox->polling_lock);
+
gxp_acquire_sync_barrier(mailbox->gxp,
MBOX_ACCESS_SYNC_BARRIER);
status_value =
@@ -306,17 +312,31 @@ static int poll_int_thread(void *data)
MBOX_ACCESS_SYNC_BARRIER);
masked_status_value = status_value & mask_value;
- if (masked_status_value) {
- if (masked_status_value & ~mailbox->debug_dump_int_mask)
- mailbox->handle_irq(mailbox);
- if (masked_status_value & mailbox->debug_dump_int_mask)
- mailbox->handle_debug_dump_irq(mailbox);
+ if (masked_status_value &
+ MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
+ mailbox->handle_irq(mailbox);
+ masked_status_value &=
+ ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
+ }
+
+ while ((next_int = ffs(masked_status_value))) {
+ next_int--; /* ffs returns 1-based indices */
+ masked_status_value &= ~BIT(next_int);
- gxp_mailbox_clear_host_interrupt(
- mailbox, status_value & mask_value);
+ if (handlers[next_int])
+ schedule_work(handlers[next_int]);
+ else
+ pr_err_ratelimited(
+ "mailbox%d: received unknown interrupt bit 0x%X\n",
+ mailbox->core_id, next_int);
}
+ gxp_mailbox_clear_host_interrupt(mailbox,
+ status_value & mask_value);
+
+ mutex_unlock(&mailbox->polling_lock);
+
/* TODO(b/177701517): Polling frequency is untuned.*/
msleep(200);
}
@@ -336,6 +356,7 @@ void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
csr_write(mailbox, MBOX_INTMR1_OFFSET, 0x00000000);
/* Setup a polling thread to check for to-host "interrupts" */
+ mutex_init(&mailbox->polling_lock);
mailbox->to_host_poll_task =
kthread_run(poll_int_thread, mailbox,
"gxp_poll_mailbox%d_to_host", mailbox->core_id);
@@ -349,8 +370,11 @@ void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
{
- if (!IS_ERR_OR_NULL(mailbox->to_host_poll_task))
+ if (!IS_ERR_OR_NULL(mailbox->to_host_poll_task)) {
+ mutex_lock(&mailbox->polling_lock);
kthread_stop(mailbox->to_host_poll_task);
+ mutex_unlock(&mailbox->polling_lock);
+ }
}
void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
index 4e97ec1..0afc6de 100644
--- a/gxp-telemetry.c
+++ b/gxp-telemetry.c
@@ -6,21 +6,68 @@
*/
#include <linux/slab.h>
+#include <linux/wait.h>
#include "gxp-config.h"
#include "gxp-dma.h"
+#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
+#include "gxp-host-device-structs.h"
+#include "gxp-notification.h"
#include "gxp-telemetry.h"
+static inline bool is_core_telemetry_enabled(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ u32 device_status =
+ gxp_fw_data_get_telemetry_device_status(gxp, core, type);
+
+ return device_status & GXP_TELEMETRY_DEVICE_STATUS_ENABLED;
+}
+
+static void telemetry_status_notification_work(struct work_struct *work)
+{
+ struct gxp_telemetry_work *telem_work =
+ container_of(work, struct gxp_telemetry_work, work);
+ struct gxp_dev *gxp = telem_work->gxp;
+ uint core = telem_work->core;
+ struct gxp_telemetry_manager *mgr = telem_work->gxp->telemetry_mgr;
+
+ /* Wake any threads waiting on a telemetry disable ACK */
+ wake_up(&mgr->waitq);
+
+ /* Signal the appropriate eventfd for any active telemetry types */
+ mutex_lock(&mgr->lock);
+
+ if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
+ mgr->logging_efd)
+ eventfd_signal(mgr->logging_efd, 1);
+
+ if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
+ mgr->tracing_efd)
+ eventfd_signal(mgr->tracing_efd, 1);
+
+ mutex_unlock(&mgr->lock);
+}
+
int gxp_telemetry_init(struct gxp_dev *gxp)
{
struct gxp_telemetry_manager *mgr;
+ uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
mutex_init(&mgr->lock);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ INIT_WORK(&mgr->notification_works[i].work,
+ telemetry_status_notification_work);
+ mgr->notification_works[i].gxp = gxp;
+ mgr->notification_works[i].core = i;
+
+ }
+ init_waitqueue_head(&mgr->waitq);
gxp->telemetry_mgr = mgr;
@@ -49,6 +96,13 @@ static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
mutex_unlock(&gxp->telemetry_mgr->lock);
}
+/*
+ * Forward declaration of telemetry_disable_locked() so that
+ * gxp_telemetry_vma_close() can invoke the locked version without having to
+ * release `telemetry_mgr->lock` and call gxp_telemetry_disable().
+ */
+static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type);
+
static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
{
struct gxp_dev *gxp;
@@ -63,8 +117,8 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
mutex_lock(&gxp->telemetry_mgr->lock);
if (refcount_dec_and_test(&data->ref_count)) {
- if (data->enabled)
- gxp_telemetry_disable(gxp, type);
+ if (data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED)
+ telemetry_disable_locked(gxp, type);
for (i = 0; i < GXP_NUM_CORES; i++)
gxp_dma_free_coherent(gxp, BIT(i), data->size,
@@ -156,7 +210,6 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
}
data->size = size;
refcount_set(&data->ref_count, 1);
- data->enabled = false;
return data;
@@ -304,6 +357,7 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
{
struct buffer_data *data;
int ret = 0;
+ uint core;
mutex_lock(&gxp->telemetry_mgr->lock);
@@ -325,13 +379,18 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
}
/* Populate the buffer fields in firmware-data */
- gxp_fw_data_set_telemetry_descriptors(
- gxp, type, (u32 *)data->buffer_daddrs, data->size);
+ data->host_status |= GXP_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
+ data->buffer_daddrs, data->size);
- /* TODO(b/202937192) To be done in a future CL */
/* Notify any running cores that firmware-data was updated */
-
- data->enabled = true;
+ down_read(&gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core))
+ gxp_notification_send(gxp, core,
+ CORE_NOTIF_TELEMETRY_STATUS);
+ }
+ up_read(&gxp->vd_semaphore);
out:
mutex_unlock(&gxp->telemetry_mgr->lock);
@@ -339,13 +398,82 @@ out:
return ret;
}
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
+/**
+ * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
+ * been changed by the host and wait for
+ * the core to stop using telemetry.
+ * @gxp: The GXP device telemetry is changing for
+ * @core: The core in @gxp to notify of the telemetry state change
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold @gxp's virtual device lock
+ *
+ * Return:
+ * * 0 - Firmware on @core is no longer using telemetry of @type
+ * * -ENXIO - Firmware on @core is unresponsive
+ */
+static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ uint retries_left = 50;
+
+ gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
+
+ /* Wait for ACK from firmware */
+ while (is_core_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core) && retries_left) {
+ /* Release vd_semaphore while waiting */
+ up_read(&gxp->vd_semaphore);
+
+ /*
+ * The VD lock must be held to check if firmware is running, so
+ * the wait condition is only whether the firmware data has been
+ * updated to show the core disabling telemetry.
+ *
+ * If a core does stop running firmware while this function is
+ * asleep, it will be seen at the next timeout.
+ */
+ wait_event_timeout(gxp->telemetry_mgr->waitq,
+ !is_core_telemetry_enabled(gxp, core, type),
+ msecs_to_jiffies(10));
+ retries_left--;
+
+ down_read(&gxp->vd_semaphore);
+ }
+
+ /*
+ * If firmware has stopped running altogether, that is sufficient to be
+ * considered disabled. If firmware is started on this core again, it
+ * is responsible for clearing its status.
+ */
+ if (unlikely(is_core_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core)))
+ return -ENXIO;
+
+ return 0;
+}
+
+/**
+ * telemetry_disable_locked() - Helper function to break out the actual
+ * process of disabling telemetry so that it
+ * can be invoked by internal functions that are
+ * already holding the telemetry lock.
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold `telemetry_mgr->lock`.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
{
struct buffer_data *data;
int ret = 0;
- u32 null_daddrs[GXP_NUM_CORES] = {0};
-
- mutex_lock(&gxp->telemetry_mgr->lock);
+ dma_addr_t null_daddrs[GXP_NUM_CORES] = {0};
+ uint core;
/* Cleanup telemetry manager's book-keeping */
switch (type) {
@@ -356,29 +484,119 @@ int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
data = gxp->telemetry_mgr->tracing_buff_data;
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (!data) {
- ret = -ENXIO;
- goto out;
- }
+ if (!data)
+ return -ENXIO;
- if (!data->enabled)
- goto out;
+ if (!(data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED))
+ return 0;
/* Clear the log buffer fields in firmware-data */
- gxp_fw_data_set_telemetry_descriptors(gxp, type, null_daddrs, 0);
+ data->host_status &= ~GXP_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
+ null_daddrs, 0);
- /* TODO(b/202937192) To be done in a future CL */
/* Notify any running cores that firmware-data was updated */
- /* Wait for ACK from firmware */
+ down_read(&gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core)) {
+ ret = notify_core_and_wait_for_disable(gxp, core, type);
+ if (ret)
+ dev_warn(
+ gxp->dev,
+ "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
+ __func__, core, type, ret);
+ }
+ }
+ up_read(&gxp->vd_semaphore);
+
+ return 0;
+}
+
+int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
+{
+ int ret;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ ret = telemetry_disable_locked(gxp, type);
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return ret;
+}
+
+int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
+{
+ struct eventfd_ctx *new_ctx;
+ struct eventfd_ctx **ctx_to_set = NULL;
+ int ret = 0;
+
+ new_ctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
- data->enabled = false;
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ ctx_to_set = &gxp->telemetry_mgr->logging_efd;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ ctx_to_set = &gxp->telemetry_mgr->tracing_efd;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (*ctx_to_set) {
+ dev_warn(gxp->dev,
+ "Replacing existing telemetry eventfd (type=%u)\n",
+ type);
+ eventfd_ctx_put(*ctx_to_set);
+ }
+
+ *ctx_to_set = new_ctx;
out:
mutex_unlock(&gxp->telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
+{
+ int ret = 0;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ eventfd_ctx_put(gxp->telemetry_mgr->logging_efd);
+ gxp->telemetry_mgr->logging_efd = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ eventfd_ctx_put(gxp->telemetry_mgr->tracing_efd);
+ gxp->telemetry_mgr->tracing_efd = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
return ret;
}
+
+struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
+ uint core)
+{
+ struct gxp_telemetry_manager *mgr = gxp->telemetry_mgr;
+
+ if (!mgr || core >= GXP_NUM_CORES)
+ return NULL;
+
+ return &mgr->notification_works[core].work;
+}
diff --git a/gxp-telemetry.h b/gxp-telemetry.h
index f481577..80436ba 100644
--- a/gxp-telemetry.h
+++ b/gxp-telemetry.h
@@ -7,22 +7,33 @@
#ifndef __GXP_TELEMETRY_H__
#define __GXP_TELEMETRY_H__
+#include <linux/eventfd.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include "gxp.h"
#include "gxp-internal.h"
+struct gxp_telemetry_work {
+ struct work_struct work;
+ struct gxp_dev *gxp;
+ uint core;
+};
+
struct gxp_telemetry_manager {
struct buffer_data {
+ u32 host_status;
void *buffers[GXP_NUM_CORES];
dma_addr_t buffer_daddrs[GXP_NUM_CORES];
u32 size;
refcount_t ref_count;
- bool enabled;
} *logging_buff_data, *tracing_buff_data;
/* Protects logging_buff_data and tracing_buff_data */
struct mutex lock;
+ struct gxp_telemetry_work notification_works[GXP_NUM_CORES];
+ wait_queue_head_t waitq;
+ struct eventfd_ctx *logging_efd;
+ struct eventfd_ctx *tracing_efd;
};
/**
@@ -77,4 +88,47 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
*/
int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
+/**
+ * gxp_telemetry_register_eventfd() - Register an eventfd to be signaled when
+ * telemetry notifications arrive while the
+ * specified @type of telemetry is enabled
+ * @gxp: The GXP device to register the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @fd: A file descriptor for an eventfd from user-space
+ *
+ * If another eventfd has already been registered for the given @type, the old
+ * eventfd will be unregistered and replaced.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
+ * * -EINVAL - Invalid @type or @fd is not an eventfd
+ */
+int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
+
+/**
+ * gxp_telemetry_unregister_eventfd() - Unregister and release a reference to
+ * a previously registered eventfd
+ * @gxp: The GXP device to unregister the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ */
+int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_telemetry_get_notification_handler() - Get the notification handler work
+ * for the specified core
+ * @gxp: The GXP device to obtain the handler for
+ * @core: The physical core number to obtain the handler
+ *
+ * Return: A pointer to the work_struct for the @core's notification handler if
+ * successful. NULL if telemetry has not been initialized or @core is
+ * invalid.
+ */
+struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
+ uint core);
+
#endif /* __GXP_TELEMETRY_H__ */
diff --git a/gxp-thermal.c b/gxp-thermal.c
new file mode 100644
index 0000000..71cb67e
--- /dev/null
+++ b/gxp-thermal.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform thermal driver for GXP.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/version.h>
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+#include <linux/acpm_dvfs.h>
+#endif
+
+#include "gxp-internal.h"
+#include "gxp-pm.h"
+#include "gxp-thermal.h"
+#include "gxp-lpm.h"
+
+#define MAX_NUM_GXP_STATES 10
+#define OF_DATA_NUM_MAX (MAX_NUM_GXP_STATES * 2)
+
+/*
+ * Value comes from internal measurement
+ * https://docs.google.com/spreadsheets
+ * /d/1owRNFlm9EH-7IsycHXBnctyzGAd5j-VyLQOZ1ysFb7c
+ */
+static struct gxp_state_pwr state_pwr_map[MAX_NUM_GXP_STATES] = {
+ {1055000, 180},
+ {750000, 72},
+ {373000, 21},
+ {178000, 10},
+};
+
+static int gxp_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct gxp_thermal_manager *thermal = cdev->devdata;
+
+ if (!thermal->gxp_num_states)
+ return -EIO;
+
+ *state = thermal->gxp_num_states - 1;
+ return 0;
+}
+
+/*
+ * Set cooling state.
+ */
+static int gxp_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long cooling_state)
+{
+ int ret = 0;
+ struct gxp_thermal_manager *thermal = cdev->devdata;
+ struct device *dev = thermal->gxp->dev;
+ unsigned long pwr_state;
+
+ if (cooling_state >= thermal->gxp_num_states) {
+ dev_err(dev, "%s: invalid cooling state %lu\n", __func__,
+ cooling_state);
+ return -EINVAL;
+ }
+
+ mutex_lock(&thermal->lock);
+ cooling_state = max(thermal->sysfs_req, cooling_state);
+ if (cooling_state >= ARRAY_SIZE(state_pwr_map)) {
+ dev_err(dev, "Unsupported cooling state: %lu\n", cooling_state);
+ ret = -EINVAL;
+ goto out;
+ }
+ pwr_state = state_pwr_map[cooling_state].state;
+ dev_dbg(dev, "setting policy %ld\n", pwr_state);
+ if (cooling_state != thermal->cooling_state) {
+#ifdef CONFIG_GXP_CLOUDRIPPER
+ ret = exynos_acpm_set_policy(AUR_DVFS_DOMAIN,
+ pwr_state < AUR_UUD ? AUR_UUD : pwr_state);
+#endif
+ if (ret) {
+ dev_err(dev,
+ "error setting gxp cooling policy: %d\n", ret);
+ goto out;
+ }
+ thermal->cooling_state = cooling_state;
+ } else {
+ ret = -EALREADY;
+ }
+
+out:
+ mutex_unlock(&thermal->lock);
+ return ret;
+}
+
+static int gxp_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ int ret = 0;
+ struct gxp_thermal_manager *thermal = cdev->devdata;
+
+ mutex_lock(&thermal->lock);
+ *state = thermal->cooling_state;
+ if (*state >= thermal->gxp_num_states) {
+ dev_err(thermal->gxp->dev,
+ "Unknown cooling state: %lu, resetting\n", *state);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ mutex_unlock(&thermal->lock);
+ return ret;
+}
+
+static int gxp_state2power_internal(unsigned long state, u32 *power,
+ struct gxp_thermal_manager *thermal)
+{
+ int i;
+
+ for (i = 0; i < thermal->gxp_num_states; i++) {
+ if (state == state_pwr_map[i].state) {
+ *power = state_pwr_map[i].power;
+ return 0;
+ }
+ }
+ dev_err(thermal->gxp->dev, "Unknown state req for: %lu\n", state);
+ *power = 0;
+ return -EINVAL;
+}
+
+static int gxp_get_requested_power(struct thermal_cooling_device *cdev,
+ u32 *power)
+{
+ /* Use ACTIVE_NOM as default value */
+ unsigned long power_state = AUR_NOM;
+ struct gxp_thermal_manager *cooling = cdev->devdata;
+#ifdef CONFIG_GXP_CLOUDRIPPER
+
+ power_state = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, 0);
+#endif
+ return gxp_state2power_internal(power_state, power,
+ cooling);
+}
+
+/* TODO(b/213272324): Move state2power table to dts */
+static int gxp_state2power(struct thermal_cooling_device *cdev,
+ unsigned long state, u32 *power)
+{
+ struct gxp_thermal_manager *thermal = cdev->devdata;
+
+ if (state >= thermal->gxp_num_states) {
+ dev_err(thermal->gxp->dev, "%s: invalid state: %lu\n", __func__,
+ state);
+ return -EINVAL;
+ }
+
+ return gxp_state2power_internal(state_pwr_map[state].state, power,
+ thermal);
+}
+
+/*
+ * power2state() callback: map an allowed power budget (mW) to the lowest
+ * cooling-state index whose consumption fits within it. Falls back to
+ * state 0 (no throttling possible) when fewer than two states exist.
+ */
+static int gxp_power2state(struct thermal_cooling_device *cdev,
+ u32 power, unsigned long *state)
+{
+ int i, penultimate_throttle_state;
+ struct gxp_thermal_manager *thermal = cdev->devdata;
+
+ *state = 0;
+ /* Less than 2 state means we cannot really throttle */
+ if (thermal->gxp_num_states < 2)
+ return thermal->gxp_num_states == 1 ? 0 : -EIO;
+
+ penultimate_throttle_state = thermal->gxp_num_states - 2;
+ /*
+ * argument "power" is the maximum allowed power consumption in mW as
+ * defined by the PID control loop. Check for the first state that is
+ * less than or equal to the current allowed power. state_pwr_map is
+ * descending, so lowest power consumption is last value in the array
+ * return lowest state even if it consumes more power than allowed as
+ * not all platforms can handle throttling below an active state
+ */
+ for (i = penultimate_throttle_state; i >= 0; --i) {
+ if (power < state_pwr_map[i].power) {
+ *state = i + 1;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Cooling-device callbacks registered with the kernel thermal framework */
+static struct thermal_cooling_device_ops gxp_cooling_ops = {
+ .get_max_state = gxp_get_max_state,
+ .get_cur_state = gxp_get_cur_state,
+ .set_cur_state = gxp_set_cur_state,
+ .get_requested_power = gxp_get_requested_power,
+ .state2power = gxp_state2power,
+ .power2state = gxp_power2state,
+};
+
+/* Tear down the cooling device, tolerating a never-registered cdev */
+static void gxp_thermal_exit(struct gxp_thermal_manager *thermal)
+{
+ if (!IS_ERR_OR_NULL(thermal->cdev))
+ thermal_cooling_device_unregister(thermal->cdev);
+}
+
+/* devres release callback: runs gxp_thermal_exit() on device teardown */
+static void devm_gxp_thermal_release(struct device *dev, void *res)
+{
+ struct gxp_thermal_manager *thermal = res;
+
+ gxp_thermal_exit(thermal);
+}
+
+/* sysfs "user_vote" read handler: show the user-requested cooling state */
+static ssize_t
+user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev =
+ container_of(dev, struct thermal_cooling_device,
+ device);
+ struct gxp_thermal_manager *cooling = cdev->devdata;
+
+ if (!cooling)
+ return -ENODEV;
+
+ return sysfs_emit(buf, "%lu\n", cooling->sysfs_req);
+}
+
+/*
+ * sysfs "user_vote" write handler: record a user-requested cooling state
+ * and kick the thermal core (thermal_cdev_update) to re-evaluate the
+ * effective state. Rejects states outside [0, gxp_num_states).
+ */
+static ssize_t user_vote_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev =
+ container_of(dev, struct thermal_cooling_device,
+ device);
+ struct gxp_thermal_manager *cooling = cdev->devdata;
+ int ret;
+ unsigned long state;
+
+ if (!cooling)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ if (state >= cooling->gxp_num_states)
+ return -EINVAL;
+
+ /* cdev->lock protects sysfs_req/updated as seen by the thermal core */
+ mutex_lock(&cdev->lock);
+ cooling->sysfs_req = state;
+ cdev->updated = false;
+ mutex_unlock(&cdev->lock);
+ thermal_cdev_update(cdev);
+ return count;
+}
+
+static DEVICE_ATTR_RW(user_vote);
+
+/*
+ * Register the GXP cooling device with the thermal framework and create
+ * the "user_vote" sysfs attribute on it.
+ *
+ * A missing DT cooling node is currently only warned about (registration
+ * proceeds with a NULL node) - see the TODO below.
+ */
+static int
+gxp_thermal_cooling_register(struct gxp_thermal_manager *thermal, char *type)
+{
+ struct device_node *cooling_node = NULL;
+
+ thermal->op_data = NULL;
+ thermal->gxp_num_states = ARRAY_SIZE(state_pwr_map);
+
+ mutex_init(&thermal->lock);
+ cooling_node = of_find_node_by_name(NULL, GXP_COOLING_NAME);
+
+ /* TODO: Change this to fatal error once dts change is merged */
+ if (!cooling_node)
+ dev_warn(thermal->gxp->dev, "failed to find cooling node\n");
+ /* Initialize the cooling state as 0, means "no cooling" */
+ thermal->cooling_state = 0;
+ thermal->cdev = thermal_of_cooling_device_register(
+ cooling_node, type, thermal, &gxp_cooling_ops);
+ if (IS_ERR(thermal->cdev))
+ return PTR_ERR(thermal->cdev);
+
+ return device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
+}
+
+/*
+ * Set up the cooling debugfs directory (failure is non-fatal) and
+ * register the cooling device; unwinds via gxp_thermal_exit() on error.
+ */
+static int cooling_init(struct gxp_thermal_manager *thermal, struct device *dev)
+{
+ int err;
+ struct dentry *d;
+
+ d = debugfs_create_dir("cooling", thermal->gxp->d_entry);
+ /* don't let debugfs creation failure abort the init procedure */
+ if (IS_ERR_OR_NULL(d))
+ dev_warn(dev, "failed to create debug fs for cooling");
+ thermal->cooling_root = d;
+
+ err = gxp_thermal_cooling_register(thermal, GXP_COOLING_NAME);
+ if (err) {
+ dev_err(dev, "failed to initialize external cooling\n");
+ gxp_thermal_exit(thermal);
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Allocate and initialize the thermal manager for @gxp as a devres
+ * resource, so devm_gxp_thermal_release() tears it down automatically
+ * when the device is unbound.
+ *
+ * Returns the manager on success or an ERR_PTR on failure.
+ */
+struct gxp_thermal_manager
+*gxp_thermal_init(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct gxp_thermal_manager *thermal;
+ int err;
+
+ thermal = devres_alloc(devm_gxp_thermal_release, sizeof(*thermal),
+ GFP_KERNEL);
+ if (!thermal)
+ return ERR_PTR(-ENOMEM);
+
+ thermal->gxp = gxp;
+ err = cooling_init(thermal, dev);
+ if (err) {
+ /* Not yet added to devres - free manually */
+ devres_free(thermal);
+ return ERR_PTR(err);
+ }
+
+ devres_add(dev, thermal);
+ return thermal;
+}
diff --git a/gxp-thermal.h b/gxp-thermal.h
new file mode 100644
index 0000000..c1939ef
--- /dev/null
+++ b/gxp-thermal.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Platform thermal driver for GXP.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+#ifndef __GXP_THERMAL_H__
+#define __GXP_THERMAL_H__
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/thermal.h>
+
+#include "gxp-internal.h"
+#include "gxp-pm.h"
+
+#define GXP_COOLING_NAME "gxp-cooling"
+
+struct gxp_thermal_manager {
+ struct dentry *cooling_root;
+ struct thermal_cooling_device *cdev;
+ struct mutex lock;
+ void *op_data;
+ unsigned long cooling_state;
+ unsigned long sysfs_req;
+ unsigned int gxp_num_states;
+ struct gxp_dev *gxp;
+ bool thermal_suspended; /* GXP thermal suspended state */
+};
+
+/*
+ * Internal structure to do the state/pwr mapping
+ * state: kHz that AUR is running
+ * power: mW that the state consume
+ */
+struct gxp_state_pwr {
+ unsigned long state;
+ u32 power;
+};
+
+struct gxp_thermal_manager *gxp_thermal_init(struct gxp_dev *gxp);
+
+#endif /* __GXP_THERMAL_H__ */
diff --git a/gxp-vd.c b/gxp-vd.c
index fcb99ea..bc5a6e3 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -21,33 +21,43 @@ int gxp_vd_init(struct gxp_dev *gxp)
uint core;
int ret;
- mutex_init(&gxp->vd_lock);
- mutex_lock(&gxp->vd_lock);
+ init_rwsem(&gxp->vd_semaphore);
- /* Mark all cores as free */
+ /* All cores start as free */
for (core = 0; core < GXP_NUM_CORES; core++)
gxp->core_to_client[core] = NULL;
ret = gxp_fw_init(gxp);
- mutex_unlock(&gxp->vd_lock);
+
return ret;
}
void gxp_vd_destroy(struct gxp_dev *gxp)
{
- mutex_lock(&gxp->vd_lock);
+ down_write(&gxp->vd_semaphore);
gxp_fw_destroy(gxp);
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
}
-/* Caller must hold gxp->vd_lock */
+/* Caller must hold gxp->vd_semaphore for writing */
static void gxp_vd_release(struct gxp_client *client)
{
uint core;
struct gxp_dev *gxp = client->gxp;
+ /*
+ * Put all cores in the VD into reset so they can not wake each other up
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_client[core] == client) {
+ gxp_write_32_core(
+ gxp, core, GXP_REG_ETM_PWRCTL,
+ 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ }
+ }
+
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_client[core] == client) {
gxp->core_to_client[core] = NULL;
@@ -60,7 +70,7 @@ static void gxp_vd_release(struct gxp_client *client)
}
}
-/* Caller must hold gxp->vd_lock */
+/* Caller must hold gxp->vd_semaphore for writing */
int gxp_vd_allocate(struct gxp_client *client, u16 requested_cores)
{
struct gxp_dev *gxp = client->gxp;
@@ -133,10 +143,10 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core)
uint phys_core;
uint virt_core_index = 0;
- mutex_lock(&gxp->vd_lock);
+ down_read(&gxp->vd_semaphore);
if (!client->vd_allocated) {
- mutex_unlock(&gxp->vd_lock);
+ up_read(&gxp->vd_semaphore);
dev_dbg(gxp->dev, "Client has not allocated a virtual device\n");
return -EINVAL;
}
@@ -145,7 +155,7 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core)
if (gxp->core_to_client[phys_core] == client) {
if (virt_core_index == virt_core) {
/* Found virtual core */
- mutex_unlock(&gxp->vd_lock);
+ up_read(&gxp->vd_semaphore);
return phys_core;
}
@@ -153,7 +163,7 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core)
}
}
- mutex_unlock(&gxp->vd_lock);
+ up_read(&gxp->vd_semaphore);
dev_dbg(gxp->dev, "No mapping for virtual core %u\n", virt_core);
return -EINVAL;
}
@@ -207,18 +217,18 @@ void gxp_client_destroy(struct gxp_client *client)
{
struct gxp_dev *gxp = client->gxp;
- mutex_lock(&gxp->vd_lock);
+ down_write(&gxp->vd_semaphore);
-#ifdef CONFIG_ANDROID
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
/*
* Unmap TPU buffers, if the mapping is already removed, this
* is a no-op.
*/
gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
-#endif // CONFIG_ANDROID
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
gxp_vd_release(client);
- mutex_unlock(&gxp->vd_lock);
+ up_write(&gxp->vd_semaphore);
kfree(client);
}
diff --git a/gxp-wakelock.c b/gxp-wakelock.c
new file mode 100644
index 0000000..feb5c88
--- /dev/null
+++ b/gxp-wakelock.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP wakelock support
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include "gxp-dma.h"
+#include "gxp-pm.h"
+#include "gxp-wakelock.h"
+
+/*
+ * Allocate the wakelock manager (device-managed) and attach it to @gxp.
+ * Returns 0 on success or -ENOMEM.
+ */
+int gxp_wakelock_init(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ mutex_init(&mgr->lock);
+
+ gxp->wakelock_mgr = mgr;
+
+ return 0;
+}
+
+/*
+ * Take a reference on the GXP wakelock. The first holder powers BLK_AUR
+ * on and programs the SSMTs; failure on either step rolls the count back
+ * (and powers the block off again if SSMT programming failed).
+ *
+ * Returns -EAGAIN while a system suspend is in progress.
+ */
+int gxp_wakelock_acquire(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->suspended) {
+ /*
+ * Don't allow a new client to obtain a wakelock, powering up
+ * BLK_AUR, when the system is going to sleep.
+ */
+ dev_warn(gxp->dev,
+ "Attempt to obtain wakelock while suspending.\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!mgr->count++) {
+ ret = gxp_pm_blk_on(gxp);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to power on BLK_AUR (ret=%d, client count=%u)\n",
+ ret, mgr->count);
+ goto err_blk_on;
+ }
+
+ ret = gxp_dma_ssmt_program(gxp);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to program SSMTs after powering on BLK_AUR (ret=%d)\n",
+ ret);
+ goto err_ssmt_program;
+ }
+ }
+
+out:
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+
+err_ssmt_program:
+ gxp_pm_blk_off(gxp);
+err_blk_on:
+ /* Undo the optimistic increment taken above */
+ mgr->count--;
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
+/*
+ * Drop a reference on the GXP wakelock. The last holder powers BLK_AUR
+ * off; a power-down failure is logged but the reference is still
+ * released. Releasing with no references held is logged as an error.
+ */
+void gxp_wakelock_release(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+
+ if (!mgr->count) {
+ dev_err(gxp->dev,
+ "Attempt to release wakelock with none held.\n");
+ goto out;
+ }
+
+ if (!--mgr->count) {
+ ret = gxp_pm_blk_off(gxp);
+ if (ret)
+ dev_err(gxp->dev,
+ "Failed to power down BLK_AUR (ret=%d, client count=%u)\n",
+ ret, mgr->count);
+ }
+
+out:
+ mutex_unlock(&mgr->lock);
+}
+
+/*
+ * Enter the suspended state if no wakelock references are held.
+ * Returns 0 when suspend may proceed, -EAGAIN when clients are active.
+ */
+int gxp_wakelock_suspend(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+ int ret;
+
+ mutex_lock(&mgr->lock);
+
+ /* Can't suspend if there are any active clients */
+ mgr->suspended = mgr->count == 0;
+ ret = mgr->suspended ? 0 : -EAGAIN;
+
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+}
+
+/* Clear the suspended flag so new wakelock acquisitions are allowed again */
+int gxp_wakelock_resume(struct gxp_dev *gxp)
+{
+ struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
+
+ mutex_lock(&mgr->lock);
+
+ mgr->suspended = false;
+
+ mutex_unlock(&mgr->lock);
+
+ return 0;
+}
diff --git a/gxp-wakelock.h b/gxp-wakelock.h
new file mode 100644
index 0000000..e1406c6
--- /dev/null
+++ b/gxp-wakelock.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP wakelock support
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __GXP_WAKELOCK_H__
+#define __GXP_WAKELOCK_H__
+
+#include "gxp.h"
+#include "gxp-internal.h"
+
+/*
+ * TODO(b/201600514): This is a temporary, basic interface to support
+ * b/204924965. It should be revisited and modified as necessary to properly
+ * support the full wakelock interface exposed to driver users.
+ */
+
+struct gxp_wakelock_manager {
+ /* Protects count and suspended */
+ struct mutex lock;
+ uint count;
+ bool suspended;
+};
+
+/**
+ * gxp_wakelock_init() - Initialize wakelock support
+ * @gxp: The GXP device to initialize wakelock support for
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENOMEM - Insufficient memory is available to initialize support
+ */
+int gxp_wakelock_init(struct gxp_dev *gxp);
+
+/**
+ * gxp_wakelock_acquire() - Increment the GXP wakelock counter
+ * @gxp: The GXP device to increment the wakelock counter for
+ *
+ * If the wakelock counter transitions from 0 to 1, this will result in BLK_AUR
+ * being powered on.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EAGAIN - The system is suspending and BLK_AUR cannot be powered on
+ * * Other - An attempt to power on BLK_AUR failed
+ */
+int gxp_wakelock_acquire(struct gxp_dev *gxp);
+
+/**
+ * gxp_wakelock_release() - Decrement the GXP wakelock counter
+ * @gxp: The GXP device to decrement the wakelock counter for
+ *
+ * If the wakelock counter transitions from 1 to 0, this will result in BLK_AUR
+ * being powered off. In the event BLK_AUR cannot be powered off, a message
+ * will be logged, but the wakelock will still be released.
+ */
+void gxp_wakelock_release(struct gxp_dev *gxp);
+
+/**
+ * gxp_wakelock_suspend() - Check if the wakelock will allow a system suspend
+ * @gxp: The GXP device to check the wakelock of
+ *
+ * Return:
+ * * 0 - The wakelock has been suspended and is ready for system suspend
+ * * -EAGAIN - The wakelock is held, and system suspend should be aborted
+ */
+int gxp_wakelock_suspend(struct gxp_dev *gxp);
+
+/**
+ * gxp_wakelock_resume() - Notify the wakelock that system suspend has exited
+ * @gxp: The GXP device to notify the wakelock of
+ *
+ * Return:
+ * * 0 - The wakelock is ready to be acquired again
+ */
+int gxp_wakelock_resume(struct gxp_dev *gxp);
+
+#endif /* __GXP_WAKELOCK_H__ */
diff --git a/gxp.h b/gxp.h
index 1982667..4861d52 100644
--- a/gxp.h
+++ b/gxp.h
@@ -390,4 +390,32 @@ struct gxp_tpu_mbx_queue_ioctl {
#define GXP_UNMAP_TPU_MBX_QUEUE \
_IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
+struct gxp_register_telemetry_eventfd_ioctl {
+ /*
+ * File-descriptor obtained via eventfd().
+ *
+ * Not used during the unregister step; the driver will unregister
+ * whichever eventfd it has currently registered for @type, if any.
+ */
+ __u32 eventfd;
+ /*
+ * Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ * The driver will signal @eventfd whenever any core signals a
+ * telemetry state change while this type of telemetry is active.
+ */
+ __u8 type;
+};
+
+#define GXP_REGISTER_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 15, struct gxp_register_telemetry_eventfd_ioctl)
+
+#define GXP_UNREGISTER_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 16, struct gxp_register_telemetry_eventfd_ioctl)
+
+/*
+ * Reads the 2 global counter registers in AURORA_TOP and combines them to
+ * return the full 64-bit value of the counter.
+ */
+#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
+
#endif /* __GXP_H__ */