author    Ilamparithi Mahendran <ilamparithim@google.com>    2022-04-04 07:04:43 +0000
committer Ilamparithi Mahendran <ilamparithim@google.com>    2022-04-04 07:19:27 +0000
commit    14814a3c18c7057fe4bea3f0f41bcd7b4af5bda4 (patch)
tree      b5612c1a101ea547f2e8c6cf48062672c97450ec
parent    a739c7f7e74e637cf882fa9ee73dee0aeb4374ee (diff)
Merge branch 'gs201-release' to android13-gs-pixel-5.10
* gs201-release:
  gxp: remove redundant aurora_base global var
  Bug: 176984045
  gxp: remove obsolete TODO of wakelock
  Bug: 201600514
  gxp: correct frequency values for the memory rail
  Bug: 227334108
  gxp: Enforce required wakelocks on OOD IOCTLs
  gxp: Cleanup and rearrange the IOCTLs ordering
  gxp: Split version info into its own IOCTL
  gxp: Fix a race condition in response processing
  Bug: 226982987
  gxp: Fix non-aggressor flag behavior bugs
  Bug: 226682644
  gxp: Require BLOCK wakelock to read global counter
  Bug: 225402208
  gxp: Clean-up pending commands on mailbox close
  Bug: 194060459, 184572070
  gxp: Add missing break in gxp_ioctl()
  Bug: 226501806
  gxp: fix the READY state support in mailbox ioctl
  Bug: b/226281679
  gxp: set non-aggressor bit with non-aggressor vote
  Bug: 221320387
  gxp: support NON_AGGRESSOR flag in power IOCTLs
  Bug: 221320387

Signed-off-by: Ilamparithi Mahendran <ilamparithim@google.com>
Change-Id: I397bf1e41305d2c3b10ad4073d4ba2d81da99de9
-rw-r--r--  gxp-client.c   |   4
-rw-r--r--  gxp-client.h   |   1
-rw-r--r--  gxp-debugfs.c  |  14
-rw-r--r--  gxp-firmware.c |  25
-rw-r--r--  gxp-internal.h |   1
-rw-r--r--  gxp-mailbox.c  |  84
-rw-r--r--  gxp-mailbox.h  |   9
-rw-r--r--  gxp-platform.c | 327
-rw-r--r--  gxp-pm.c       | 112
-rw-r--r--  gxp-pm.h       |  43
-rw-r--r--  gxp-wakelock.h |   6
-rw-r--r--  gxp.h          | 775
12 files changed, 953 insertions(+), 448 deletions(-)
diff --git a/gxp-client.c b/gxp-client.c
index 0fccea6..7ff4b5c 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -30,6 +30,7 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->requested_memory_power_state = 0;
client->vd = NULL;
client->tpu_mbx_allocated = false;
+ client->requested_aggressor = false;
return client;
}
@@ -61,7 +62,8 @@ void gxp_client_destroy(struct gxp_client *client)
if (client->has_block_wakelock) {
gxp_wakelock_release(client->gxp);
gxp_pm_update_requested_power_state(
- gxp, client->requested_power_state, AUR_OFF);
+ gxp, client->requested_power_state,
+ client->requested_aggressor, AUR_OFF, true);
gxp_pm_update_requested_memory_power_state(
gxp, client->requested_memory_power_state,
AUR_MEM_UNDEFINED);
diff --git a/gxp-client.h b/gxp-client.h
index 34c19eb..4f6fe8e 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -31,6 +31,7 @@ struct gxp_client {
uint requested_power_state;
/* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
uint requested_memory_power_state;
+ bool requested_aggressor;
struct gxp_virtual_device *vd;
bool tpu_mbx_allocated;
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index ef2c072..0d9dae6 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -157,7 +157,8 @@ static int gxp_firmware_run_set(void *data, u64 val)
goto err_wakelock;
}
gxp->debugfs_client->has_block_wakelock = true;
- gxp_pm_update_requested_power_state(gxp, AUR_OFF, AUR_UUD);
+ gxp_pm_update_requested_power_state(gxp, AUR_OFF, true, AUR_UUD,
+ true);
down_write(&gxp->vd_semaphore);
ret = gxp_vd_start(gxp->debugfs_client->vd);
@@ -180,7 +181,8 @@ static int gxp_firmware_run_set(void *data, u64 val)
*/
gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
- gxp_pm_update_requested_power_state(gxp, AUR_UUD, AUR_OFF);
+ gxp_pm_update_requested_power_state(gxp, AUR_UUD, true, AUR_OFF,
+ true);
}
out:
@@ -190,7 +192,7 @@ out:
err_start:
gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_state(gxp, AUR_UUD, AUR_OFF);
+ gxp_pm_update_requested_power_state(gxp, AUR_UUD, true, AUR_OFF, true);
err_wakelock:
/* Destroying a client cleans up any VDs or wakelocks it held. */
gxp_client_destroy(gxp->debugfs_client);
@@ -234,7 +236,8 @@ static int gxp_wakelock_set(void *data, u64 val)
goto out;
}
gxp->debugfs_wakelock_held = true;
- gxp_pm_update_requested_power_state(gxp, AUR_OFF, AUR_UUD);
+ gxp_pm_update_requested_power_state(gxp, AUR_OFF, true, AUR_UUD,
+ true);
} else {
/* Wakelock Release */
if (!gxp->debugfs_wakelock_held) {
@@ -245,7 +248,8 @@ static int gxp_wakelock_set(void *data, u64 val)
gxp_wakelock_release(gxp);
gxp->debugfs_wakelock_held = false;
- gxp_pm_update_requested_power_state(gxp, AUR_UUD, AUR_OFF);
+ gxp_pm_update_requested_power_state(gxp, AUR_UUD, true, AUR_OFF,
+ true);
}
out:
diff --git a/gxp-firmware.c b/gxp-firmware.c
index a9a7e5a..2af783e 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -40,7 +40,6 @@
#define FW_IMAGE_TYPE_OFFSET (0x400)
static const struct firmware *fw[GXP_NUM_CORES];
-static void __iomem *aurora_base;
static char *fw_elf[] = {Q7_ELF_FILE0, Q7_ELF_FILE1, Q7_ELF_FILE2,
Q7_ELF_FILE3};
@@ -319,8 +318,8 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
/* Wait for core to come up */
dev_notice(gxp->dev, "Waiting for core %u to power up...\n",
core);
- core_psm_base = ((u8 *)aurora_base) + LPM_BLOCK
- + CORE_PSM_BASE(core);
+ core_psm_base =
+ ((u8 *)gxp->regs.vaddr) + LPM_BLOCK + CORE_PSM_BASE(core);
ctr = 1000;
while (ctr) {
addr = core_psm_base + PSM_STATUS_OFFSET;
@@ -420,8 +419,6 @@ int gxp_fw_init(struct gxp_dev *gxp)
struct resource r;
int ret;
- aurora_base = gxp->regs.vaddr;
-
/* Power on BLK_AUR to read the revision and processor ID registers */
gxp_pm_blk_on(gxp);
@@ -551,6 +548,9 @@ out_firmware_unload:
void gxp_firmware_stop(struct gxp_dev *gxp, uint core)
{
+ struct gxp_async_response *cur, *nxt;
+ unsigned long flags;
+
if (!(gxp->firmware_running & BIT(core)))
dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
@@ -565,6 +565,21 @@ void gxp_firmware_stop(struct gxp_dev *gxp, uint core)
gxp->mailbox_mgr->mailboxes[core]);
dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ /*
+ * TODO(b/226211187) response queues should be owned by VDs
+ * Once the queues are owned directly by the VD, and no longer shared by
+ * all users of a physical core, this step will not be necessary until
+ * the VD itself is destroyed.
+ */
+ /* Flush and free any abandoned responses left in the queue */
+ spin_lock_irqsave(&gxp->mailbox_resps_lock, flags);
+ list_for_each_entry_safe(cur, nxt, &gxp->mailbox_resp_queues[core],
+ list_entry) {
+ list_del(&cur->list_entry);
+ kfree(cur);
+ }
+ spin_unlock_irqrestore(&gxp->mailbox_resps_lock, flags);
+
gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
diff --git a/gxp-internal.h b/gxp-internal.h
index 96ca1d4..c7b66e7 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -111,6 +111,7 @@ struct gxp_dev {
* May be NULL if the chip does not support firmware authentication
*/
struct device *gsa_dev;
+ u32 memory_per_core;
};
/* GXP device IO functions */
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 5cd568b..405c567 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -237,6 +237,7 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
container_of(cur->resp,
struct gxp_async_response,
resp);
+
cancel_delayed_work(&async_resp->timeout_work);
if (async_resp->memory_power_state !=
AUR_MEM_UNDEFINED)
@@ -248,9 +249,12 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
gxp_pm_update_requested_power_state(
async_resp->mailbox->gxp,
async_resp->gxp_power_state,
- AUR_OFF);
+ async_resp->requested_aggressor,
+ AUR_OFF, true);
+
spin_lock_irqsave(async_resp->dest_queue_lock,
flags);
+
list_add_tail(&async_resp->list_entry,
async_resp->dest_queue);
/*
@@ -260,14 +264,24 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
* wait_list_lock and cancelling the timeout.
*/
async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(
- async_resp->dest_queue_lock, flags);
+
+ /*
+ * Don't release the dest_queue_lock until both
+ * any eventfd has been signaled and any waiting
+ * thread has been woken. Otherwise one thread
+ * might consume and free the response before
+ * this function is done with it.
+ */
if (async_resp->client) {
gxp_client_signal_mailbox_eventfd(
async_resp->client,
mailbox->core_id);
}
wake_up(async_resp->dest_queue_waitq);
+
+ spin_unlock_irqrestore(
+ async_resp->dest_queue_lock, flags);
+
}
kfree(cur);
break;
@@ -523,6 +537,10 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_mailbox *mailbox)
{
int i;
+ struct gxp_mailbox_wait_list *cur, *nxt;
+ struct gxp_async_response *async_resp;
+ struct list_head resps_to_flush;
+ unsigned long flags;
if (!mailbox) {
dev_err(mgr->gxp->dev,
@@ -545,6 +563,52 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
cancel_work_sync(mailbox->interrupt_handlers[i]);
}
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ mutex_lock(&mailbox->wait_list_lock);
+ list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->is_async) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ async_resp = container_of(
+ cur->resp, struct gxp_async_response, resp);
+ spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ async_resp->dest_queue = NULL;
+ spin_unlock_irqrestore(async_resp->dest_queue_lock,
+ flags);
+
+ } else {
+ dev_warn(
+ mailbox->gxp->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ mutex_unlock(&mailbox->wait_list_lock);
+
+ /*
+ * Cancel the timeout timer of, and free, any responses that were
+ * still in the `wait_list` above.
+ */
+ list_for_each_entry_safe(cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ async_resp = container_of(cur->resp, struct gxp_async_response,
+ resp);
+ cancel_delayed_work_sync(&async_resp->timeout_work);
+ kfree(async_resp);
+ kfree(cur);
+ }
+
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
@@ -753,7 +817,8 @@ static void async_cmd_timeout_work(struct work_struct *work)
if (async_resp->gxp_power_state != AUR_OFF)
gxp_pm_update_requested_power_state(
async_resp->mailbox->gxp,
- async_resp->gxp_power_state, AUR_OFF);
+ async_resp->gxp_power_state,
+ async_resp->requested_aggressor, AUR_OFF, true);
if (async_resp->client) {
gxp_client_signal_mailbox_eventfd(
@@ -773,6 +838,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
+ bool requested_aggressor,
struct gxp_client *client)
{
struct gxp_async_response *async_resp;
@@ -789,14 +855,16 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
async_resp->gxp_power_state = gxp_power_state;
async_resp->memory_power_state = memory_power_state;
async_resp->client = client;
+ async_resp->requested_aggressor = requested_aggressor;
INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
schedule_delayed_work(&async_resp->timeout_work,
msecs_to_jiffies(MAILBOX_TIMEOUT));
if (gxp_power_state != AUR_OFF)
- gxp_pm_update_requested_power_state(mailbox->gxp, AUR_OFF,
- gxp_power_state);
+ gxp_pm_update_requested_power_state(mailbox->gxp, AUR_OFF, true,
+ gxp_power_state,
+ requested_aggressor);
if (memory_power_state != AUR_MEM_UNDEFINED)
gxp_pm_update_requested_memory_power_state(
mailbox->gxp, AUR_MEM_UNDEFINED, memory_power_state);
@@ -813,7 +881,9 @@ err_free_resp:
mailbox->gxp, memory_power_state, AUR_MEM_UNDEFINED);
if (gxp_power_state != AUR_OFF)
gxp_pm_update_requested_power_state(mailbox->gxp,
- gxp_power_state, AUR_OFF);
+ gxp_power_state,
+ requested_aggressor,
+ AUR_OFF, true);
cancel_delayed_work_sync(&async_resp->timeout_work);
kfree(async_resp);
return ret;
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 99ad7f9..933c177 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -79,7 +79,13 @@ struct gxp_async_response {
struct gxp_mailbox *mailbox;
/* Queue to add the response to once it is complete or timed out */
struct list_head *dest_queue;
+ /*
+ * The lock that protects the queue pointed to by `dest_queue`.
+ * The mailbox code also uses this lock to protect changes to the
+ * `dest_queue` pointer itself when processing this response.
+ */
spinlock_t *dest_queue_lock;
+ /* Wait queue to wake when this response is processed */
wait_queue_head_t *dest_queue_waitq;
/* Specified power state vote during the command execution */
uint gxp_power_state;
@@ -87,6 +93,8 @@ struct gxp_async_response {
uint memory_power_state;
/* gxp_client to signal when the response completes. May be NULL */
struct gxp_client *client;
+ /* Specifies whether the power state vote was requested with the aggressor flag */
+ bool requested_aggressor;
};
enum gxp_response_status {
@@ -186,6 +194,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
+ bool requested_aggressor,
struct gxp_client *client);
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
diff --git a/gxp-platform.c b/gxp-platform.c
index e653e84..f702157 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -364,7 +364,7 @@ gxp_mailbox_command_compat(struct gxp_client *client,
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
&gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
&gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
- memory_power_state, client);
+ memory_power_state, true, client);
up_read(&gxp->vd_semaphore);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
@@ -395,6 +395,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
int phys_core;
int ret = 0;
uint gxp_power_state, memory_power_state;
+ bool requested_aggressor = false;
if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
dev_err(gxp->dev,
@@ -407,7 +408,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
return -EINVAL;
}
if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state > GXP_POWER_STATE_NOM) {
+ ibuf.gxp_power_state > GXP_POWER_STATE_READY) {
dev_err(gxp->dev, "Requested power state is invalid\n");
return -EINVAL;
}
@@ -462,13 +463,14 @@ static int gxp_mailbox_command(struct gxp_client *client,
cmd.buffer_descriptor = buffer;
gxp_power_state = aur_state_array[ibuf.gxp_power_state];
memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
+ requested_aggressor = (ibuf.power_flags & GXP_POWER_NON_AGGRESSOR) == 0;
down_read(&gxp->vd_semaphore);
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
&gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
&gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
- memory_power_state, client);
+ memory_power_state, requested_aggressor, client);
up_read(&gxp->vd_semaphore);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
@@ -607,11 +609,7 @@ static int gxp_get_specs(struct gxp_client *client,
struct gxp_specs_ioctl ibuf;
ibuf.core_count = GXP_NUM_CORES;
- ibuf.version_major = 0;
- ibuf.version_minor = 0;
- ibuf.version_build = 1;
- ibuf.threads_per_core = 1;
- ibuf.memory_per_core = 0;
+ ibuf.memory_per_core = client->gxp->memory_per_core;
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
@@ -667,6 +665,7 @@ gxp_etm_trace_start_command(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
struct gxp_etm_trace_start_ioctl ibuf;
int phys_core;
+ int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
@@ -686,12 +685,23 @@ gxp_etm_trace_start_command(struct gxp_client *client,
if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
return -EINVAL;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_ETM_TRACE_START_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
phys_core =
gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
@@ -700,7 +710,10 @@ gxp_etm_trace_start_command(struct gxp_client *client,
* (b/185819530).
*/
- return 0;
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
@@ -709,16 +722,28 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
u16 virtual_core_id;
int phys_core;
+ int ret = 0;
if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_ETM_TRACE_SW_STOP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
@@ -727,7 +752,10 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
* (b/185819530).
*/
- return 0;
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
@@ -736,15 +764,27 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
u16 virtual_core_id;
int phys_core;
+ int ret = 0;
if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_ETM_TRACE_CLEANUP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
@@ -753,7 +793,10 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
* (b/185819530).
*/
- return 0;
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int
@@ -773,15 +816,35 @@ gxp_etm_get_trace_info_command(struct gxp_client *client,
if (ibuf.type > 1)
return -EINVAL;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_ETM_GET_TRACE_INFO_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
+ if (!trace_header) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
+ if (!trace_data) {
+ ret = -ENOMEM;
+ goto out_free_header;
+ }
/*
* TODO (b/185260919): Get trace information from system FW once
@@ -792,20 +855,24 @@ gxp_etm_get_trace_info_command(struct gxp_client *client,
if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
GXP_TRACE_HEADER_SIZE)) {
ret = -EFAULT;
- goto out;
+ goto out_free_data;
}
if (ibuf.type == 1) {
if (copy_to_user((void __user *)ibuf.trace_data_addr,
trace_data, GXP_TRACE_RAM_SIZE)) {
ret = -EFAULT;
- goto out;
+ goto out_free_data;
}
}
-out:
- kfree(trace_header);
+out_free_data:
kfree(trace_data);
+out_free_header:
+ kfree(trace_header);
+
+out:
+ up_read(&client->semaphore);
return ret;
}
@@ -863,6 +930,16 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_write(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAP_TPU_MBX_QUEUE requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
virtual_core_list = ibuf.virtual_core_list;
core_count = hweight_long(virtual_core_list);
phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
@@ -870,23 +947,24 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (!phys_core_list) {
dev_err(gxp->dev, "%s: invalid virtual core list 0x%x\n",
__func__, virtual_core_list);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
mbx_info =
kmalloc(sizeof(struct edgetpu_ext_mailbox_info) + core_count *
sizeof(struct edgetpu_ext_mailbox_descriptor),
GFP_KERNEL);
- if (!mbx_info)
- return -ENOMEM;
-
- down_write(&client->semaphore);
+ if (!mbx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: Mappings already exist for TPU mailboxes\n",
__func__);
ret = -EBUSY;
- goto error;
+ goto out_free;
}
gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
@@ -899,7 +977,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (ret) {
dev_err(gxp->dev, "%s: Failed to allocate ext tpu mailboxes %d\n",
__func__, ret);
- goto error;
+ goto out_free;
}
/* Align queue size to page size for iommu map. */
mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
@@ -913,17 +991,19 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
NULL);
- goto error;
+ goto out_free;
}
client->mbx_desc.phys_core_list = phys_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
client->tpu_mbx_allocated = true;
-error:
+out_free:
+ kfree(mbx_info);
+
+out:
up_write(&client->semaphore);
- kfree(mbx_info);
return ret;
#else
return -ENODEV;
@@ -942,8 +1022,16 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
down_write(&client->semaphore);
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_TPU_MBX_QUEUE requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: No mappings exist for TPU mailboxes\n",
__func__);
@@ -1006,6 +1094,17 @@ static int gxp_read_global_counter(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
u32 high_first, high_second, low;
u64 counter_val;
+ int ret = 0;
+
+ /* Caller must hold BLOCK wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_READ_GLOBAL_COUNTER requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
@@ -1022,9 +1121,120 @@ static int gxp_read_global_counter(struct gxp_client *client,
counter_val = ((u64)high_second << 32) | low;
if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_acquire_wake_lock_compat(
+ struct gxp_client *client,
+ struct gxp_acquire_wakelock_compat_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_acquire_wakelock_compat_ioctl ibuf;
+ bool acquired_block_wakelock = false;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- return 0;
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state > GXP_POWER_STATE_READY) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
+ ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
+ dev_err(gxp->dev,
+ "Requested memory power state %d is invalid\n",
+ ibuf.memory_power_state);
+ return -EINVAL;
+ }
+
+ down_write(&client->semaphore);
+
+ /* Acquire a BLOCK wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
+ if (!client->has_block_wakelock) {
+ ret = gxp_wakelock_acquire(gxp);
+ acquired_block_wakelock = true;
+ }
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
+ ret);
+ goto out;
+ }
+
+ client->has_block_wakelock = true;
+ }
+
+ /* Acquire a VIRTUAL_DEVICE wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
+ ret = -EINVAL;
+ goto out;
+
+ }
+
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ ret = gxp_vd_start(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
+ ret);
+ goto err_acquiring_vd_wl;
+ }
+
+ client->has_vd_wakelock = true;
+ }
+
+ gxp_pm_update_requested_power_state(
+ gxp, client->requested_power_state, client->requested_aggressor,
+ aur_state_array[ibuf.gxp_power_state], true);
+ client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
+ client->requested_aggressor = true;
+ gxp_pm_update_requested_memory_power_state(
+ gxp, client->requested_memory_power_state,
+ aur_memory_state_array[ibuf.memory_power_state]);
+ client->requested_memory_power_state =
+ aur_memory_state_array[ibuf.memory_power_state];
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+
+err_acquiring_vd_wl:
+ /*
+ * In a single call, if any wakelock acquisition fails, all of them do.
+ * If the client was acquiring both wakelocks and failed to acquire the
+ * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
+ * wakelock, then release it before returning the error code.
+ */
+ if (acquired_block_wakelock) {
+ gxp_wakelock_release(gxp);
+ client->has_block_wakelock = false;
+ }
+
+ up_write(&client->semaphore);
+
+ return ret;
}
static int gxp_acquire_wake_lock(struct gxp_client *client,
@@ -1033,6 +1243,7 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
struct gxp_acquire_wakelock_ioctl ibuf;
bool acquired_block_wakelock = false;
+ bool requested_aggressor = false;
int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
@@ -1101,11 +1312,13 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
client->has_vd_wakelock = true;
}
+ requested_aggressor = (ibuf.flags & GXP_POWER_NON_AGGRESSOR) == 0;
gxp_pm_update_requested_power_state(
- gxp, client->requested_power_state,
- aur_state_array[ibuf.gxp_power_state]);
+ gxp, client->requested_power_state, client->requested_aggressor,
+ aur_state_array[ibuf.gxp_power_state], requested_aggressor);
client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
+ client->requested_aggressor = requested_aggressor;
gxp_pm_update_requested_memory_power_state(
gxp, client->requested_memory_power_state,
aur_memory_state_array[ibuf.memory_power_state]);
@@ -1181,7 +1394,8 @@ static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
* to change the power state.
*/
gxp_pm_update_requested_power_state(
- gxp, client->requested_power_state, AUR_OFF);
+ gxp, client->requested_power_state,
+ client->requested_aggressor, AUR_OFF, true);
client->requested_power_state = AUR_OFF;
gxp_pm_update_requested_memory_power_state(
gxp, client->requested_memory_power_state,
@@ -1332,6 +1546,33 @@ static int gxp_unregister_mailbox_eventfd(
return 0;
}
+static int
+gxp_get_interface_version(struct gxp_client *client,
+ struct gxp_interface_version_ioctl __user *argp)
+{
+ struct gxp_interface_version_ioctl ibuf;
+ int ret;
+
+ ibuf.version_major = GXP_INTERFACE_VERSION_MAJOR;
+ ibuf.version_minor = GXP_INTERFACE_VERSION_MINOR;
+ memset(ibuf.version_build, 0, GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE);
+ ret = snprintf(ibuf.version_build,
+ GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1,
+ GIT_REPO_TAG);
+
+ if (ret < 0 || ret >= GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE) {
+ dev_warn(
+ client->gxp->dev,
+ "Buffer size insufficient to hold GIT_REPO_TAG (size=%d)\n",
+ ret);
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -1393,8 +1634,8 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_READ_GLOBAL_COUNTER:
ret = gxp_read_global_counter(client, argp);
break;
- case GXP_ACQUIRE_WAKE_LOCK:
- ret = gxp_acquire_wake_lock(client, argp);
+ case GXP_ACQUIRE_WAKE_LOCK_COMPAT:
+ ret = gxp_acquire_wake_lock_compat(client, argp);
break;
case GXP_RELEASE_WAKE_LOCK:
ret = gxp_release_wake_lock(client, argp);
@@ -1414,6 +1655,12 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_UNREGISTER_MAILBOX_EVENTFD:
ret = gxp_unregister_mailbox_eventfd(client, argp);
break;
+ case GXP_ACQUIRE_WAKE_LOCK:
+ ret = gxp_acquire_wake_lock(client, argp);
+ break;
+ case GXP_GET_INTERFACE_VERSION:
+ ret = gxp_get_interface_version(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -1463,6 +1710,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
int ret;
int __maybe_unused i;
bool __maybe_unused tpu_found;
+ u64 prop;
dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
@@ -1648,6 +1896,15 @@ static int gxp_platform_probe(struct platform_device *pdev)
"GSA device found, Firmware authentication available\n");
}
+ ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
+ &prop);
+ if (ret) {
+ dev_err(dev, "Unable to get memory-per-core from device tree\n");
+ gxp->memory_per_core = 0;
+ } else {
+ gxp->memory_per_core = (u32)prop;
+ }
+
gxp_fw_data_init(gxp);
gxp_telemetry_init(gxp);
gxp_create_debugfs(gxp);
diff --git a/gxp-pm.c b/gxp-pm.c
index c315a46..e7d0a6a 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -40,9 +40,11 @@ static const uint aur_memory_state_array[] = {
* values are copied from the implementation in TPU firmware for PRO,
* i.e. google3/third_party/darwinn/firmware/janeiro/power_manager.cc.
*/
-static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200, 332, 465, 533 };
-static const s32 aur_memory_state2mif_table[] = { 0, 0, 0, 1014,
- 1352, 2028, 3172 };
+static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200000,
+ 332000, 465000, 533000 };
+static const s32 aur_memory_state2mif_table[] = { 0, 0, 0,
+ 1014000, 1352000, 2028000,
+ 3172000 };
static struct gxp_pm_device_ops gxp_aur_ops = {
.pre_blk_powerup = NULL,
@@ -97,9 +99,14 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
return ret;
}
-static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
+static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state, bool aggressor)
{
- return gxp_pm_blk_set_rate_acpm(gxp, aur_power_state2rate[state]);
+ unsigned long rate;
+
+ rate = aur_power_state2rate[state];
+ if (!aggressor)
+ rate |= BIT(AUR_NON_AGGRESSOR_BIT);
+ return gxp_pm_blk_set_rate_acpm(gxp, rate);
}
int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
@@ -136,7 +143,9 @@ static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
else if (set_acpm_state_work->state == AUR_READY)
set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
- gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp, set_acpm_state_work->state);
+ gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp,
+ set_acpm_state_work->state,
+ set_acpm_state_work->aggressor_vote);
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
@@ -163,7 +172,8 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_pm_blkpwr_up(gxp);
if (!ret) {
- gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE,
+ true /*aggressor*/);
gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
}
@@ -265,53 +275,59 @@ int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
return 0;
}
-static int gxp_pm_req_state_locked(struct gxp_dev *gxp, enum aur_power_state state)
+static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
+ enum aur_power_state state,
+ bool aggressor_vote)
{
if (state > AUR_MAX_ALLOW_STATE) {
dev_err(gxp->dev, "Invalid state %d\n", state);
return -EINVAL;
}
- if (state != gxp->power_mgr->curr_state) {
+ if (state != gxp->power_mgr->curr_state ||
+ aggressor_vote != gxp->power_mgr->curr_aggressor_vote) {
if (state == AUR_OFF) {
- dev_warn(gxp->dev, "It is not supported to request AUR_OFF\n");
+ dev_warn(gxp->dev,
+ "It is not supported to request AUR_OFF\n");
} else {
gxp->power_mgr->set_acpm_state_work.gxp = gxp;
gxp->power_mgr->set_acpm_state_work.state = state;
- gxp->power_mgr->set_acpm_state_work.prev_state = gxp->power_mgr->curr_state;
+ gxp->power_mgr->set_acpm_state_work.aggressor_vote =
+ aggressor_vote;
+ gxp->power_mgr->set_acpm_state_work.prev_state =
+ gxp->power_mgr->curr_state;
queue_work(gxp->power_mgr->wq,
&gxp->power_mgr->set_acpm_state_work.work);
}
gxp->power_mgr->curr_state = state;
+ gxp->power_mgr->curr_aggressor_vote = aggressor_vote;
}
return 0;
}
-int gxp_pm_req_state(struct gxp_dev *gxp, enum aur_power_state state)
-{
- int ret = 0;
-
- mutex_lock(&gxp->power_mgr->pm_lock);
- ret = gxp_pm_req_state_locked(gxp, state);
- mutex_unlock(&gxp->power_mgr->pm_lock);
- return ret;
-}
-
/* Caller must hold pm_lock */
static void gxp_pm_revoke_power_state_vote(struct gxp_dev *gxp,
- enum aur_power_state revoked_state)
+ enum aur_power_state revoked_state,
+ bool origin_requested_aggressor)
{
unsigned int i;
+ uint *pwr_state_req_count;
if (revoked_state == AUR_OFF)
return;
+ if (origin_requested_aggressor)
+ pwr_state_req_count = gxp->power_mgr->pwr_state_req_count;
+ else
+ pwr_state_req_count =
+ gxp->power_mgr->non_aggressor_pwr_state_req_count;
+
for (i = 0; i < AUR_NUM_POWER_STATE; i++) {
if (aur_state_array[i] == revoked_state) {
- if (gxp->power_mgr->pwr_state_req_count[i] == 0)
+ if (pwr_state_req_count[i] == 0)
dev_err(gxp->dev, "Invalid state %d\n",
revoked_state);
else
- gxp->power_mgr->pwr_state_req_count[i]--;
+ pwr_state_req_count[i]--;
return;
}
}
@@ -319,47 +335,70 @@ static void gxp_pm_revoke_power_state_vote(struct gxp_dev *gxp,
/* Caller must hold pm_lock */
static void gxp_pm_vote_power_state(struct gxp_dev *gxp,
- enum aur_power_state state)
+ enum aur_power_state state,
+ bool requested_aggressor)
{
unsigned int i;
+ uint *pwr_state_req_count;
if (state == AUR_OFF)
return;
+ if (requested_aggressor)
+ pwr_state_req_count = gxp->power_mgr->pwr_state_req_count;
+ else
+ pwr_state_req_count =
+ gxp->power_mgr->non_aggressor_pwr_state_req_count;
+
for (i = 0; i < AUR_NUM_POWER_STATE; i++) {
if (aur_state_array[i] == state) {
- gxp->power_mgr->pwr_state_req_count[i]++;
+ pwr_state_req_count[i]++;
return;
}
}
}
/* Caller must hold pm_lock */
-static unsigned long gxp_pm_get_max_voted_power_state(struct gxp_dev *gxp)
+static void gxp_pm_get_max_voted_power_state(struct gxp_dev *gxp,
+ unsigned long *state,
+ bool *aggressor_vote)
{
int i;
- unsigned long state = AUR_OFF;
+ *state = AUR_OFF;
for (i = AUR_NUM_POWER_STATE - 1; i >= 0; i--) {
if (gxp->power_mgr->pwr_state_req_count[i] > 0) {
- state = aur_state_array[i];
+ *aggressor_vote = true;
+ *state = aur_state_array[i];
break;
}
}
- return state;
+ if (*state == AUR_OFF) {
+ /* No aggressor vote, check non-aggressor vote counts */
+ *aggressor_vote = false;
+ for (i = AUR_NUM_POWER_STATE - 1; i >= 0; i--) {
+ if (gxp->power_mgr->non_aggressor_pwr_state_req_count[i] > 0) {
+ *state = aur_state_array[i];
+ break;
+ }
+ }
+ }
}
int gxp_pm_update_requested_power_state(struct gxp_dev *gxp,
enum aur_power_state origin_state,
- enum aur_power_state requested_state)
+ bool origin_requested_aggressor,
+ enum aur_power_state requested_state,
+ bool requested_aggressor)
{
int ret;
- unsigned long max_state;
+ unsigned long max_state = AUR_OFF;
+ bool aggressor_vote = false;
mutex_lock(&gxp->power_mgr->pm_lock);
- gxp_pm_revoke_power_state_vote(gxp, origin_state);
- gxp_pm_vote_power_state(gxp, requested_state);
- max_state = gxp_pm_get_max_voted_power_state(gxp);
- ret = gxp_pm_req_state_locked(gxp, max_state);
+ gxp_pm_revoke_power_state_vote(gxp, origin_state, origin_requested_aggressor);
+ gxp_pm_vote_power_state(gxp, requested_state, requested_aggressor);
+ gxp_pm_get_max_voted_power_state(gxp, &max_state, &aggressor_vote);
+ ret = gxp_pm_req_state_locked(gxp, max_state, aggressor_vote);
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
@@ -511,6 +550,7 @@ int gxp_pm_init(struct gxp_dev *gxp)
mutex_init(&mgr->pm_lock);
mgr->curr_state = AUR_OFF;
mgr->curr_memory_state = AUR_MEM_UNDEFINED;
+ mgr->curr_aggressor_vote = true;
refcount_set(&(mgr->blk_wake_ref), 0);
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
diff --git a/gxp-pm.h b/gxp-pm.h
index 9834247..111ba7a 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -52,6 +52,13 @@ enum aur_power_cmu_mux_state {
#define AUR_MAX_ALLOW_STATE AUR_READY
#define AUR_MAX_ALLOW_MEMORY_STATE AUR_MEM_MAX
+/*
+ * The bit that indicates a non-aggressor vote for `exynos_acpm_set_rate`.
+ * The lower 3 bytes of the frequency parameter of `exynos_acpm_set_rate`
+ * still hold the requested rate.
+ */
+#define AUR_NON_AGGRESSOR_BIT 24
+
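For illustration only (not part of this patch), a minimal sketch of the
encoding described above; the 178000 kHz rate is a hypothetical placeholder,
and only the bit position comes from this header:

	#include <stdint.h>
	#include <stdio.h>

	#define AUR_NON_AGGRESSOR_BIT 24

	int main(void)
	{
		/* Hypothetical requested rate in kHz; any value < 2^24 fits. */
		uint32_t rate_khz = 178000;
		uint32_t encoded = rate_khz | (1u << AUR_NON_AGGRESSOR_BIT);

		/* The lower 3 bytes still carry the requested rate. */
		printf("encoded=0x%08x rate=%u kHz\n",
		       encoded, encoded & 0x00ffffffu);
		return 0;
	}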
struct gxp_pm_device_ops {
int (*pre_blk_powerup)(struct gxp_dev *gxp);
int (*post_blk_powerup)(struct gxp_dev *gxp);
@@ -64,6 +71,7 @@ struct gxp_set_acpm_state_work {
struct gxp_dev *gxp;
unsigned long state;
unsigned long prev_state;
+ bool aggressor_vote;
};
struct gxp_req_pm_qos_work {
@@ -77,7 +85,9 @@ struct gxp_power_manager {
struct gxp_dev *gxp;
struct mutex pm_lock;
uint pwr_state_req_count[AUR_NUM_POWER_STATE];
+ uint non_aggressor_pwr_state_req_count[AUR_NUM_POWER_STATE];
uint mem_pwr_state_req_count[AUR_NUM_MEMORY_POWER_STATE];
+ bool curr_aggressor_vote;
int curr_state;
int curr_memory_state;
refcount_t blk_wake_ref;
@@ -142,8 +152,8 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core);
int gxp_pm_core_off(struct gxp_dev *gxp, uint core);
/**
- * gxp_pm_acquire_blk_wakelock() - Acquire blk wakelock
- * to make sure block won't shutdown.
+ * gxp_pm_acquire_blk_wakelock() - Acquire blk wakelock to make sure the block
+ * won't shut down.
*
* Can be called multiple times and it will increase
* reference count.
@@ -170,20 +180,8 @@ int gxp_pm_acquire_blk_wakelock(struct gxp_dev *gxp);
int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp);
/**
- * gxp_pm_req_state() - API to request a desired power state.
- * @gxp: The GXP device to operate
- * @state: The requested state
- *
- * Return:
- * * 0 - Voting registered
- * * -EINVAL - Invalid requested state
- */
-int gxp_pm_req_state(struct gxp_dev *gxp, enum aur_power_state state);
-
-/**
- * gxp_pm_init() - API for initialize PM
- * interface for GXP, should only be called
- * once per probe
+ * gxp_pm_init() - API to initialize the PM interface for GXP; should only be
+ * called once per probe
* @gxp: The GXP device to operate
*
* Return:
@@ -232,8 +230,13 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
* requested state.
* @gxp: The GXP device to operate.
* @origin_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_OFF.
+ * the first vote, pass AUR_OFF.
+ * @origin_requested_aggressor: Specifies whether the existing vote was requested
+ * with the aggressor flag.
* @requested_state: The new requested state.
+ * @requested_aggressor: Specifies whether the new vote is requested with the
+ * aggressor flag. Has no effect if @requested_state is
+ * AUR_OFF.
*
* Return:
* * 0 - Voting registered
@@ -241,14 +244,16 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
*/
int gxp_pm_update_requested_power_state(struct gxp_dev *gxp,
enum aur_power_state origin_state,
- enum aur_power_state requested_state);
+ bool origin_requested_aggressor,
+ enum aur_power_state requested_state,
+ bool requested_aggressor);
/**
* gxp_pm_update_requested_memory_power_state() - API for a GXP client to vote for a
* requested memory power state.
* @gxp: The GXP device to operate.
* @origin_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_MEM_UNDEFINED.
+ * the first vote, pass AUR_MEM_UNDEFINED.
* @requested_state: The new requested state.
*
* Return:
diff --git a/gxp-wakelock.h b/gxp-wakelock.h
index e1406c6..50e4628 100644
--- a/gxp-wakelock.h
+++ b/gxp-wakelock.h
@@ -10,12 +10,6 @@
#include "gxp.h"
#include "gxp-internal.h"
-/*
- * TODO(b/201600514): This is a temporary, basic interface to support
- * b/204924965. It should be revisited and modified as necessary to properly
- * support the full wakelock interface exposed to driver users.
- */
-
struct gxp_wakelock_manager {
/* Protects count and suspended */
struct mutex lock;
diff --git a/gxp.h b/gxp.h
index 001bb89..971b1f1 100644
--- a/gxp.h
+++ b/gxp.h
@@ -10,6 +10,11 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+/* Interface Version */
+#define GXP_INTERFACE_VERSION_MAJOR 1
+#define GXP_INTERFACE_VERSION_MINOR 0
+#define GXP_INTERFACE_VERSION_BUILD 0
+
/*
* mmap offsets for logging and tracing buffers
* Requested size will be divided evenly among all cores. The whole buffer
@@ -19,17 +24,6 @@
#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
-#define GXP_IOCTL_BASE 0xEE
-
-/* GXP map flag macros */
-
-/* The mask for specifying DMA direction in GXP map flag */
-#define GXP_MAP_DIR_MASK 3
-/* The targeted DMA direction for the buffer */
-#define GXP_MAP_DMA_BIDIRECTIONAL 0
-#define GXP_MAP_DMA_TO_DEVICE 1
-#define GXP_MAP_DMA_FROM_DEVICE 2
-
/*
* TODO(b/209083969) The following IOCTLs will no longer require the caller
* to hold a virtual device wakelock to call them once virtual device
@@ -39,7 +33,265 @@
* - GXP_SYNC_BUFFER
* - GXP_MAP_DMABUF
* - GXP_UNMAP_DMABUF
+ * - GXP_MAP_TPU_MBX_QUEUE
+ * - GXP_UNMAP_TPU_MBX_QUEUE
+ */
+
+#define GXP_IOCTL_BASE 0xEE
+
+#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
+struct gxp_interface_version_ioctl {
+ /*
+ * Driver major version number.
+ * Increments whenever a non-backwards-compatible change is made to
+ * the interface defined in this file.
+ */
+ __u16 version_major;
+ /*
+ * Driver minor version number.
+ * Increments whenever a backwards compatible change, such as the
+ * addition of a new IOCTL, is made to the interface defined in this
+ * file.
+ */
+ __u16 version_minor;
+ /*
+ * Driver build identifier.
+ * NULL-terminated string of the git hash of the commit the driver was
+ * built from. If the driver had uncommitted changes the string will
+ * end with "-dirty".
+ */
+ char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
+};
+
+/* Query the driver's interface version. */
+#define GXP_GET_INTERFACE_VERSION \
+ _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
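As a usage sketch (illustrative, not part of this patch): querying the
interface version from userspace. The "/dev/gxp" node path is an assumption;
the struct and IOCTL number come from the definitions above.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "gxp.h"

	int main(void)
	{
		struct gxp_interface_version_ioctl ver = { 0 };
		int fd = open("/dev/gxp", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, GXP_GET_INTERFACE_VERSION, &ver) == 0)
			printf("gxp interface %u.%u (%s)\n",
			       ver.version_major, ver.version_minor,
			       ver.version_build);
		close(fd);
		return 0;
	}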
+
+struct gxp_specs_ioctl {
+ /* Maximum number of cores that can be allocated to a virtual device */
+ __u8 core_count;
+ /* Deprecated fields that should be ignored */
+ __u16 reserved_0;
+ __u16 reserved_1;
+ __u16 reserved_2;
+ __u8 reserved_3;
+ /*
+ * Amount of "tightly-coupled memory" or TCM available to each core.
+ * The value returned will be in kB, or 0 if the value was not
+ * specified in the device-tree.
+ */
+ __u32 memory_per_core;
+};
+
+/* Query system specs. */
+#define GXP_GET_SPECS \
+ _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
+
+struct gxp_virtual_device_ioctl {
+ /*
+ * Input:
+ * The number of cores requested for the virtual device.
+ */
+ __u8 core_count;
+ /*
+ * Input:
+ * The number of threads requested per core.
+ */
+ __u16 threads_per_core;
+ /*
+ * Input:
+ * The amount of memory requested per core, in kB.
+ */
+ __u32 memory_per_core;
+ /*
+ * Output:
+ * The ID assigned to the virtual device and shared with its cores.
+ */
+ __u32 vdid;
+};
+
+/* Allocate virtual device. */
+#define GXP_ALLOCATE_VIRTUAL_DEVICE \
+ _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
+
+/*
+ * Components for which a client may hold a wakelock.
+ * Acquired by passing these values as `components_to_wake` in
+ * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
+ * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
+ *
+ * Multiple wakelocks can be acquired or released at once by passing multiple
+ * components, ORed together.
+ */
+#define WAKELOCK_BLOCK (1 << 0)
+#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+
+/*
+ * DSP subsystem Power state values for use as `gxp_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ * Note: GXP_POWER_STATE_READY is the state that keeps the BLOCK idle. When this
+ * state is set, the driver requests the UUD frequency and switches the CMU MUX
+ * clocks to 25 MHz to save more power.
+ */
+#define GXP_POWER_STATE_OFF 0
+#define GXP_POWER_STATE_UUD 1
+#define GXP_POWER_STATE_SUD 2
+#define GXP_POWER_STATE_UD 3
+#define GXP_POWER_STATE_NOM 4
+#define GXP_POWER_STATE_READY 5
+
+/*
+ * Memory interface power state values for use as `memory_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ */
+#define MEMORY_POWER_STATE_UNDEFINED 0
+#define MEMORY_POWER_STATE_MIN 1
+#define MEMORY_POWER_STATE_VERY_LOW 2
+#define MEMORY_POWER_STATE_LOW 3
+#define MEMORY_POWER_STATE_HIGH 4
+#define MEMORY_POWER_STATE_VERY_HIGH 5
+#define MEMORY_POWER_STATE_MAX 6
+
+/*
+ * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
+ * and `power_flags` in `gxp_mailbox_command_ioctl`.
+ * The client can request a non-aggressor vote with this flag, meaning that if
+ * the requested voltage is lower than the current voltage of VDD_CAM, the
+ * current voltage of VDD_CAM is adopted for the DSP. On the other hand, if the
+ * requested voltage is higher, the requested one is adopted for the DSP.
+ *
+ * Note: aggressor votes will have higher priority than non-aggressor votes.
*/
+#define GXP_POWER_NON_AGGRESSOR (1 << 0)
+
+struct gxp_acquire_wakelock_ioctl {
+ /*
+ * The components for which a wakelock will be acquired.
+ * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
+ * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
+ * client must already have acquired a BLOCK wakelock or acquire both
+ * in the same call.
+ */
+ __u32 components_to_wake;
+ /*
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above. Note that the requested power state will not be cleared
+ * if only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
+ * wakelock.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Memory interface power state to request from the system so long as
+ * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
+ * from above. The requested memory power state will not be cleared if
+ * only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * How long to wait, in microseconds, before returning if insufficient
+ * physical cores are available when attempting to acquire a
+ * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
+ * should not wait at all if cores are not available.
+ */
+ __u32 vd_timeout_us;
+ /*
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - NON_AGGRESSOR setting for ACPM:
+ * 0 = AGGRESSOR, default value
+ * 1 = NON_AGGRESSOR
+ * If the client makes a NON_AGGRESSOR request, the DSP is
+ * only guaranteed to operate at `gxp_power_state` when it
+ * is the only component active on its voltage rail. If
+ * another component becomes active on the rail, at any
+ * point while a NON_AGGRESSOR request is active, the rail
+ * will defer to the other component's requested state.
+ *
+ * Note: An AGGRESSOR request from any client overrides all
+ * NON_AGGRESSOR requests. At that point, the DSP will
+ * operate at the AGGRESSOR request's `gxp_power_state`,
+ * regardless of other components on the DSP's rail or what
+ * power state any NON_AGGRESSOR requests specified.
+ * [31:1] - RESERVED
+ */
+ __u32 flags;
+};
+
+/*
+ * Acquire a wakelock and request minimum power states for the DSP subsystem
+ * and the memory interface.
+ *
+ * Upon a successful return, the specified components will be powered on and if
+ * they were not already running at the specified or higher power states,
+ * requests will have been sent to transition both the DSP subsystem and
+ * memory interface to the specified states.
+ *
+ * If the same client invokes this IOCTL for the same component more than once
+ * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
+ * second call will update requested power states, but have no other effects.
+ * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
+ *
+ * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
+ * insufficient physical cores available, the driver will wait up to
+ * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
+ * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
+ * wakelocks were being requested, neither will have been acquired.
+ */
+#define GXP_ACQUIRE_WAKE_LOCK \
+ _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
+
+/*
+ * Legacy "acquire wakelock" IOCTL that does not support power flags.
+ * This IOCTL exists for backwards compatibility with older runtimes. All other
+ * fields are the same as in `struct gxp_acquire_wakelock_ioctl`.
+ */
+struct gxp_acquire_wakelock_compat_ioctl {
+ __u32 components_to_wake;
+ __u32 gxp_power_state;
+ __u32 memory_power_state;
+ __u32 vd_timeout_us;
+};
+
+#define GXP_ACQUIRE_WAKE_LOCK_COMPAT \
+ _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_compat_ioctl)
+
+/*
+ * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
+ *
+ * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
+ * removed from physical cores. At that point the cores may be reallocated to
+ * another client or powered down.
+ *
+ * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
+ * down. If a client attempts to release a BLOCK wakelock while still holding
+ * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
+ *
+ * If a client attempts to release a wakelock it does not hold, this IOCTL will
+ * return -ENODEV.
+ */
+#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
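A hedged usage sketch (not part of this patch) tying the pieces above
together: allocate a one-core virtual device, acquire BLOCK and
VIRTUAL_DEVICE wakelocks with a non-aggressor power vote, then release both.
The "/dev/gxp" node path is an assumption.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "gxp.h"

	int main(void)
	{
		struct gxp_virtual_device_ioctl vd = {
			.core_count = 1,
			.threads_per_core = 1,
			.memory_per_core = 0,
		};
		struct gxp_acquire_wakelock_ioctl wl = {
			.components_to_wake = WAKELOCK_BLOCK |
					      WAKELOCK_VIRTUAL_DEVICE,
			.gxp_power_state = GXP_POWER_STATE_UUD,
			.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
			.vd_timeout_us = 0,
			.flags = GXP_POWER_NON_AGGRESSOR,	/* defer to other rail users */
		};
		__u32 components = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE;
		int fd = open("/dev/gxp", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, GXP_ALLOCATE_VIRTUAL_DEVICE, &vd) == 0 &&
		    ioctl(fd, GXP_ACQUIRE_WAKE_LOCK, &wl) == 0) {
			/* ... dispatch mailbox commands here ... */
			ioctl(fd, GXP_RELEASE_WAKE_LOCK, &components);
		}
		close(fd);
		return 0;
	}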
+
+/* GXP map flag macros */
+/* The mask for specifying DMA direction in GXP map flag */
+#define GXP_MAP_DIR_MASK 3
+/* The targeted DMA direction for the buffer */
+#define GXP_MAP_DMA_BIDIRECTIONAL 0
+#define GXP_MAP_DMA_TO_DEVICE 1
+#define GXP_MAP_DMA_FROM_DEVICE 2
struct gxp_map_ioctl {
/*
@@ -131,7 +383,59 @@ struct gxp_sync_ioctl {
#define GXP_SYNC_BUFFER \
_IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
-struct gxp_mailbox_command_compat_ioctl {
+struct gxp_map_dmabuf_ioctl {
+ /*
+ * Bitfield indicating which virtual cores to map the dma-buf for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap dma-buf IOCTL, which always
+ * unmaps a dma-buf for all cores it had been mapped for.
+ */
+ __u16 virtual_core_list;
+ __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
+ /*
+ * Flags indicating mapping attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [1:0] - DMA_DIRECTION:
+ * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
+ * 01 = DMA_TO_DEVICE (host can write buffer)
+ * 10 = DMA_FROM_DEVICE (device can write buffer)
+ * Note: DMA_DIRECTION is the direction in which data moves
+ * from the host's perspective.
+ * [31:2] - RESERVED
+ */
+ __u32 flags;
+ /*
+ * Device address the dmabuf is mapped to.
+ * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
+ * can be accessed from by the device.
+ * - GXP_UNMAP_DMABUF expects this field to contain the value from the
+ * mapping call, and uses it to determine which dma-buf to unmap.
+ */
+ __u64 device_address;
+};
+
+/*
+ * Map host buffer via its dma-buf FD.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
+
+/*
+ * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
+ *
+ * Only the @device_address field is used. Other fields are fetched from the
+ * kernel's internal records. It is recommended to use the argument that was
+ * passed in GXP_MAP_DMABUF to un-map the dma-buf.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
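A minimal sketch (not part of this patch) of the map/unmap round trip. The
caller is assumed to already hold a VIRTUAL_DEVICE wakelock and to have a
dma-buf fd from some exporter; both are assumptions here.

	#include <sys/ioctl.h>
	#include "gxp.h"

	static int map_use_unmap(int gxp_fd, int dmabuf_fd)
	{
		struct gxp_map_dmabuf_ioctl map = {
			.virtual_core_list = 1 << 0,	/* virtual core 0 */
			.dmabuf_fd = dmabuf_fd,
			.flags = GXP_MAP_DMA_BIDIRECTIONAL,
		};

		if (ioctl(gxp_fd, GXP_MAP_DMABUF, &map))
			return -1;
		/* map.device_address is now valid for use in commands. */
		/* Pass the same struct back so the kernel can look it up. */
		return ioctl(gxp_fd, GXP_UNMAP_DMABUF, &map);
	}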
+
+struct gxp_mailbox_command_ioctl {
/*
* Input:
* The virtual core to dispatch the command to.
@@ -157,9 +461,45 @@ struct gxp_mailbox_command_compat_ioctl {
__u32 size;
/*
* Input:
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the mailbox command is finished (executed or timed out). One of the
+ * GXP_POWER_STATE_* defines from above.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when executing a
+ * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
+ * command is expected to run at the power state the wakelock has
+ * specified.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Input:
+ * Memory interface power state to request from the system so long as
+ * the mailbox command is executing. One of the MEMORY_POWER_STATE_*
+ * defines from above.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * Input:
* Flags describing the command, for use by the GXP device.
*/
__u32 flags;
+ /*
+ * Input:
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - NON_AGGRESSOR setting for ACPM:
+ * 0 = AGGRESSOR, default value
+ * 1 = NON_AGGRESSOR
+ * Note: It takes effect only if every client holds a
+ * wakelock with NON_AGGRESSOR.
+ * [31:1] - RESERVED
+ */
+ __u32 power_flags;
};
/*
@@ -167,6 +507,24 @@ struct gxp_mailbox_command_compat_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
+#define GXP_MAILBOX_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
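A sketch under stated assumptions (not part of this patch): dispatching one
command to virtual core 0 with a UUD power vote and a non-aggressor flag.
The device address and size are placeholders a real runtime would take from
a prior mapping call such as GXP_MAP_DMABUF.

	#include <sys/ioctl.h>
	#include "gxp.h"

	static int send_command(int gxp_fd, __u64 device_address, __u32 size)
	{
		struct gxp_mailbox_command_ioctl cmd = {
			.virtual_core_id = 0,
			.device_address = device_address,
			.size = size,
			.gxp_power_state = GXP_POWER_STATE_UUD,
			.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
			.flags = 0,
			.power_flags = GXP_POWER_NON_AGGRESSOR,
		};

		if (ioctl(gxp_fd, GXP_MAILBOX_COMMAND, &cmd))
			return -1;
		/* cmd.sequence_number can then be matched against the response. */
		return 0;
	}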
+
+/*
+ * Legacy "mailbox command" IOCTL that does not support power requests.
+ * This IOCTL exists for backwards compatibility with older runtimes. All
+ * fields, other than the unsupported `gxp_power_state`, `memory_power_state`,
+ * and `power_flags`, are the same as in `struct gxp_mailbox_command_ioctl`.
+ */
+struct gxp_mailbox_command_compat_ioctl {
+ __u16 virtual_core_id;
+ __u64 sequence_number;
+ __u64 device_address;
+ __u32 size;
+ __u32 flags;
+};
+
+/* The client must hold a VIRTUAL_DEVICE wakelock. */
#define GXP_MAILBOX_COMMAND_COMPAT \
_IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_compat_ioctl)
@@ -211,45 +569,48 @@ struct gxp_mailbox_response_ioctl {
#define GXP_MAILBOX_RESPONSE \
_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)
-struct gxp_specs_ioctl {
- __u8 core_count;
- __u16 version_major;
- __u16 version_minor;
- __u16 version_build;
- __u8 threads_per_core;
- __u32 memory_per_core; /* measured in kB */
-};
-
-/* Query system specs. */
-#define GXP_GET_SPECS \
- _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
-
-struct gxp_virtual_device_ioctl {
- /*
- * Input:
- * The number of cores requested for the virtual device.
- */
- __u8 core_count;
+struct gxp_register_mailbox_eventfd_ioctl {
/*
- * Input:
- * The number of threads requested per core.
+ * This eventfd will be signaled whenever a mailbox response arrives
+ * for the core specified by `virtual_core_id`.
+ *
+ * When registering, if an eventfd has already been registered for the
+ * specified core, the old eventfd will be unregistered and replaced.
+ *
+ * Not used during the unregister call, which clears any existing
+ * eventfd.
*/
- __u16 threads_per_core;
+ __u32 eventfd;
/*
- * Input:
- * The amount of memory requested per core, in kB.
+ * Reserved.
+ * Pass 0 for backwards compatibility.
*/
- __u32 memory_per_core;
+ __u32 flags;
/*
- * Output:
- * The ID assigned to the virtual device and shared with its cores.
+ * The virtual core to register or unregister an eventfd from.
+ * While an eventfd is registered, it will be signaled exactly once
+ * any time a command to this virtual core receives a response or times
+ * out.
*/
- __u32 vdid;
+ __u16 virtual_core_id;
};
-/* Allocate virtual device. */
-#define GXP_ALLOCATE_VIRTUAL_DEVICE \
- _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
+/*
+ * Register an eventfd to be signaled whenever the specified virtual core
+ * sends a mailbox response.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_REGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+
+/*
+ * Clear a previously registered mailbox response eventfd.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNREGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
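
    A sketch of the register/wait/unregister flow, assuming a hypothetical
    `gxp_fd` on which a virtual device has already been allocated. Error
    handling is elided for brevity:

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include "gxp.h"

    /* Register an eventfd for core 0, block until one response (or
     * command timeout) is signaled, then unregister. */
    static void wait_for_one_response(int gxp_fd)
    {
        struct gxp_register_mailbox_eventfd_ioctl reg;
        uint64_t signal_count;
        int efd = eventfd(0, 0);

        memset(&reg, 0, sizeof(reg));
        reg.eventfd = efd;
        reg.flags = 0;           /* reserved, must be 0 */
        reg.virtual_core_id = 0;
        ioctl(gxp_fd, GXP_REGISTER_MAILBOX_EVENTFD, &reg);

        /* An eventfd read blocks until the counter is non-zero. */
        read(efd, &signal_count, sizeof(signal_count));

        ioctl(gxp_fd, GXP_UNREGISTER_MAILBOX_EVENTFD, &reg);
        close(efd);
    }
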
#define ETM_TRACE_LSB_MASK 0x1
#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
@@ -297,13 +658,19 @@ struct gxp_etm_trace_start_ioctl {
__u8 pc_match_sense;
};
-/* Configure ETM trace registers and start ETM tracing. */
+/*
+ * Configure ETM trace registers and start ETM tracing.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
#define GXP_ETM_TRACE_START_COMMAND \
_IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)
/*
* Halts trace generation via a software trigger. The virtual core id is passed
* in as an input.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
*/
#define GXP_ETM_TRACE_SW_STOP_COMMAND \
_IOW(GXP_IOCTL_BASE, 8, __u16)
@@ -314,6 +681,8 @@ struct gxp_etm_trace_start_ioctl {
* of trace data missing towards the end of the trace session.
* This is a workaround for b/180728272 and b/181623511.
* The virtual core id is passed in as an input.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
*/
#define GXP_ETM_TRACE_CLEANUP_COMMAND \
_IOW(GXP_IOCTL_BASE, 9, __u16)
@@ -347,7 +716,11 @@ struct gxp_etm_get_trace_info_ioctl {
__u64 trace_data_addr;
};
-/* Retrieves trace header and/or trace data for decoding purposes. */
+/*
+ * Retrieves trace header and/or trace data for decoding purposes.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
#define GXP_ETM_GET_TRACE_INFO_COMMAND \
_IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)
@@ -379,42 +752,6 @@ struct gxp_etm_get_trace_info_ioctl {
*/
#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
-struct gxp_tpu_mbx_queue_ioctl {
- __u32 tpu_fd; /* TPU virtual device group fd */
- /*
- * Bitfield indicating which virtual cores to allocate and map the
- * buffers for.
- * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
- *
- * This field is not used by the unmap IOCTL, which always unmaps the
- * buffers for all cores it had been mapped for.
- */
- __u32 virtual_core_list;
- /*
- * The user address of an edgetpu_mailbox_attr struct, containing
- * cmd/rsp queue size, mailbox priority and other relevant info.
- * This structure is defined in edgetpu.h in the TPU driver.
- */
- __u64 attr_ptr;
-};
-
-/*
- * Map TPU-DSP mailbox cmd/rsp queue buffers.
- */
-#define GXP_MAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
-
-/*
- * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
- * GXP_MAP_TPU_MBX_QUEUE.
- *
- * Only the @tpu_fd field will be used. Other fields will be fetched
- * from the kernel's internal records. It is recommended to use the argument
- * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
- */
-#define GXP_UNMAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
-
struct gxp_register_telemetry_eventfd_ioctl {
/*
* File-descriptor obtained via eventfd().
@@ -440,279 +777,49 @@ struct gxp_register_telemetry_eventfd_ioctl {
/*
* Reads the 2 global counter registers in AURORA_TOP and combines them to
* return the full 64-bit value of the counter.
- */
-#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
-
-/*
- * Components for which a client may hold a wakelock.
- * Acquired by passing these values as `components_to_wake` in
- * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
- * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
- *
- * Multiple wakelocks can be acquired or released at once by passing multiple
- * components, ORed together.
- */
-#define WAKELOCK_BLOCK (1 << 0)
-#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
-
-/*
- * DSP subsystem Power state values for use as `gxp_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- * Note: GXP_POWER_STATE_READY is the state to keep the BLOCK idle. By setting
- * this state, the driver will request UUD frequency and switch the CMUMUX
- * clocks into 25 MHz to save more power.
- */
-#define GXP_POWER_STATE_OFF 0
-#define GXP_POWER_STATE_UUD 1
-#define GXP_POWER_STATE_SUD 2
-#define GXP_POWER_STATE_UD 3
-#define GXP_POWER_STATE_NOM 4
-#define GXP_POWER_STATE_READY 5
-
-/*
- * Memory interface power state values for use as `memory_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- */
-#define MEMORY_POWER_STATE_UNDEFINED 0
-#define MEMORY_POWER_STATE_MIN 1
-#define MEMORY_POWER_STATE_VERY_LOW 2
-#define MEMORY_POWER_STATE_LOW 3
-#define MEMORY_POWER_STATE_HIGH 4
-#define MEMORY_POWER_STATE_VERY_HIGH 5
-#define MEMORY_POWER_STATE_MAX 6
-
-struct gxp_acquire_wakelock_ioctl {
- /*
- * The components for which a wakelock will be acquired.
- * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
- * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
- * client must already have acquired a BLOCK wakelock or acquire both
- * in the same call.
- */
- __u32 components_to_wake;
- /*
- * Minimum power state to operate the entire DSP subsystem at until
- * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
- * from above. Note that the requested power state will not be cleared
- * if only the VIRTUAL_DEVICE wakelock is released.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
- * wakelock.
- */
- __u32 gxp_power_state;
- /*
- * Memory interface power state to request from the system so long as
- * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
- * from above. The requested memory power state will not be cleared if
- * only the VIRTUAL_DEVICE wakelock is released.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * How long to wait, in microseconds, before returning if insufficient
- * physical cores are available when attempting to acquire a
- * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
- * should not wait at all if cores are not available.
- */
- __u32 vd_timeout_us;
-};
-
-/*
- * Acquire a wakelock and request minimum power states for the DSP subsystem
- * and the memory interface.
*
- * Upon a successful return, the specified components will be powered on and if
- * they were not already running at the specified or higher power states,
- * requests will have been sent to transition both the DSP subsystem and
- * memory interface to the specified states.
- *
- * If the same client invokes this IOCTL for the same component more than once
- * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
- * second call will update requested power states, but have no other effects.
- * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
- *
- * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
- * insufficient physical cores available, the driver will wait up to
- * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
- * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
- * wakelocks were being requested, neither will have been acquired.
+ * The client must hold a BLOCK wakelock.
*/
-#define GXP_ACQUIRE_WAKE_LOCK \
- _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_ioctl)
-
-/*
- * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
- *
- * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
- * removed from physical cores. At that point the cores may be reallocated to
- * another client or powered down.
- *
- * If no clients hold a BLOCK wakelock, the entire DSP subsytem may be powered
- * down. If a client attempts to release a BLOCK wakelock while still holding
- * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
- *
- * If a client attempts to release a wakelock it does not hold, this IOCTL will
- * return -ENODEV.
- */
-#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
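
    Usage is a single pointer-argument ioctl; a sketch with an assumed
    `gxp_fd`:

    #include <linux/types.h>
    #include <sys/ioctl.h>
    #include "gxp.h"

    /* Read the combined 64-bit AURORA_TOP counter; the BLOCK wakelock
     * must already be held by this client, as required above. */
    static int read_global_counter(int gxp_fd, __u64 *value)
    {
        return ioctl(gxp_fd, GXP_READ_GLOBAL_COUNTER, value);
    }
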
-struct gxp_map_dmabuf_ioctl {
+struct gxp_tpu_mbx_queue_ioctl {
+ __u32 tpu_fd; /* TPU virtual device group fd */
/*
- * Bitfield indicating which virtual cores to map the dma-buf for.
+ * Bitfield indicating which virtual cores to allocate and map the
+ * buffers for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
- * This field is not used by the unmap dma-buf IOCTL, which always
- * unmaps a dma-buf for all cores it had been mapped for.
- */
- __u16 virtual_core_list;
- __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
- /*
- * Flags indicating mapping attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [1:0] - DMA_DIRECTION:
- * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
- * 01 = DMA_TO_DEVICE (host can write buffer)
- * 10 = DMA_FROM_DEVICE (device can write buffer)
- * Note: DMA_DIRECTION is the direction in which data moves
- * from the host's perspective.
- * [31:2] - RESERVED
+ * This field is not used by the unmap IOCTL, which always unmaps the
+ * buffers for all cores it had been mapped for.
*/
- __u32 flags;
+ __u32 virtual_core_list;
/*
- * Device address the dmabuf is mapped to.
- * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
- * can be accessed from by the device.
- * - GXP_UNMAP_DMABUF expects this field to contain the value from the
- * mapping call, and uses it to determine which dma-buf to unmap.
+ * The user address of an edgetpu_mailbox_attr struct, containing
+ * cmd/rsp queue size, mailbox priority and other relevant info.
+ * This structure is defined in edgetpu.h in the TPU driver.
*/
- __u64 device_address;
+ __u64 attr_ptr;
};
/*
- * Map host buffer via its dma-buf FD.
+ * Map TPU-DSP mailbox cmd/rsp queue buffers.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
+#define GXP_MAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
/*
- * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
- *
- * Only the @device_address field is used. Other fields are fetched from the
- * kernel's internal records. It is recommended to use the argument that was
- * passed in GXP_MAP_DMABUF to un-map the dma-buf.
+ * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
+ * GXP_MAP_TPU_MBX_QUEUE.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
- */
-#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
-
-struct gxp_mailbox_command_ioctl {
- /*
- * Input:
- * The virtual core to dispatch the command to.
- */
- __u16 virtual_core_id;
- /*
- * Output:
- * The sequence number assigned to this command. The caller can use
- * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
- * with this command.
- */
- __u64 sequence_number;
- /*
- * Input:
- * Device address to the buffer containing a GXP command. The user
- * should have obtained this address from the GXP_MAP_BUFFER ioctl.
- */
- __u64 device_address;
- /*
- * Input:
- * Size of the buffer at `device_address` in bytes.
- */
- __u32 size;
- /*
- * Input:
- * Minimum power state to operate the entire DSP subsystem at until
- * the mailbox command is finished(executed or timeout). One of the
- * GXP_POWER_STATE_* defines from below.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when executing a
- * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
- * command is expected to run at the power state the wakelock has
- * specified.
- */
- __u32 gxp_power_state;
- /*
- * Input:
- * Memory interface power state to request from the system so long as
- * the mailbox command is executing. One of the MEMORY_POWER_STATE*
- * defines from below.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * Input:
- * Flags describing the command, for use by the GXP device.
- */
- __u32 flags;
- /*
- * Input:
- * Flags relevant to the power state requests. Currently reserved.
- */
- /* TODO(221320387): Document the flags once support is implemented. */
- __u32 power_flags;
-};
-
-/*
- * Push element to the mailbox commmand queue.
+ * Only the @tpu_fd field will be used. Other fields will be fetched
+ * from the kernel's internal records. It is recommended to use the argument
+ * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAILBOX_COMMAND \
- _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
-
-struct gxp_register_mailbox_eventfd_ioctl {
- /*
- * This eventfd will be signaled whenever a mailbox response arrives
- * for the core specified by `virtual_core_id`.
- *
- * When registering, if an eventfd has already been registered for the
- * specified core, the old eventfd will be unregistered and replaced.
- *
- * Not used during the unregister call, which clears any existing
- * eventfd.
- */
- __u32 eventfd;
- /*
- * Reserved.
- * Pass 0 for backwards compatibility.
- */
- __u32 flags;
- /*
- * The virtual core to register or unregister an eventfd from.
- * While an eventfd is registered, it will be signaled exactly once
- * any time a command to this virtual core receives a response or times
- * out.
- */
- __u16 virtual_core_id;
-};
-
-#define GXP_REGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
-
-#define GXP_UNREGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+#define GXP_UNMAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
#endif /* __GXP_H__ */
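
    Finally, a hedged sketch of the TPU mailbox queue map/unmap pair above.
    `tpu_fd` is an assumed TPU virtual device group fd, and
    `edgetpu_mailbox_attr` comes from edgetpu.h in the TPU driver, as noted
    in the field comments; its contents (queue sizes, priority) are left to
    the runtime:

    #include <linux/types.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "edgetpu.h"  /* struct edgetpu_mailbox_attr (TPU driver) */
    #include "gxp.h"

    /* Map, then unmap, the TPU-DSP mailbox queue buffers for core 0. */
    static int tpu_mbx_example(int gxp_fd, int tpu_fd)
    {
        struct edgetpu_mailbox_attr attr = {0};
        struct gxp_tpu_mbx_queue_ioctl ibuf;

        memset(&ibuf, 0, sizeof(ibuf));
        ibuf.tpu_fd = tpu_fd;
        ibuf.virtual_core_list = 1 << 0;
        ibuf.attr_ptr = (__u64)(unsigned long)&attr;

        if (ioctl(gxp_fd, GXP_MAP_TPU_MBX_QUEUE, &ibuf))
            return -1;

        /* Only tpu_fd is consumed on unmap; reuse the same struct. */
        return ioctl(gxp_fd, GXP_UNMAP_TPU_MBX_QUEUE, &ibuf);
    }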