author     Aurora zuma automerger <aurora-zuma-automerger@google.com>  2023-05-04 14:28:21 +0000
committer  Copybara-Service <copybara-worker@google.com>               2023-05-04 11:42:21 -0700
commit     9e67071e743d6e2e48c6584a827d0d4cc42750b8 (patch)
tree       1f45490026e3e7e376631e29132d187ff313ac69
parent     7edc47df27e29a4b061fb4063f93ffb7ede85442 (diff)
download   zuma-9e67071e743d6e2e48c6584a827d0d4cc42750b8.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: fix UAF on awaiter releasing
Bug: 280737201
gxp: cancel KCI workers before CSR reset
We need to ensure reverse KCI handler stop before the mailbox CSR reset.
Bug: 280548609
gxp: reset KCI mailbox after shutdown
Bug: 279990832
gxp: ring doorbell when shutdown KCI fails
Bug: 279990832 (repeat)
gxp: Support debug dump for dsp core relocation
Bug: 264016687
gxp: high clk mux when secure wakelock is held
Bug: 279201155
gxp: switch clock on idle
Bug: 279201155 (repeat)
gcip: Move HAS_IOVAD_BEST_FIT_ALGO to header
Bug: 278833993
GCIP_MAIN_REV_ID: d513c7d9d0cbd7d0f5c50c5585050c203515e874
gcip: Add HAS_IOVAD_BEST_FIT_ALGO
Bug: 278833993 (repeat)
GCIP_HEADERS_REV_ID: 078574603518db2d6f048ed7c4ce8cf83bf74aa3
gxp: handle MCU handshake failure
Bug: 279449877
gxp: introduce wait_pg_state_locked
Bug: 279449877 (repeat)
gxp: review comments fixup
gcip: remove useless written variable
GCIP_MAIN_REV_ID: b8613d3c465486e5fc0266f9b2dbee4140379124
gcip: add missing component type
Bug: 279866789
gcip: add missing thread IDs
Bug: 279866789 (repeat)
GCIP_HEADERS_REV_ID: 89285c7e9bd11aac15f4fc500b1c53187dbde2f3
gxp: cancel work before powering down
Bug: 279231237
gxp: adopt gcip-usage-stats
Bug: 271374535
gxp: update usage before blk_off
Bug: 271374535 (repeat)
gcip: fix false not found freq warning log
gcip: decide subcomponent after update_usage_kci
gcip: support unsorted default DVFS freqs
GCIP_MAIN_REV_ID: 2f6129c57ab1da86d20ea62db296664ed0b9e820
gxp: bump version to 1.15
Bug: 277863755
gxp: change gem5 mailbox client as real chip
Bug: 265101896
gcip: manage response status internally
Bug: 278819094
gcip: add status field to struct gcip_mailbox_async_resp
Bug: 278819094 (repeat)
gcip: introduce struct gcip_mailbox_async_resp
Bug: 278819094 (repeat)
GCIP_MAIN_REV_ID: 2cf4400cd1db0eae255b120e1d822b70dee63205
gcip: remove GCIP_KCI_STATUS_*
Bug: 278819094 (repeat)
gcip: manage response status internally
Bug: 278819094 (repeat)
gcip: add status field to struct gcip_mailbox_async_resp
Bug: 278819094 (repeat)
gcip: introduce struct gcip_mailbox_async_resp
Bug: 278819094 (repeat)
GCIP_HEADERS_REV_ID: 4d5014126a0c46443e38905f1cb90449c0c8ff8a
gxp: propagate an error code of UCI to the resp ioctl
Bug: 278819335
gxp: release vmbox after invalidating vd
Bug: 277863755 (repeat)
GitOrigin-RevId: 1182ebcca8b41bc303592e5589abda9d67961c0a
Change-Id: Ia66371862d1476c8286a5e4079e763abe096ff54
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-iommu.c        8
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-kci.c          16
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mailbox.c      65
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-usage-stats.c  46
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-iommu.h        10
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h          22
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mailbox.h      23
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-usage-stats.h  6
-rw-r--r--  gxp-dci.c              19
-rw-r--r--  gxp-debug-dump.c       2
-rw-r--r--  gxp-firmware.c         108
-rw-r--r--  gxp-kci.c              47
-rw-r--r--  gxp-mailbox-manager.c  6
-rw-r--r--  gxp-mailbox.c          12
-rw-r--r--  gxp-mailbox.h          2
-rw-r--r--  gxp-mcu-firmware.c     103
-rw-r--r--  gxp-mcu-platform.c     3
-rw-r--r--  gxp-pm.c               77
-rw-r--r--  gxp-pm.h               22
-rw-r--r--  gxp-uci.c              54
-rw-r--r--  gxp-uci.h              2
-rw-r--r--  gxp-usage-stats.c      151
-rw-r--r--  gxp-usage-stats.h      38
-rw-r--r--  gxp-vd.c               33
-rw-r--r--  gxp-vd.h               5
-rw-r--r--  gxp.h                  15
26 files changed, 529 insertions, 366 deletions
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-iommu.c b/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
index 8f6570f..75509cd 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
@@ -21,14 +21,6 @@
#include <gcip/gcip-iommu.h>
#include <gcip/gcip-mem-pool.h>
-/*
- * TODO(b/277649169) Best fit IOVA allocator was removed in 6.1 GKI
- * The API needs to either be upstreamed, integrated into this driver, or disabled for 6.1
- * compatibility. For now, disable best-fit on all non-Android kernels and any GKI > 5.15.
- */
-#define HAS_IOVAD_BEST_FIT_ALGO (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && \
- (IS_ENABLED(CONFIG_GCIP_TEST) || IS_ENABLED(CONFIG_ANDROID)))
-
#if HAS_IOVAD_BEST_FIT_ALGO
#include <linux/dma-iommu.h>
#endif
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
index 33f8021..417b078 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-kci.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -136,20 +136,6 @@ static void gcip_kci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp,
elem->seq = seq;
}
-static u16 gcip_kci_get_resp_elem_status(struct gcip_mailbox *mailbox, void *resp)
-{
- struct gcip_kci_response_element *elem = resp;
-
- return elem->status;
-}
-
-static void gcip_kci_set_resp_elem_status(struct gcip_mailbox *mailbox, void *resp, u16 status)
-{
- struct gcip_kci_response_element *elem = resp;
-
- elem->status = status;
-}
-
static void gcip_kci_acquire_wait_list_lock(struct gcip_mailbox *mailbox, bool irqsave,
unsigned long *flags)
{
@@ -273,8 +259,6 @@ static const struct gcip_mailbox_ops gcip_mailbox_ops = {
.release_resp_queue_lock = gcip_kci_release_resp_queue_lock,
.get_resp_elem_seq = gcip_kci_get_resp_elem_seq,
.set_resp_elem_seq = gcip_kci_set_resp_elem_seq,
- .get_resp_elem_status = gcip_kci_get_resp_elem_status,
- .set_resp_elem_status = gcip_kci_set_resp_elem_status,
.acquire_wait_list_lock = gcip_kci_acquire_wait_list_lock,
.release_wait_list_lock = gcip_kci_release_wait_list_lock,
.wait_for_cmd_queue_not_full = gcip_kci_wait_for_cmd_queue_not_full,
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index c7aa921..4571aa9 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -42,8 +42,6 @@
#define GET_RESP_ELEM_SEQ(resp) mailbox->ops->get_resp_elem_seq(mailbox, resp)
#define SET_RESP_ELEM_SEQ(resp, seq) mailbox->ops->set_resp_elem_seq(mailbox, resp, seq)
-#define GET_RESP_ELEM_STATUS(resp) mailbox->ops->get_resp_elem_status(mailbox, resp)
-#define SET_RESP_ELEM_STATUS(resp, status) mailbox->ops->set_resp_elem_status(mailbox, resp, status)
#define ACQUIRE_WAIT_LIST_LOCK(irqsave, flags) \
mailbox->ops->acquire_wait_list_lock(mailbox, irqsave, flags)
@@ -52,7 +50,7 @@
struct gcip_mailbox_wait_list_elem {
struct list_head list;
- void *resp;
+ struct gcip_mailbox_async_resp *async_resp;
struct gcip_mailbox_resp_awaiter *awaiter;
};
@@ -74,16 +72,17 @@ static void gcip_mailbox_awaiter_dec_refs(struct gcip_mailbox_resp_awaiter *awai
*
* This is used when the kernel gives up waiting for the response.
*/
-static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
+static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_async_resp *async_resp)
{
struct gcip_mailbox_wait_list_elem *cur;
unsigned long flags;
- u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(async_resp->resp);
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
list_for_each_entry (cur, &mailbox->wait_list, list) {
- cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ cur_seq = GET_RESP_ELEM_SEQ(cur->async_resp->resp);
if (cur_seq > seq)
break;
if (cur_seq == seq) {
@@ -108,7 +107,8 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
*
* Returns 0 on success, or -ENOMEM if failed on allocation.
*/
-static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
+static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_async_resp *async_resp,
struct gcip_mailbox_resp_awaiter *awaiter, bool atomic)
{
struct gcip_mailbox_wait_list_elem *entry;
@@ -120,7 +120,7 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
return -ENOMEM;
if (mailbox->ops->before_enqueue_wait_list) {
- ret = mailbox->ops->before_enqueue_wait_list(mailbox, resp, awaiter);
+ ret = mailbox->ops->before_enqueue_wait_list(mailbox, async_resp->resp, awaiter);
if (ret) {
kfree(entry);
return ret;
@@ -131,7 +131,7 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
if (awaiter)
refcount_inc(&awaiter->refs);
- entry->resp = resp;
+ entry->async_resp = async_resp;
entry->awaiter = awaiter;
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
list_add_tail(&entry->list, &mailbox->wait_list);
@@ -146,7 +146,8 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
* synchronous, the @cmd will be put into the queue, but the caller may not wait the response and
* ignore it. If the request is async, @awaiter should be passed too.
*/
-static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
+static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ struct gcip_mailbox_async_resp *async_resp,
struct gcip_mailbox_resp_awaiter *awaiter)
{
int ret = 0;
@@ -177,11 +178,11 @@ static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, voi
goto out;
}
- if (resp) {
+ if (async_resp->resp) {
/* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
- SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
- SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
- ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter, atomic);
+ SET_RESP_ELEM_SEQ(async_resp->resp, GET_CMD_ELEM_SEQ(cmd));
+ async_resp->status = GCIP_MAILBOX_STATUS_WAITING_RESPONSE;
+ ret = gcip_mailbox_push_wait_resp(mailbox, async_resp, awaiter, atomic);
if (ret)
goto out;
}
@@ -245,11 +246,10 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
if (mailbox->ops->before_handle_resp && !mailbox->ops->before_handle_resp(mailbox, resp))
return;
- SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_OK);
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
- cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ cur_seq = GET_RESP_ELEM_SEQ(cur->async_resp->resp);
if (cur_seq > seq) {
/*
* This response has already timed out and been removed
@@ -259,7 +259,8 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
break;
}
if (cur_seq == seq) {
- memcpy(cur->resp, resp, mailbox->resp_elem_size);
+ cur->async_resp->status = GCIP_MAILBOX_STATUS_OK;
+ memcpy(cur->async_resp->resp, resp, mailbox->resp_elem_size);
list_del(&cur->list);
if (cur->awaiter) {
awaiter = cur->awaiter;
@@ -293,7 +294,7 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
break;
}
if (!mailbox->ignore_seq_order && cur_seq < seq) {
- SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
+ cur->async_resp->status = GCIP_MAILBOX_STATUS_NO_RESPONSE;
list_del(&cur->list);
if (cur->awaiter) {
/* Remove the reference of the arrived handler. */
@@ -435,7 +436,7 @@ static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
* Once this function has the wait_list_lock, no future response
* processing will begin until this response has been removed.
*/
- gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
+ gcip_mailbox_del_wait_resp(mailbox, &awaiter->async_resp);
/*
* Handle timed out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
@@ -521,8 +522,7 @@ static int gcip_mailbox_set_ops(struct gcip_mailbox *mailbox, const struct gcip_
if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
!ops->inc_resp_queue_head || !ops->acquire_resp_queue_lock ||
- !ops->release_resp_queue_lock || !ops->get_resp_elem_seq || !ops->set_resp_elem_seq ||
- !ops->get_resp_elem_status || !ops->set_resp_elem_status) {
+ !ops->release_resp_queue_lock || !ops->get_resp_elem_seq || !ops->set_resp_elem_seq) {
dev_err(mailbox->dev, "Incomplete mailbox RESP queue ops.\n");
return -EINVAL;
}
@@ -605,26 +605,33 @@ void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox)
int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
{
+ struct gcip_mailbox_async_resp async_resp = {
+ .resp = resp,
+ };
int ret;
- ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, resp, NULL);
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, &async_resp, NULL);
if (ret)
return ret;
+ /*
+ * If @resp is NULL, it will not enqueue the response into the waiting list. Therefore, it
+ * is fine to release @async_resp.
+ */
if (!resp)
return 0;
ret = wait_event_timeout(mailbox->wait_list_waitq,
- GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_WAITING_RESPONSE,
+ async_resp.status != GCIP_MAILBOX_STATUS_WAITING_RESPONSE,
msecs_to_jiffies(mailbox->timeout));
if (!ret) {
dev_dbg(mailbox->dev, "event wait timeout");
- gcip_mailbox_del_wait_resp(mailbox, resp);
+ gcip_mailbox_del_wait_resp(mailbox, &async_resp);
return -ETIMEDOUT;
}
- if (GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_OK) {
+ if (async_resp.status != GCIP_MAILBOX_STATUS_OK) {
dev_err(mailbox->dev, "Mailbox cmd %u response status %u", GET_CMD_ELEM_CODE(cmd),
- GET_RESP_ELEM_STATUS(resp));
+ async_resp.status);
return -ENOMSG;
}
@@ -641,7 +648,7 @@ struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mail
if (!awaiter)
return ERR_PTR(-ENOMEM);
- awaiter->resp = resp;
+ awaiter->async_resp.resp = resp;
awaiter->mailbox = mailbox;
awaiter->data = data;
awaiter->release_data = mailbox->ops->release_awaiter_data;
@@ -651,7 +658,7 @@ struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mail
INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
- ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, &awaiter->async_resp, awaiter);
if (ret)
goto err_free_resp;
@@ -665,7 +672,7 @@ err_free_resp:
void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
{
- gcip_mailbox_del_wait_resp(awaiter->mailbox, awaiter->resp);
+ gcip_mailbox_del_wait_resp(awaiter->mailbox, &awaiter->async_resp);
gcip_mailbox_cancel_awaiter_timeout(awaiter);
}
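
For context on the gcip_mailbox_async_resp change above: the completion status that used to be written back into the IP-defined response element (through the now-removed get/set_resp_elem_status ops) is tracked in a GCIP-owned wrapper instead. A minimal sketch of where that status now lives, using gcip_mailbox_put_cmd() and the awaiter fields from this diff but a hypothetical IP response type and call site, purely for illustration:

    struct my_ip_response resp;                       /* hypothetical IP-defined element */
    struct gcip_mailbox_resp_awaiter *awaiter;

    awaiter = gcip_mailbox_put_cmd(mailbox, &cmd, &resp, my_data);
    if (IS_ERR(awaiter))
            return PTR_ERR(awaiter);
    /*
     * Later, e.g. from the handle_awaiter_arrived/timedout callbacks: the
     * GCIP-managed status lives in the wrapper, while &resp only ever holds
     * the IP-defined fields copied from the response queue.
     */
    switch (awaiter->async_resp.status) {
    case GCIP_MAILBOX_STATUS_OK:                      /* response copied into &resp */
            break;
    case GCIP_MAILBOX_STATUS_NO_RESPONSE:             /* skipped by a later sequence number */
    case GCIP_MAILBOX_STATUS_WAITING_RESPONSE:        /* still pending / timed out */
    default:
            break;
    }
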
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-usage-stats.c b/gcip-kernel-driver/drivers/gcip/gcip-usage-stats.c
index 876733e..5c50705 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-usage-stats.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-usage-stats.c
@@ -84,7 +84,8 @@ gcip_usage_stats_find_core_usage_entry_locked(int32_t uid, uint8_t core_id,
static unsigned int gcip_usage_stats_find_dvfs_freq_index(struct gcip_usage_stats *ustats,
uint32_t dvfs_freq)
{
- int i, nums, idx = 0;
+ int i, nums, closest_freq_idx, idx = 0;
+ uint32_t cur_freq, closest_freq = 0;
mutex_lock(&ustats->dvfs_freqs_lock);
@@ -94,8 +95,10 @@ static unsigned int gcip_usage_stats_find_dvfs_freq_index(struct gcip_usage_stat
*/
if (ustats->dvfs_freqs_num) {
for (i = ustats->dvfs_freqs_num - 1; i >= 0; i--) {
- if (dvfs_freq == ustats->dvfs_freqs[i])
+ if (dvfs_freq == ustats->dvfs_freqs[i]) {
idx = i;
+ break;
+ }
}
if (i < 0)
@@ -117,10 +120,20 @@ static unsigned int gcip_usage_stats_find_dvfs_freq_index(struct gcip_usage_stat
}
for (i = nums - 1; i >= 0; i--) {
- if (dvfs_freq >= ustats->ops->get_default_dvfs_freq(i, ustats->data))
+ cur_freq = ustats->ops->get_default_dvfs_freq(i, ustats->data);
+
+ if (dvfs_freq == cur_freq)
return i;
+
+ if (dvfs_freq > cur_freq && closest_freq < cur_freq) {
+ closest_freq = cur_freq;
+ closest_freq_idx = i;
+ }
}
+ if (closest_freq)
+ return closest_freq_idx;
+
dev_warn(ustats->dev,
"Failed to find the freq from the default ones of the kernel driver, freq=%u",
dvfs_freq);
@@ -328,7 +341,6 @@ static ssize_t gcip_usage_stats_component_utilization_show(struct device *dev,
container_of(dev_attr, struct gcip_usage_stats_attr, dev_attr);
struct gcip_usage_stats *ustats = attr->ustats;
int32_t val;
- ssize_t written;
ustats->ops->update_usage_kci(ustats->data);
@@ -339,11 +351,7 @@ static ssize_t gcip_usage_stats_component_utilization_show(struct device *dev,
mutex_unlock(&ustats->usage_stats_lock);
- written = scnprintf(buf, PAGE_SIZE, "%d\n", val);
- if (written < 0)
- return written;
-
- return written;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
/* Following functions are related to `COUNTER` metrics. */
@@ -396,11 +404,17 @@ static ssize_t gcip_usage_stats_counter_show(struct device *dev, struct device_a
container_of(dev_attr, struct gcip_usage_stats_attr, dev_attr);
struct gcip_usage_stats *ustats = attr->ustats;
ssize_t written = 0;
- int subcomponent = ustats->version >= GCIP_USAGE_STATS_V2 ? attr->subcomponent : 0;
- int i;
+ int subcomponent, i;
ustats->ops->update_usage_kci(ustats->data);
+ /*
+ * We need to decide @subcomponent after calling `update_usage_kci` because IP kernel
+ * drivers may want to change the version of @ustats to lower one if the firmware doesn't
+ * support a higher version.
+ */
+ subcomponent = ustats->version >= GCIP_USAGE_STATS_V2 ? attr->subcomponent : 0;
+
mutex_lock(&ustats->usage_stats_lock);
if (subcomponent == GCIP_USAGE_STATS_ATTR_ALL_SUBCOMPONENTS) {
@@ -579,11 +593,17 @@ static ssize_t gcip_usage_stats_max_watermark_show(struct device *dev,
container_of(dev_attr, struct gcip_usage_stats_attr, dev_attr);
struct gcip_usage_stats *ustats = attr->ustats;
ssize_t written = 0;
- int subcomponent = ustats->version >= GCIP_USAGE_STATS_V2 ? attr->subcomponent : 0;
- int i;
+ int subcomponent, i;
ustats->ops->update_usage_kci(ustats->data);
+ /*
+ * We need to decide @subcomponent after calling `update_usage_kci` because IP kernel
+ * drivers may want to change the version of @ustats to lower one if the firmware doesn't
+ * support a higher version.
+ */
+ subcomponent = ustats->version >= GCIP_USAGE_STATS_V2 ? attr->subcomponent : 0;
+
mutex_lock(&ustats->usage_stats_lock);
if (subcomponent == GCIP_USAGE_STATS_ATTR_ALL_SUBCOMPONENTS) {
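
The reworked lookup above drops the assumption that the default DVFS table is sorted: an exact match returns its index, otherwise the entry with the largest frequency still below the requested one is chosen, and the "not found" warning now only fires when neither path matches. A small standalone illustration of that selection rule (plain C with a made-up table, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, intentionally unsorted default table (kHz). */
    static const uint32_t defaults[] = { 1065000, 226000, 625000, 355000 };

    static int closest_default_idx(uint32_t freq)
    {
            uint32_t closest = 0;
            int i, idx = -1;

            for (i = (int)(sizeof(defaults) / sizeof(defaults[0])) - 1; i >= 0; i--) {
                    if (freq == defaults[i])
                            return i;                  /* exact match wins */
                    if (freq > defaults[i] && defaults[i] > closest) {
                            closest = defaults[i];     /* best lower bound so far */
                            idx = i;
                    }
            }
            return idx;                                /* -1: lower than every default */
    }

    int main(void)
    {
            /* 700000 has no exact entry; the nearest lower default is 625000, index 2. */
            printf("%d\n", closest_default_idx(700000));
            return 0;
    }
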
diff --git a/gcip-kernel-driver/include/gcip/gcip-iommu.h b/gcip-kernel-driver/include/gcip/gcip-iommu.h
index 4e04b7e..1797f94 100644
--- a/gcip-kernel-driver/include/gcip/gcip-iommu.h
+++ b/gcip-kernel-driver/include/gcip/gcip-iommu.h
@@ -23,11 +23,21 @@
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/scatterlist.h>
+#include <linux/version.h>
#include <gcip/gcip-domain-pool.h>
#include <gcip/gcip-mem-pool.h>
/*
+ * TODO(b/277649169) Best fit IOVA allocator was removed in 6.1 GKI
+ * The API needs to either be upstreamed, integrated into this driver, or disabled for 6.1
+ * compatibility. For now, disable best-fit on all non-Android kernels and any GKI > 5.15.
+ */
+#define HAS_IOVAD_BEST_FIT_ALGO \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && \
+ (IS_ENABLED(CONFIG_GCIP_TEST) || IS_ENABLED(CONFIG_ANDROID)))
+
+/*
* Helpers for manipulating @gcip_map_flags parameter of the `gcip_iommu_domain_{map,unmap}_sg`
* functions.
*/
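
With the macro now exported here (and <linux/version.h> included above for KERNEL_VERSION), a consumer only needs the public header to guard best-fit-only code; a trimmed sketch of what gcip-iommu.c does after this change:

    #include <gcip/gcip-iommu.h>        /* provides HAS_IOVAD_BEST_FIT_ALGO */

    #if HAS_IOVAD_BEST_FIT_ALGO
    #include <linux/dma-iommu.h>        /* best-fit IOVA API only exists on these kernels */
    #endif
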
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
index 51dd803..1cfc82e 100644
--- a/gcip-kernel-driver/include/gcip/gcip-kci.h
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -17,22 +17,6 @@
#include <gcip/gcip-mailbox.h>
/*
- * The status field in a firmware response is set to this by us when the response is fetched from
- * the queue.
- */
-#define GCIP_KCI_STATUS_OK GCIP_MAILBOX_STATUS_OK
-/*
- * gcip_kci#mailbox.wait_list uses this value to record the status of responses that haven't been
- * received yet.
- */
-#define GCIP_KCI_STATUS_WAITING_RESPONSE GCIP_MAILBOX_STATUS_WAITING_RESPONSE
-/*
- * Used when an expected response is not received, see the documentation of
- * gcip_mailbox_handle_response() for details.
- */
-#define GCIP_KCI_STATUS_NO_RESPONSE GCIP_MAILBOX_STATUS_NO_RESPONSE
-
-/*
* Command/response sequence numbers capped at half the range of the 64-bit value range. The second
* half is reserved for incoming requests from firmware.
* These are tagged with the MSB set.
@@ -62,9 +46,9 @@ struct gcip_kci_response_element {
u64 seq;
u16 code;
/*
- * Reserved for host use - firmware can't touch this.
- * If a value is written here it will be discarded and overwritten during response
- * processing. However, when repurposed as an RKCI command, the FW can set this field.
+ * Firmware can set some data according to the type of the response.
+ * TODO(b/279386960): as we don't manage the status of responses using this field anymore,
+ * rename this field to more reasonable name.
*/
u16 status;
/*
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 835503f..af48ba6 100644
--- a/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -88,10 +88,21 @@ static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
struct gcip_mailbox;
+/*
+ * A struct wraps the IP-defined response to manage additional information such as status needed by
+ * the logic of GCIP.
+ */
+struct gcip_mailbox_async_resp {
+ /* Status code. Must be one of GCIP_MAILBOX_STATUS_*. */
+ uint16_t status;
+ /* IP-defined response. */
+ void *resp;
+};
+
/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
struct gcip_mailbox_resp_awaiter {
/* Response. */
- void *resp;
+ struct gcip_mailbox_async_resp async_resp;
/* The work which will be executed when the timeout occurs. */
struct delayed_work timeout_work;
/*
@@ -217,16 +228,6 @@ struct gcip_mailbox_ops {
* Context: normal and in_interrupt().
*/
void (*set_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp, u64 seq);
- /*
- * Gets the status of @resp queue element.
- * Context: normal and in_interrupt().
- */
- u16 (*get_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp);
- /*
- * Sets the status of @resp queue element.
- * Context: normal and in_interrupt().
- */
- void (*set_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp, u16 status);
/*
* Acquires the lock of wait_list. If @irqsave is true, "_irqsave" functions can be used to
diff --git a/gcip-kernel-driver/include/gcip/gcip-usage-stats.h b/gcip-kernel-driver/include/gcip/gcip-usage-stats.h
index a20fe33..a4e0cb4 100644
--- a/gcip-kernel-driver/include/gcip/gcip-usage-stats.h
+++ b/gcip-kernel-driver/include/gcip/gcip-usage-stats.h
@@ -152,6 +152,8 @@ enum gcip_usage_stats_component_utilization_type {
GCIP_USAGE_STATS_COMPONENT_UTILIZATION_IP,
/* A compute core. */
GCIP_USAGE_STATS_COMPONENT_UTILIZATION_CORES,
+ /* The DSP or TPU Control Core (R52). */
+ GCIP_USAGE_STATS_COMPONENT_UTILIZATION_CONTROL,
/* The number of total types. Must be located at the end of this enum. */
GCIP_USAGE_STATS_COMPONENT_UTILIZATION_NUM_TYPES,
@@ -302,6 +304,10 @@ enum gcip_usage_stats_thread_stats_thread_id {
* DSP cores.
*/
GCIP_USAGE_STATS_THREAD_DSP_CORE_MANAGER,
+ /* The driving thread for intercore message handling. */
+ GCIP_USAGE_STATS_THREAD_INTERCORE_SUBORDINATE,
+ /* The thread that executes callback when the timer expires. */
+ GCIP_USAGE_STATS_THREAD_TIMER_SERVICE,
/* The number of total threads. Must be located at the end of this enum. */
GCIP_USAGE_STATS_THREAD_NUM_TYPES,
diff --git a/gxp-dci.c b/gxp-dci.c
index 82bfc78..e39d987 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -244,22 +244,6 @@ static void gxp_dci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp,
elem->seq = seq;
}
-static u16 gxp_dci_get_resp_elem_status(struct gcip_mailbox *mailbox,
- void *resp)
-{
- struct gxp_dci_response *elem = resp;
-
- return elem->status;
-}
-
-static void gxp_dci_set_resp_elem_status(struct gcip_mailbox *mailbox,
- void *resp, u16 status)
-{
- struct gxp_dci_response *elem = resp;
-
- elem->status = status;
-}
-
static void
gxp_dci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
struct gcip_mailbox_resp_awaiter *awaiter)
@@ -273,6 +257,7 @@ gxp_dci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ async_resp->resp.status = GXP_DCI_RESP_OK;
list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
/*
* Marking the dest_queue as NULL indicates the
@@ -364,8 +349,6 @@ static const struct gcip_mailbox_ops gxp_dci_gcip_mbx_ops = {
.release_resp_queue_lock = gxp_mailbox_gcip_ops_release_resp_queue_lock,
.get_resp_elem_seq = gxp_dci_get_resp_elem_seq,
.set_resp_elem_seq = gxp_dci_set_resp_elem_seq,
- .get_resp_elem_status = gxp_dci_get_resp_elem_status,
- .set_resp_elem_status = gxp_dci_set_resp_elem_status,
.acquire_wait_list_lock = gxp_mailbox_gcip_ops_acquire_wait_list_lock,
.release_wait_list_lock = gxp_mailbox_gcip_ops_release_wait_list_lock,
.wait_for_cmd_queue_not_full =
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index a63a044..d099e7b 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -661,7 +661,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp,
goto out;
}
} else {
- virt_core = core_id;
+ virt_core = core_header->core_id;
}
/* fw ro section */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index ecb1f80..5b1e5ab 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -577,7 +577,7 @@ static const struct attribute_group gxp_firmware_attr_group = {
static int debugfs_firmware_run_set(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *)data;
+ struct gxp_dev *gxp = data;
struct gxp_client *client;
int ret = 0;
uint core;
@@ -591,78 +591,74 @@ static int debugfs_firmware_run_set(void *data, u64 val)
mutex_lock(&gxp->debugfs_client_lock);
- if (val) {
- if (gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware is already running!\n");
+ if (!val) {
+ if (!gxp->debugfs_client) {
+ dev_err(gxp->dev, "Firmware is not running!\n");
ret = -EIO;
goto out;
}
/*
- * Since this debugfs node destroys, then creates new fw_data,
- * and runs firmware on every DSP core, it cannot be run if
- * any of the cores already has a VD running on it.
+ * Cleaning up the client will stop the VD it owns and release
+ * the BLOCK wakelock it is holding.
*/
- down_write(&gxp->vd_semaphore);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core]) {
- dev_err(gxp->dev,
- "Unable to run firmware with debugfs while other clients are running\n");
- ret = -EBUSY;
- up_write(&gxp->vd_semaphore);
- goto out;
- }
- }
- up_write(&gxp->vd_semaphore);
+ goto out_destroy_client;
+ }
+ if (gxp->debugfs_client) {
+ dev_err(gxp->dev, "Firmware is already running!\n");
+ ret = -EIO;
+ goto out;
+ }
- client = gxp_client_create(gxp);
- if (IS_ERR(client)) {
- dev_err(gxp->dev, "Failed to create client\n");
+ /*
+ * Since this debugfs node destroys, then creates new fw_data, and runs firmware on every
+ * DSP core, it cannot be run if any of the cores already has a VD running on it.
+ */
+ down_write(&gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core]) {
+ dev_err(gxp->dev,
+ "Unable to run firmware with debugfs while other clients are running\n");
+ ret = -EBUSY;
+ up_write(&gxp->vd_semaphore);
goto out;
}
- gxp->debugfs_client = client;
-
- mutex_lock(&gxp->client_list_lock);
- list_add(&client->list_entry, &gxp->client_list);
- mutex_unlock(&gxp->client_list_lock);
+ }
+ up_write(&gxp->vd_semaphore);
- down_write(&client->semaphore);
+ client = gxp_client_create(gxp);
+ if (IS_ERR(client)) {
+ dev_err(gxp->dev, "Failed to create client\n");
+ goto out;
+ }
+ gxp->debugfs_client = client;
- ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES,
- 0);
- if (ret) {
- dev_err(gxp->dev, "Failed to allocate VD\n");
- goto err_destroy_client;
- }
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
- ret = gxp_client_acquire_block_wakelock(
- client, &acquired_block_wakelock);
- if (ret) {
- dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
- goto err_destroy_client;
- }
+ down_write(&client->semaphore);
- ret = gxp_client_acquire_vd_wakelock(client, uud_states);
- if (ret) {
- dev_err(gxp->dev, "Failed to acquire VD wakelock\n");
- goto err_release_block_wakelock;
- }
+ ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES, 0);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to allocate VD\n");
+ goto err_destroy_client;
+ }
- up_write(&client->semaphore);
- } else {
- if (!gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware is not running!\n");
- ret = -EIO;
- goto out;
- }
+ ret = gxp_client_acquire_block_wakelock(client, &acquired_block_wakelock);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
+ goto err_destroy_client;
+ }
- /*
- * Cleaning up the client will stop the VD it owns and release
- * the BLOCK wakelock it is holding.
- */
- goto out_destroy_client;
+ ret = gxp_client_acquire_vd_wakelock(client, uud_states);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to acquire VD wakelock\n");
+ goto err_release_block_wakelock;
}
+ up_write(&client->semaphore);
+
out:
mutex_unlock(&gxp->debugfs_client_lock);
diff --git a/gxp-kci.c b/gxp-kci.c
index e916080..3caee9b 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <gcip/gcip-telemetry.h>
+#include <gcip/gcip-usage-stats.h>
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
@@ -150,7 +151,7 @@ static void gxp_kci_handle_rkci(struct gxp_kci *gkci,
* of the logic against possible concurrent scenarios.
*/
gxp_kci_resp_rkci_ack(gkci, resp);
- gxp_vd_invalidate_with_client_id(gxp, client_id, core_list);
+ gxp_vd_invalidate_with_client_id(gxp, client_id, core_list, true);
break;
}
@@ -316,7 +317,13 @@ static struct gxp_mailbox_ops mbx_ops = {
static inline int gxp_kci_send_cmd(struct gxp_mailbox *mailbox,
struct gcip_kci_command_element *cmd)
{
- return gxp_mailbox_send_cmd(mailbox, cmd, NULL);
+ int ret;
+
+ gxp_pm_busy(mailbox->gxp);
+ ret = gxp_mailbox_send_cmd(mailbox, cmd, NULL);
+ gxp_pm_idle(mailbox->gxp);
+
+ return ret;
}
int gxp_kci_init(struct gxp_mcu *mcu)
@@ -342,13 +349,14 @@ int gxp_kci_init(struct gxp_mcu *mcu)
int gxp_kci_reinit(struct gxp_kci *gkci)
{
- dev_notice(gkci->gxp->dev, "%s not yet implemented\n", __func__);
+ gxp_mailbox_reset(gkci->mbx);
return 0;
}
void gxp_kci_cancel_work_queues(struct gxp_kci *gkci)
{
- gcip_kci_cancel_work_queues(gkci->mbx->mbx_impl.gcip_kci);
+ if (gkci->mbx)
+ gcip_kci_cancel_work_queues(gkci->mbx->mbx_impl.gcip_kci);
}
void gxp_kci_exit(struct gxp_kci *gkci)
@@ -466,10 +474,11 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
{
struct gxp_dev *gxp = gkci->gxp;
struct gcip_kci_command_element cmd = {
- .code = GCIP_KCI_CODE_GET_USAGE,
+ .code = GCIP_KCI_CODE_GET_USAGE_V2,
.dma = {
.address = 0,
.size = 0,
+ .flags = GCIP_USAGE_STATS_V2,
},
};
struct gxp_mapped_resource buf;
@@ -478,26 +487,32 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
if (!gkci || !gkci->mbx)
return -ENODEV;
- ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf,
- GXP_MCU_USAGE_BUFFER_SIZE);
+ ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf, GXP_MCU_USAGE_BUFFER_SIZE);
if (ret) {
- dev_warn_once(gxp->dev, "%s: failed to allocate usage buffer",
- __func__);
+ dev_warn_once(gxp->dev, "Failed to allocate usage buffer");
return -ENOMEM;
}
+retry_v1:
+ if (gxp->usage_stats && gxp->usage_stats->ustats.version == GCIP_USAGE_STATS_V1)
+ cmd.code = GCIP_KCI_CODE_GET_USAGE_V1;
+
cmd.dma.address = buf.daddr;
cmd.dma.size = GXP_MCU_USAGE_BUFFER_SIZE;
- memset(buf.vaddr, 0, sizeof(struct gxp_usage_header));
+ memset(buf.vaddr, 0, sizeof(struct gcip_usage_stats_header));
ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
- if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED ||
- ret == GCIP_KCI_ERROR_UNAVAILABLE)
- dev_dbg(gxp->dev, "firmware does not report usage\n");
- else if (ret == GCIP_KCI_ERROR_OK)
+ if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED || ret == GCIP_KCI_ERROR_UNAVAILABLE) {
+ if (gxp->usage_stats && gxp->usage_stats->ustats.version != GCIP_USAGE_STATS_V1) {
+ gxp->usage_stats->ustats.version = GCIP_USAGE_STATS_V1;
+ goto retry_v1;
+ }
+ dev_dbg(gxp->dev, "Firmware does not report usage");
+ } else if (ret == GCIP_KCI_ERROR_OK) {
gxp_usage_stats_process_buffer(gxp, buf.vaddr);
- else if (ret != -ETIMEDOUT)
- dev_warn_once(gxp->dev, "%s: error %d", __func__, ret);
+ } else if (ret != -ETIMEDOUT) {
+ dev_warn_once(gxp->dev, "Failed to send GET_USAGE KCI, ret=%d", ret);
+ }
gxp_mcu_mem_free_data(gkci->mcu, &buf);
diff --git a/gxp-mailbox-manager.c b/gxp-mailbox-manager.c
index 5d5a276..d65cb4a 100644
--- a/gxp-mailbox-manager.c
+++ b/gxp-mailbox-manager.c
@@ -32,9 +32,7 @@ static int debugfs_mailbox_execute_cmd(void *data, u64 val)
mutex_lock(&gxp->debugfs_client_lock);
client = gxp->debugfs_client;
-#if GXP_HAS_MCU
if (gxp_is_direct_mode(gxp)) {
-#endif
core = val / 1000;
if (core >= GXP_NUM_CORES) {
dev_notice(gxp->dev,
@@ -58,8 +56,8 @@ static int debugfs_mailbox_execute_cmd(void *data, u64 val)
client = gxp_client_create(gxp);
mbx = gxp->mailbox_mgr->mailboxes[core];
cmd_code = GXP_MBOX_CODE_DISPATCH;
-#if GXP_HAS_MCU
} else {
+#if GXP_HAS_MCU
if (!client) {
dev_err(gxp->dev,
"You should load firmwares via gxp/firmware_run first\n");
@@ -84,8 +82,8 @@ static int debugfs_mailbox_execute_cmd(void *data, u64 val)
}
cmd_code = CORE_COMMAND;
+#endif /* GXP_HAS_MCU */
}
-#endif
retval = gxp->mailbox_mgr->execute_cmd(client, mbx, core, cmd_code, 0,
0, 0, 0, 1, power_states, NULL,
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 758b707..8b1d843 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -328,10 +328,7 @@ static int enable_mailbox(struct gxp_mailbox *mailbox)
int ret;
gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_buf.dsp_addr);
- gxp_mailbox_write_cmd_queue_head(mailbox, 0);
- gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
- gxp_mailbox_write_resp_queue_head(mailbox, 0);
- gxp_mailbox_write_resp_queue_tail(mailbox, 0);
+ gxp_mailbox_reset(mailbox);
ret = init_mailbox_impl(mailbox);
if (ret)
@@ -446,7 +443,12 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
{
- dev_notice(mailbox->gxp->dev, "%s not yet implemented\n", __func__);
+ gxp_mailbox_write_cmd_queue_head(mailbox, 0);
+ gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
+ gxp_mailbox_write_resp_queue_head(mailbox, 0);
+ gxp_mailbox_write_resp_queue_tail(mailbox, 0);
+ mailbox->cmd_queue_tail = 0;
+ mailbox->resp_queue_head = 0;
}
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 887578e..6713cd7 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -27,7 +27,7 @@
* Offset from the host mailbox interface to the device interface that needs to
* be mapped.
*/
-#if defined(CONFIG_GXP_IP_ZEBU) || defined(CONFIG_GXP_GEM5)
+#if defined(CONFIG_GXP_IP_ZEBU)
#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x180000
#else
#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c
index 7c97481..a4f1f40 100644
--- a/gxp-mcu-firmware.c
+++ b/gxp-mcu-firmware.c
@@ -68,6 +68,41 @@ static bool is_signed_firmware(const struct firmware *fw,
return true;
}
+/*
+ * Waits for the MCU LPM transition to the PG state. If it fails, it will reboot the whole block.
+ *
+ * Must be called with holding @mcu_fw->lock.
+ *
+ * @ring_doorbell: If the situation is that the MCU cannot execute the transition by itself such
+ * as HW watchdog timeout, it must be passed as true to trigger the doorbell and
+ * let the MCU do that forcefully.
+ */
+static int wait_for_pg_state_locked(struct gxp_dev *gxp, bool ring_doorbell)
+{
+ struct gxp_mcu *mcu = &to_mcu_dev(gxp)->mcu;
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
+ int ret = 0;
+
+ lockdep_assert_held(&mcu_fw->lock);
+
+ if (ring_doorbell) {
+ gxp_mailbox_set_control(mcu->kci.mbx, GXP_MBOX_CONTROL_MAGIC_POWER_DOWN);
+ gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID),
+ GXP_MCU_CORE_ID);
+ gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
+ }
+
+ if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID), LPM_PG_STATE)) {
+ dev_warn(
+ gxp->dev,
+ "MCU PSM transition to PS3 fails, current state: %u. Falling back to power cycle AUR block.\n",
+ gxp_lpm_get_state(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID)));
+ ret = gxp_pm_blk_reboot(gxp, 5000);
+ }
+
+ return ret;
+}
+
int gxp_mcu_firmware_load(struct gxp_dev *gxp, char *fw_name,
const struct firmware **fw)
{
@@ -227,11 +262,21 @@ static void gxp_mcu_firmware_stop_locked(struct gxp_mcu_firmware *mcu_fw)
if (ret)
dev_warn(gxp->dev, "KCI shutdown failed: %d", ret);
- if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID),
- LPM_PG_STATE))
- dev_warn(gxp->dev,
- "MCU PSM transition to PS3 fails, current state: %u\n",
- gxp_lpm_get_state(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID)));
+ /*
+ * If shutdown KCI fails, we can suspect MCU has some issues. In that case, it would be
+ * good to ring the doorbell and make MCU transit to PG state by force.
+ */
+ ret = wait_for_pg_state_locked(gxp, ret);
+ if (ret)
+ dev_err(gxp->dev, "Failed to transit MCU to PG state after KCI shutdown: %d", ret);
+
+ gxp_kci_cancel_work_queues(&mcu->kci);
+ /*
+ * Clears up all remaining KCI commands. Otherwise, MCU may drain them improperly after it
+ * reboots.
+ */
+ gxp_kci_reinit(&mcu->kci);
+
if (mcu_fw->is_secure)
gsa_send_dsp_cmd(gxp->gsa_dev, GSA_DSP_SHUTDOWN);
}
@@ -241,6 +286,7 @@ static int gxp_mcu_firmware_power_up(struct gxp_mcu_firmware *mcu_fw)
struct gxp_dev *gxp = mcu_fw->gxp;
int ret;
int state;
+ bool pg_state = false;
gxp_bpm_configure(gxp, GXP_MCU_CORE_ID, INST_BPM_OFFSET,
BPM_EVENT_READ_XFER);
@@ -267,8 +313,20 @@ static int gxp_mcu_firmware_power_up(struct gxp_mcu_firmware *mcu_fw)
}
ret = gxp_mcu_firmware_handshake(mcu_fw);
- if (ret)
+ if (ret) {
+ /* MCU seems to have some problems. Wait for it being transit to PG state. */
+ if (wait_for_pg_state_locked(gxp, true))
+ dev_err(gxp->dev,
+ "Failed to transit MCU LPM to PG while handling handshake failure");
+ /*
+ * Set it as true even though it fails to wait for the PG state above just in case
+ * the BLK is not powered on properly. Because accessing LPM CSRs in `gxp_lpm_down`
+ * in that case might cause the kernel panic.
+ */
+ pg_state = true;
goto err_mcu_shutdown;
+ }
+
dev_info(gxp->dev, "MCU firmware run succeeded");
return ret;
@@ -277,7 +335,8 @@ err_mcu_shutdown:
if (mcu_fw->is_secure)
gsa_send_dsp_cmd(gxp->gsa_dev, GSA_DSP_SHUTDOWN);
err_lpm_down:
- gxp_lpm_down(gxp, GXP_MCU_CORE_ID);
+ if (!pg_state)
+ gxp_lpm_down(gxp, GXP_MCU_CORE_ID);
return ret;
}
@@ -574,35 +633,15 @@ void gxp_mcu_firmware_crash_handler(struct gxp_dev *gxp,
/* Turn off and on the MCU PSM and restart the MCU firmware. */
mutex_lock(&mcu_fw->lock);
- /*
- * In this case, the MCU can't trigger the PSM transition to PG state by itself and won't
- * fall into the WFI mode. We have to trigger the doorbell to let the MCU do that.
- */
- if (crash_type == GCIP_FW_CRASH_HW_WDG_TIMEOUT) {
- struct gxp_mcu *mcu = &to_mcu_dev(gxp)->mcu;
-
- gxp_mailbox_set_control(mcu->kci.mbx,
- GXP_MBOX_CONTROL_MAGIC_POWER_DOWN);
- gxp_doorbell_enable_for_core(
- gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID),
- GXP_MCU_CORE_ID);
- gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
- }
-
- if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID),
- LPM_PG_STATE)) {
- dev_warn(
- gxp->dev,
- "MCU PSM transition to PS3 fails, current state: %u. Falling back to power cycle AUR block.\n",
- gxp_lpm_get_state(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID)));
- ret = gxp_pm_blk_reboot(gxp, 5000);
- if (ret)
- goto out;
+ ret = wait_for_pg_state_locked(gxp, crash_type == GCIP_FW_CRASH_HW_WDG_TIMEOUT);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to transit MCU LPM state to PG (ret=%d)", ret);
+ goto out;
}
ret = gxp_mcu_firmware_run_locked(mcu_fw);
if (ret)
- dev_err(gxp->dev, "Failed to run MCU firmware (ret=%d)\n", ret);
+ dev_err(gxp->dev, "Failed to run MCU firmware (ret=%d)", ret);
out:
mutex_unlock(&mcu_fw->lock);
diff --git a/gxp-mcu-platform.c b/gxp-mcu-platform.c
index 203f1d5..467525f 100644
--- a/gxp-mcu-platform.c
+++ b/gxp-mcu-platform.c
@@ -150,10 +150,13 @@ static int gxp_mcu_pm_after_blk_on(struct gxp_dev *gxp)
static void gxp_mcu_pm_before_blk_off(struct gxp_dev *gxp)
{
+ struct gxp_kci *kci = &(gxp_mcu_of(gxp)->kci);
struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
if (gxp_is_direct_mode(gxp))
return;
+ if (mcu_fw->status == GCIP_FW_VALID)
+ gxp_kci_update_usage_locked(kci);
gxp_mcu_firmware_stop(mcu_fw);
}
diff --git a/gxp-pm.c b/gxp-pm.c
index 4bb8072..625d106 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -8,7 +8,10 @@
#include <linux/acpm_dvfs.h>
#include <linux/bits.h>
#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <soc/google/exynos_pm_qos.h>
@@ -20,16 +23,24 @@
#include "gxp-config.h"
#include "gxp-dma.h"
#include "gxp-doorbell.h"
+#include "gxp-firmware.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-pm.h"
+/* Don't attempt to touch the device when @busy_count equals this value. */
+#define BUSY_COUNT_OFF (~0ull)
+
#define DEBUGFS_BLK_POWERSTATE "blk_powerstate"
#define DEBUGFS_WAKELOCK "wakelock"
#define SHUTDOWN_DELAY_US_MIN 200
#define SHUTDOWN_DELAY_US_MAX 400
+/* TODO(b/279201155): set default to true once confirmed it works as expected */
+static bool gxp_slow_clk_on_idle;
+module_param_named(slow_clk, gxp_slow_clk_on_idle, bool, 0660);
+
/*
* The order of this array decides the voting priority, should be increasing in
* frequencies.
@@ -155,6 +166,24 @@ static void reset_cmu_mux_state(struct gxp_dev *gxp)
set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
}
+static void gxp_pm_can_busy(struct gxp_power_manager *mgr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mgr->busy_lock, flags);
+ mgr->busy_count = 0;
+ spin_unlock_irqrestore(&mgr->busy_lock, flags);
+}
+
+static void gxp_pm_no_busy(struct gxp_power_manager *mgr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mgr->busy_lock, flags);
+ mgr->busy_count = BUSY_COUNT_OFF;
+ spin_unlock_irqrestore(&mgr->busy_lock, flags);
+}
+
void gxp_pm_force_clkmux_normal(struct gxp_dev *gxp)
{
mutex_lock(&gxp->power_mgr->pm_lock);
@@ -241,6 +270,7 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
/* Startup TOP's PSM */
gxp_lpm_init(gxp);
gxp->power_mgr->blk_switch_count++;
+ gxp_pm_can_busy(gxp->power_mgr);
out:
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -261,6 +291,7 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
+ gxp_pm_no_busy(gxp->power_mgr);
/* Above has checked device is powered, it's safe to access the CMU regs. */
reset_cmu_mux_state(gxp);
@@ -849,6 +880,19 @@ DEFINE_DEBUGFS_ATTRIBUTE(debugfs_blk_powerstate_fops,
debugfs_blk_powerstate_get, debugfs_blk_powerstate_set,
"%llx\n");
+static void gxp_pm_on_busy(struct gxp_dev *gxp)
+{
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ /* TODO(b/279201155): set noc if the bug can be resolved */
+}
+
+static void gxp_pm_on_idle(struct gxp_dev *gxp)
+{
+ if (gxp_slow_clk_on_idle)
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_LOW);
+ /* TODO(b/279201155): set noc if the bug can be resolved */
+}
+
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
@@ -897,6 +941,8 @@ int gxp_pm_init(struct gxp_dev *gxp)
create_singlethread_workqueue("gxp_power_work_queue");
gxp->power_mgr->force_mux_normal_count = 0;
gxp->power_mgr->blk_switch_count = 0l;
+ spin_lock_init(&gxp->power_mgr->busy_lock);
+ gxp->power_mgr->busy_count = BUSY_COUNT_OFF;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pmu_aur_status");
@@ -976,3 +1022,34 @@ void gxp_pm_set_thermal_limit(struct gxp_dev *gxp, unsigned long thermal_limit)
mutex_unlock(&gxp->power_mgr->pm_lock);
}
+
+void gxp_pm_busy(struct gxp_dev *gxp)
+{
+ unsigned long flags;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+
+ spin_lock_irqsave(&mgr->busy_lock, flags);
+ /*
+ * We don't need to check BUSY_COUNT_OFF here, caller ensures the block is powered before
+ * calling this function.
+ */
+ ++mgr->busy_count;
+ if (mgr->busy_count == 1)
+ gxp_pm_on_busy(gxp);
+ spin_unlock_irqrestore(&mgr->busy_lock, flags);
+}
+
+void gxp_pm_idle(struct gxp_dev *gxp)
+{
+ unsigned long flags;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+
+ spin_lock_irqsave(&mgr->busy_lock, flags);
+ if (mgr->busy_count == BUSY_COUNT_OFF)
+ goto out;
+ --mgr->busy_count;
+ if (mgr->busy_count == 0)
+ gxp_pm_on_idle(gxp);
+out:
+ spin_unlock_irqrestore(&mgr->busy_lock, flags);
+}
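
The accounting added above is a simple counted protocol: busy_count sits at BUSY_COUNT_OFF while the block is off, gxp_pm_blk_on()/gxp_pm_blk_off() arm and disarm it, and only the 0 -> 1 and 1 -> 0 transitions touch the clock mux (with the slow-clock side further gated by the slow_clk module parameter). A standalone model of just that counting behaviour, runnable as plain C with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    #define BUSY_COUNT_OFF (~0ull)      /* block powered off: ignore busy/idle */

    static uint64_t busy_count = BUSY_COUNT_OFF;

    static void on_busy(void) { puts("mux -> NORMAL (high clk)"); }
    static void on_idle(void) { puts("mux -> LOW (slow clk)"); }

    static void pm_busy(void)
    {
            /* Callers guarantee the block is on, so no BUSY_COUNT_OFF check here. */
            if (++busy_count == 1)
                    on_busy();
    }

    static void pm_idle(void)
    {
            if (busy_count == BUSY_COUNT_OFF)
                    return;
            if (--busy_count == 0)
                    on_idle();
    }

    int main(void)
    {
            busy_count = 0;                 /* gxp_pm_blk_on(): arm the counter */
            pm_busy();                      /* first request: switch to high clk */
            pm_busy();                      /* nested request: no transition */
            pm_idle();
            pm_idle();                      /* last request done: slow clk (only if slow_clk is set) */
            busy_count = BUSY_COUNT_OFF;    /* gxp_pm_blk_off(): disarm */
            return 0;
    }
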
diff --git a/gxp-pm.h b/gxp-pm.h
index 884f2b5..41ded8a 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -7,6 +7,10 @@
#ifndef __GXP_PM_H__
#define __GXP_PM_H__
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
#include <soc/google/bcl.h>
#include <soc/google/exynos_pm_qos.h>
@@ -155,6 +159,10 @@ struct gxp_power_manager {
u64 blk_switch_count;
/* PMU AUR_STATUS base address for block status, maybe NULL */
void __iomem *aur_status;
+ /* Protects @busy_count. */
+ spinlock_t busy_lock;
+ /* The number of ongoing requests to the firmware. */
+ u64 busy_count;
};
/**
@@ -341,6 +349,20 @@ void gxp_pm_resume_clkmux(struct gxp_dev *gxp);
void gxp_pm_set_thermal_limit(struct gxp_dev *gxp, unsigned long thermal_limit);
/**
+ * gxp_pm_busy() - Claim there is a request to the firmware.
+ * @gxp: The GXP device
+ *
+ * This function is used in pair with gxp_pm_idle().
+ * When there is no ongoing requests, we can put the device in a lower frequency to save power.
+ */
+void gxp_pm_busy(struct gxp_dev *gxp);
+/**
+ * gxp_pm_idle() - Reverts gxp_pm_busy().
+ * @gxp: The GXP device
+ */
+void gxp_pm_idle(struct gxp_dev *gxp);
+
+/**
* gxp_pm_chip_set_ops() - Set the operations to the power manager, i.e.
* @mgr->ops.
* @mgr: The power manager to be set operations to
diff --git a/gxp-uci.c b/gxp-uci.c
index cc1fd3b..ed2eee5 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -211,22 +211,6 @@ static void gxp_uci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp,
elem->seq = seq;
}
-static u16 gxp_uci_get_resp_elem_status(struct gcip_mailbox *mailbox,
- void *resp)
-{
- struct gxp_uci_response *elem = resp;
-
- return elem->code;
-}
-
-static void gxp_uci_set_resp_elem_status(struct gcip_mailbox *mailbox,
- void *resp, u16 status)
-{
- struct gxp_uci_response *elem = resp;
-
- elem->code = status;
-}
-
static int
gxp_uci_before_enqueue_wait_list(struct gcip_mailbox *mailbox, void *resp,
struct gcip_mailbox_resp_awaiter *awaiter)
@@ -267,6 +251,7 @@ gxp_uci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
if (!async_resp->wait_queue)
goto out;
+ async_resp->status = GXP_RESP_OK;
async_resp->wait_queue = NULL;
list_del(&async_resp->wait_list_entry);
@@ -308,11 +293,11 @@ gxp_uci_handle_awaiter_timedout(struct gcip_mailbox *mailbox,
return;
}
+ async_resp->status = GXP_RESP_CANCELLED;
async_resp->wait_queue = NULL;
list_del(&async_resp->wait_list_entry);
if (async_resp->dest_queue) {
- async_resp->resp.code = GXP_RESP_CANCELLED;
list_add_tail(&async_resp->dest_list_entry,
async_resp->dest_queue);
spin_unlock_irqrestore(async_resp->queue_lock, flags);
@@ -332,9 +317,15 @@ static void gxp_uci_release_awaiter_data(void *data)
{
struct gxp_uci_async_response *async_resp = data;
- gxp_vd_release_credit(async_resp->vd);
+ /*
+ * This function might be called when the VD is already released, don't do VD operations in
+ * this case.
+ */
+ if (async_resp->vd->state != GXP_VD_RELEASED)
+ gxp_vd_release_credit(async_resp->vd);
if (async_resp->eventfd)
gxp_eventfd_put(async_resp->eventfd);
+ gxp_vd_put(async_resp->vd);
kfree(async_resp);
}
@@ -355,8 +346,6 @@ static const struct gcip_mailbox_ops gxp_uci_gcip_mbx_ops = {
.release_resp_queue_lock = gxp_mailbox_gcip_ops_release_resp_queue_lock,
.get_resp_elem_seq = gxp_uci_get_resp_elem_seq,
.set_resp_elem_seq = gxp_uci_set_resp_elem_seq,
- .get_resp_elem_status = gxp_uci_get_resp_elem_status,
- .set_resp_elem_status = gxp_uci_set_resp_elem_status,
.acquire_wait_list_lock = gxp_mailbox_gcip_ops_acquire_wait_list_lock,
.release_wait_list_lock = gxp_mailbox_gcip_ops_release_wait_list_lock,
.wait_for_cmd_queue_not_full =
@@ -494,7 +483,7 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
}
async_resp->uci = uci;
- async_resp->vd = vd;
+ async_resp->vd = gxp_vd_get(vd);
async_resp->wait_queue = wait_queue;
/*
* If the command is a wakelock command, keep dest_queue as a null
@@ -537,6 +526,7 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
{
long timeout;
struct gxp_uci_async_response *async_resp;
+ int ret = 0;
spin_lock_irq(&uci_resp_queue->lock);
@@ -564,22 +554,18 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
spin_unlock_irq(&uci_resp_queue->lock);
*resp_seq = async_resp->resp.seq;
- switch (async_resp->resp.code) {
+ switch (async_resp->status) {
case GXP_RESP_OK:
- *error_code = GXP_RESPONSE_ERROR_NONE;
+ *error_code = async_resp->resp.code;
if (opaque)
- memcpy(opaque, async_resp->resp.opaque,
- sizeof(async_resp->resp.opaque));
- break;
- case GXP_RESP_CANCELLED:
- *error_code = GXP_RESPONSE_ERROR_TIMEOUT;
+ memcpy(opaque, async_resp->resp.opaque, sizeof(async_resp->resp.opaque));
+ if (*error_code)
+ dev_err(async_resp->uci->gxp->dev,
+ "Completed response with an error from the firmware side %hu\n",
+ *error_code);
break;
default:
- /* No other code values are valid at this point */
- dev_err(async_resp->uci->gxp->dev,
- "Completed response had invalid code %hu\n",
- async_resp->resp.code);
- *error_code = GXP_RESPONSE_ERROR_INTERNAL;
+ ret = -ETIMEDOUT;
break;
}
@@ -600,5 +586,5 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
gcip_mailbox_cancel_awaiter_timeout(async_resp->awaiter);
gcip_mailbox_release_awaiter(async_resp->awaiter);
- return 0;
+ return ret;
}
diff --git a/gxp-uci.h b/gxp-uci.h
index 50d9ce0..66e49d3 100644
--- a/gxp-uci.h
+++ b/gxp-uci.h
@@ -119,6 +119,8 @@ struct gxp_uci_async_response {
struct gxp_virtual_device *vd;
/* Handles arrival, timeout of async response. */
struct gcip_mailbox_resp_awaiter *awaiter;
+ /* Status of the response. */
+ enum gxp_response_status status;
};
struct gxp_uci_wait_list {
diff --git a/gxp-usage-stats.c b/gxp-usage-stats.c
index f198f10..1e5d3f9 100644
--- a/gxp-usage-stats.c
+++ b/gxp-usage-stats.c
@@ -5,85 +5,130 @@
* Copyright (C) 2022 Google LLC
*/
+#include <linux/device.h>
+
+#include <gcip/gcip-usage-stats.h>
+
+#include "gxp-config.h"
+#include "gxp-mcu-platform.h"
+#include "gxp-mcu.h"
+#include "gxp-pm.h"
#include "gxp-usage-stats.h"
-void gxp_usage_stats_process_buffer(struct gxp_dev *gxp, void *buf)
+/* Core usage. */
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_CORE_USAGE, 0, 0, dsp_usage_0, NULL,
+ NULL);
+
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_CORE_USAGE, 0, 1, dsp_usage_1, NULL,
+ NULL);
+
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_CORE_USAGE, 0, 2, dsp_usage_2, NULL,
+ NULL);
+
+/* Counter. */
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_COUNTER,
+ GCIP_USAGE_STATS_COUNTER_WORKLOAD,
+ GCIP_USAGE_STATS_ATTR_ALL_SUBCOMPONENTS, dsp_workload_count, NULL,
+ NULL);
+
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_COUNTER,
+ GCIP_USAGE_STATS_COUNTER_CONTEXT_SWITCHES,
+ GCIP_USAGE_STATS_ATTR_ALL_SUBCOMPONENTS, context_switch_count, NULL,
+ NULL);
+
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_COUNTER,
+ GCIP_USAGE_STATS_COUNTER_CONTEXT_PREEMPTIONS,
+ GCIP_USAGE_STATS_ATTR_ALL_SUBCOMPONENTS, preempt_count, NULL, NULL);
+
+/* Thread statistics. */
+static GCIP_USAGE_STATS_ATTR_RW(GCIP_USAGE_STATS_METRIC_TYPE_THREAD_STATS, 0, 0, fw_thread_stats,
+ NULL, NULL);
+
+/* DVFS frequency info. */
+static GCIP_USAGE_STATS_ATTR_RO(GCIP_USAGE_STATS_METRIC_TYPE_DVFS_FREQUENCY_INFO, 0, 0,
+ scaling_available_frequencies, NULL);
+
+static struct gcip_usage_stats_attr *attrs[] = {
+ &gcip_usage_stats_attr_dsp_usage_0,
+ &gcip_usage_stats_attr_dsp_usage_1,
+ &gcip_usage_stats_attr_dsp_usage_2,
+ &gcip_usage_stats_attr_dsp_workload_count,
+ &gcip_usage_stats_attr_context_switch_count,
+ &gcip_usage_stats_attr_preempt_count,
+ &gcip_usage_stats_attr_fw_thread_stats,
+ &gcip_usage_stats_attr_scaling_available_frequencies,
+};
+
+static int update_usage_kci(void *data)
{
- struct gxp_usage_header *header = buf;
- struct gxp_usage_metric *metric =
- (struct gxp_usage_metric *)(header + 1);
- int i;
-
- dev_dbg(gxp->dev, "%s: n=%u sz=%u", __func__, header->num_metrics,
- header->metric_size);
- if (header->metric_size != sizeof(struct gxp_usage_metric)) {
- dev_dbg(gxp->dev, "%s: expected sz=%zu, discard", __func__,
- sizeof(struct gxp_usage_metric));
- return;
- }
+ struct gxp_usage_stats *ustats = data;
+ struct gxp_dev *gxp = ustats->gxp;
+ struct gxp_mcu *mcu = &to_mcu_dev(gxp)->mcu;
- for (i = 0; i < header->num_metrics; i++) {
- switch (metric->type) {
- /* TODO(b/237967242): Handle metrics according to their types. */
- default:
- dev_dbg(gxp->dev, "%s: %d: skip unknown type=%u",
- __func__, i, metric->type);
- break;
- }
- metric++;
- }
+ return gxp_kci_update_usage(&mcu->kci);
}
-/*
- * TODO(b/237967242): Implement device attributes and add them to the `usage_stats_dev_attrs`
- * below.
- */
+static int get_default_dvfs_freqs_num(void *data)
+{
+ return AUR_NUM_POWER_STATE;
+}
-static struct attribute *usage_stats_dev_attrs[] = {
- NULL,
-};
+static int get_default_dvfs_freq(int idx, void *data)
+{
+ if (idx >= AUR_NUM_POWER_STATE)
+ return 0;
+ return aur_power_state2rate[idx];
+}
-static const struct attribute_group usage_stats_attr_group = {
- .attrs = usage_stats_dev_attrs,
+static const struct gcip_usage_stats_ops stats_ops = {
+ .update_usage_kci = update_usage_kci,
+ .get_default_dvfs_freqs_num = get_default_dvfs_freqs_num,
+ .get_default_dvfs_freq = get_default_dvfs_freq,
};
+void gxp_usage_stats_process_buffer(struct gxp_dev *gxp, void *buf)
+{
+ if (!gxp->usage_stats)
+ return;
+ gcip_usage_stats_process_buffer(&gxp->usage_stats->ustats, buf);
+}
+
void gxp_usage_stats_init(struct gxp_dev *gxp)
{
struct gxp_usage_stats *ustats;
+ struct gcip_usage_stats_args args;
int ret;
ustats = devm_kzalloc(gxp->dev, sizeof(*gxp->usage_stats), GFP_KERNEL);
if (!ustats) {
- dev_warn(gxp->dev,
- "failed to allocate memory for usage stats\n");
+ dev_warn(gxp->dev, "failed to allocate memory for usage stats\n");
return;
}
- /*
- * TODO(b/237967242): Add initialization codes of member variables of `ustats` if needed
- * after the metrics are decided and implemented.
- */
- mutex_init(&ustats->usage_stats_lock);
- gxp->usage_stats = ustats;
+ args.version = GXP_USAGE_METRIC_VERSION;
+ args.dev = gxp->dev;
+ args.ops = &stats_ops;
+ args.attrs = attrs;
+ args.num_attrs = ARRAY_SIZE(attrs);
+ args.subcomponents = GXP_NUM_CORES;
+ args.data = ustats;
+ ustats->gxp = gxp;
- ret = device_add_group(gxp->dev, &usage_stats_attr_group);
- if (ret)
+ ret = gcip_usage_stats_init(&ustats->ustats, &args);
+ if (ret) {
dev_warn(gxp->dev, "failed to create the usage_stats attrs\n");
+ devm_kfree(gxp->dev, ustats);
+ return;
+ }
- dev_dbg(gxp->dev, "%s init\n", __func__);
+ gxp->usage_stats = ustats;
}
void gxp_usage_stats_exit(struct gxp_dev *gxp)
{
- struct gxp_usage_stats *ustats = gxp->usage_stats;
-
- if (ustats) {
- /*
- * TODO(b/237967242): Add releasing codes of member variables of `ustats` if needed
- * after the metrics are decided and implemented.
- */
- device_remove_group(gxp->dev, &usage_stats_attr_group);
+ if (gxp->usage_stats) {
+ gcip_usage_stats_exit(&gxp->usage_stats->ustats);
+ devm_kfree(gxp->dev, gxp->usage_stats);
}
-
- dev_dbg(gxp->dev, "%s exit\n", __func__);
+ gxp->usage_stats = NULL;
}
diff --git a/gxp-usage-stats.h b/gxp-usage-stats.h
index 8a5fdb3..2f63557 100644
--- a/gxp-usage-stats.h
+++ b/gxp-usage-stats.h
@@ -8,44 +8,16 @@
#ifndef __GXP_USAGE_STATS_H__
#define __GXP_USAGE_STATS_H__
-#include <linux/types.h>
+#include <gcip/gcip-usage-stats.h>
#include "gxp-internal.h"
-/* Header struct in the metric buffer. */
-/* Must be kept in sync with firmware struct UsageTrackerHeader */
-struct gxp_usage_header {
- uint32_t num_metrics; /* Number of metrics being reported */
- uint32_t metric_size; /* Size of each metric struct */
-};
-
-/* TODO(b/237967242): Add data structures after the interfaces of each metrics are decided. */
-
-/*
- * Must be kept in sync with firmware enum class UsageTrackerMetric::Type
- * TODO(b/237967242): Add metric types after they are decided.
- */
-enum gxp_usage_metric_type {
- GXP_METRIC_TYPE_RESERVED = 0,
-};
+#define GXP_USAGE_METRIC_VERSION GCIP_USAGE_STATS_V2
-/*
- * Encapsulates a single metric reported to the kernel.
- * Must be kept in sync with firmware struct UsageTrackerMetric.
- */
-struct gxp_usage_metric {
- uint32_t type;
- uint8_t reserved[4];
- union {
- };
-};
-
-/*
- * Stores the usage of DSP which is collected from the get_usage KCI metrics.
- * TODO(b/237967242): Add variables storing the usage if needed after the metrics are decided.
- */
+/* Stores the usage of DSP which is collected from the GET_USAGE KCI metrics. */
struct gxp_usage_stats {
- struct mutex usage_stats_lock;
+ struct gxp_dev *gxp;
+ struct gcip_usage_stats ustats;
};
/* Parses the buffer from the get_usage KCI and updates the usage_stats of @gxp. */
diff --git a/gxp-vd.c b/gxp-vd.c
index cb09846..d5c67a3 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -812,6 +812,12 @@ int gxp_vd_block_ready(struct gxp_virtual_device *vd)
}
}
+ /*
+ * We don't know when would the secure world issue requests. Using high frequency as long
+ * as a block wakelock is held by a secure VD.
+ */
+ if (vd->is_secure)
+ gxp_pm_busy(gxp);
trace_gxp_vd_block_ready_end(vd->vdid);
return 0;
@@ -831,6 +837,8 @@ void gxp_vd_block_unready(struct gxp_virtual_device *vd)
vd->state = GXP_VD_OFF;
gxp_dma_domain_detach_device(gxp, vd->domain);
+ if (vd->is_secure)
+ gxp_pm_idle(gxp);
trace_gxp_vd_block_unready_end(vd->vdid);
}
@@ -1407,10 +1415,12 @@ bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd)
unsigned long flags;
spin_lock_irqsave(&vd->credit_lock, flags);
- if (vd->credit == 0)
+ if (vd->credit == 0) {
ret = false;
- else
+ } else {
vd->credit--;
+ gxp_pm_busy(vd->gxp);
+ }
spin_unlock_irqrestore(&vd->credit_lock, flags);
return ret;
@@ -1421,10 +1431,12 @@ void gxp_vd_release_credit(struct gxp_virtual_device *vd)
unsigned long flags;
spin_lock_irqsave(&vd->credit_lock, flags);
- if (unlikely(vd->credit >= GXP_COMMAND_CREDIT_PER_VD))
+ if (unlikely(vd->credit >= GXP_COMMAND_CREDIT_PER_VD)) {
dev_err(vd->gxp->dev, "unbalanced VD credit");
- else
+ } else {
+ gxp_pm_idle(vd->gxp);
vd->credit++;
+ }
spin_unlock_irqrestore(&vd->credit_lock, flags);
}
@@ -1436,8 +1448,8 @@ void gxp_vd_put(struct gxp_virtual_device *vd)
kfree(vd);
}
-void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id,
- uint core_list)
+void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id, uint core_list,
+ bool release_vmbox)
{
struct gxp_client *client = NULL, *c;
uint core;
@@ -1486,14 +1498,17 @@ void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id,
}
gxp_vd_invalidate(gxp, client->vd);
+
/*
- * Release @client->semaphore first because the `gxp_vd_generate_debug_dump` function only
- * requires holding @gxp->vd_semaphore and holding @client->semaphore will block the client
- * calling ioctls for a while as generating debug dump taking long time.
+ * Release @client->semaphore first because we need this lock to block ioctls while
+ * changing the state of @client->vd to UNAVAILABLE which is already done above.
*/
up_write(&client->semaphore);
+ if (release_vmbox)
+ gxp_vd_release_vmbox(gxp, client->vd);
gxp_vd_generate_debug_dump(gxp, client->vd, core_list);
+
up_write(&gxp->vd_semaphore);
}
diff --git a/gxp-vd.h b/gxp-vd.h
index 4eb7ee8..7e894e7 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -413,9 +413,10 @@ void gxp_vd_put(struct gxp_virtual_device *vd);
* @gxp: The GXP device to obtain the handler for
* @client_id: client_id of the crashed vd.
* @core_list: A bitfield enumerating the physical cores on which crash is reported from firmware.
+ * @release_vmbox: Releases the vmbox of the vd after invalidating it.
*/
-void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id,
- uint core_list);
+void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id, uint core_list,
+ bool release_vmbox);
/*
* Changes the status of the @vd to GXP_VD_UNAVAILABLE.
diff --git a/gxp.h b/gxp.h
index e4db89e..a8e71b0 100644
--- a/gxp.h
+++ b/gxp.h
@@ -13,7 +13,7 @@
/* Interface Version */
#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 14
+#define GXP_INTERFACE_VERSION_MINOR 15
#define GXP_INTERFACE_VERSION_BUILD 0
/*
@@ -918,10 +918,7 @@ struct gxp_mailbox_uci_response_ioctl {
__u64 sequence_number;
/*
* Output:
- * Driver error code.
- * Indicates if the response was obtained successfully,
- * `GXP_RESPONSE_ERROR_NONE`, or what error prevented the command
- * from completing successfully.
+ * Error code propagated from the MCU firmware side.
*/
__u16 error_code;
/* reserved fields */
@@ -929,7 +926,7 @@ struct gxp_mailbox_uci_response_ioctl {
/*
* Output:
* Is copied from the UCI response without modification.
- * Only valid if `error_code` == GXP_RESPONSE_ERROR_NONE
+ * Only valid if this IOCTL returns 0.
*/
__u8 opaque[16];
};
@@ -939,6 +936,12 @@ struct gxp_mailbox_uci_response_ioctl {
* is available.
*
* The client must hold a BLOCK wakelock.
+ *
+ * Returns:
+ * 0 - A response arrived from the MCU firmware. Note that this doesn't guarantee the
+ * success of the UCI command. The runtime must refer to @error_code field to check
+ * whether there was an error from the MCU side while processing the request.
+ * -ETIMEDOUT - MCU firmware is not responding.
*/
#define GXP_MAILBOX_UCI_RESPONSE \
_IOR(GXP_IOCTL_BASE, 31, struct gxp_mailbox_uci_response_ioctl)
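
Given the reworked semantics above, a runtime caller separates transport failure from firmware failure: a non-zero ioctl return (errno ETIMEDOUT) means no response arrived at all, while a zero return with a non-zero error_code is a completed response that failed inside the MCU firmware. A hedged userspace sketch, assuming this gxp.h and a device file descriptor opened elsewhere:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "gxp.h"

    static int wait_uci_response(int fd)
    {
            struct gxp_mailbox_uci_response_ioctl resp;

            memset(&resp, 0, sizeof(resp));
            if (ioctl(fd, GXP_MAILBOX_UCI_RESPONSE, &resp)) {
                    /* No response from the MCU firmware, e.g. errno == ETIMEDOUT. */
                    return -errno;
            }
            if (resp.error_code) {
                    /* Response arrived, but the firmware reported an error. */
                    fprintf(stderr, "UCI seq %llu failed: fw error %u\n",
                            (unsigned long long)resp.sequence_number,
                            (unsigned)resp.error_code);
                    return -1;
            }
            /* Success: resp.opaque[] is valid only on this path. */
            return 0;
    }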