author     Aurora pro automerger <aurora-pro-automerger@google.com>  2022-08-09 03:58:01 +0000
committer  John Scheible <johnscheible@google.com>  2022-08-10 17:39:37 +0000
commit     261baf5aa92fe1fb8a0a30d7e971117ed5b36de9 (patch)
tree       bee6c35ce8f9a700109886588d00a8519d894903
parent     51345064a78c4fbee30e5c331a41c0dfab852fa0 (diff)
download   gs201-261baf5aa92fe1fb8a0a30d7e971117ed5b36de9.tar.gz
[Copybara Auto Merge]

gxp: release TPU file after VD stop
Bug: 241085004
gxp: increase the ref to TPU FD on TPU buffer map
Bug: 241085004 (repeat)
gxp: Fail to start a VD if initial mapping fails
Bug: 241090227
gxp: Only (un)map telem if enabled on vd start/stop
Bug: 241090227 (repeat)
gxp: Cancel last worker when power req queue is full
Bug: 240533763
gxp: Disallow /d/gxp/firmware_run if VDs are running
Bug: 240764261
gxp: Cleanup FW data on VD creation failure
Bug: 240999290
gxp: Review feedback from 7/21 release
Bug: 240315433
gxp: Map telemetry buffs before writing descriptor
Bug: 239640408
gxp: Re-enable telemetry mapping changes

GitOrigin-RevId: c8adc76aa961c881ae2c41d71ec045308939e233
Change-Id: I17eb0436551f00fcf8ba41b24713002b947ab08f
-rw-r--r--  gxp-client.c      17
-rw-r--r--  gxp-client.h      10
-rw-r--r--  gxp-config.h       2
-rw-r--r--  gxp-debugfs.c     18
-rw-r--r--  gxp-firmware.c    18
-rw-r--r--  gxp-platform.c    85
-rw-r--r--  gxp-pm.c          90
-rw-r--r--  gxp-pm.h           4
-rw-r--r--  gxp-telemetry.c  259
-rw-r--r--  gxp-telemetry.h    1
-rw-r--r--  gxp-vd.c         122
-rw-r--r--  gxp.h              2
12 files changed, 396 insertions, 232 deletions
diff --git a/gxp-client.c b/gxp-client.c
index 6bdebfc..c9184d7 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -31,7 +31,6 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->requested_power_state = AUR_OFF;
client->requested_memory_power_state = 0;
client->vd = NULL;
- client->tpu_mbx_allocated = false;
client->requested_low_clkmux = false;
return client;
}
@@ -43,14 +42,6 @@ void gxp_client_destroy(struct gxp_client *client)
down_write(&gxp->vd_semaphore);
-#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
- /*
- * Unmap TPU buffers, if the mapping is already removed, this
- * is a no-op.
- */
- gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
-#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
-
if (client->vd && client->vd->state != GXP_VD_OFF)
gxp_vd_stop(client->vd);
@@ -61,6 +52,14 @@ void gxp_client_destroy(struct gxp_client *client)
up_write(&gxp->vd_semaphore);
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+ if (client->tpu_file) {
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
+ }
+#endif
+
if (client->has_block_wakelock) {
gxp_wakelock_release(client->gxp);
gxp_pm_update_requested_power_states(
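
The gxp-client.c change above pins the TPU file with fget() at map time and releases it with fput() at client teardown, so the TPU driver's file cannot go away while the DSP still references its mailbox buffers. A minimal sketch of that pin/unpin pattern, using the standard <linux/file.h> API; the demo_* names are invented, while gxp_client keeps its `tpu_file` the same way:

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>

struct demo_client {
        struct file *tpu_file;
};

static int demo_map_tpu_mbx(struct demo_client *client, unsigned int tpu_fd)
{
        /* fget() takes a reference, or returns NULL for a stale fd. */
        client->tpu_file = fget(tpu_fd);
        if (!client->tpu_file)
                return -EINVAL;
        /* ... map the mailbox buffers while the file is pinned ... */
        return 0;
}

static void demo_unmap_tpu_mbx(struct demo_client *client)
{
        if (client->tpu_file) {
                /* ... unmap the buffers first ... */
                fput(client->tpu_file); /* drop the reference from fget() */
                client->tpu_file = NULL;
        }
}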
diff --git a/gxp-client.h b/gxp-client.h
index c0730e9..0d1f860 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -7,6 +7,7 @@
#ifndef __GXP_CLIENT_H__
#define __GXP_CLIENT_H__
+#include <linux/file.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -36,7 +37,7 @@ struct gxp_client {
bool requested_low_clkmux;
struct gxp_virtual_device *vd;
- bool tpu_mbx_allocated;
+ struct file *tpu_file;
struct gxp_tpu_mbx_desc mbx_desc;
struct gxp_eventfd *mb_eventfds[GXP_NUM_CORES];
@@ -45,6 +46,13 @@ struct gxp_client {
pid_t tgid;
/* client process ID is really the thread ID, may be transient. */
pid_t pid;
+
+ /*
+ * Indicates whether the driver needs to disable telemetry when this
+ * client closes. For when the client fails to disable telemetry itself.
+ */
+ bool enabled_telemetry_logging;
+ bool enabled_telemetry_tracing;
};
/*
diff --git a/gxp-config.h b/gxp-config.h
index 813cad7..154e767 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -18,7 +18,7 @@
#endif /* unknown */
-#ifdef CONFIG_GXP_GEM5
+#if IS_ENABLED(CONFIG_GXP_GEM5)
#undef GXP_NUM_CORES
#define GXP_NUM_CORES 1
#endif
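
The gxp-config.h hunk swaps a bare #ifdef for IS_ENABLED(), which evaluates to 1 when the Kconfig symbol is built-in (=y) or modular (=m) and to 0 otherwise, so the check keeps working for tristate options. A hedged illustration; CONFIG_DEMO_GEM5 is a made-up symbol:

#include <linux/kconfig.h>

/* True for =y or =m; plain #ifdef misses the =m case. */
#if IS_ENABLED(CONFIG_DEMO_GEM5)
#define DEMO_NUM_CORES 1
#else
#define DEMO_NUM_CORES 4
#endif

/* IS_ENABLED() is also usable in C expressions, so both branches stay
 * visible to the compiler while dead code is still eliminated: */
static unsigned int demo_num_cores(void)
{
        return IS_ENABLED(CONFIG_DEMO_GEM5) ? 1 : 4;
}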
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index a04a3f2..e1b199b 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -77,6 +77,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
struct gxp_dev *gxp = (struct gxp_dev *) data;
struct gxp_client *client;
int ret = 0;
+ uint core;
ret = gxp_firmware_request_if_needed(gxp);
if (ret) {
@@ -94,6 +95,22 @@ static int gxp_firmware_run_set(void *data, u64 val)
}
/*
+ * Since this debugfs node destroys, then creates new fw_data,
+ * and runs firmware on every DSP core, it cannot be run if
+ * any of the cores already has a VD running on it.
+ */
+ down_write(&gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core]) {
+ dev_err(gxp->dev,
+ "Unable to run firmware with debugfs while other clients are running\n");
+ ret = -EBUSY;
+ up_write(&gxp->vd_semaphore);
+ goto out;
+ }
+ }
+
+ /*
* Cleanup any bad state or corruption the device might've
* caused
*/
@@ -125,7 +142,6 @@ static int gxp_firmware_run_set(void *data, u64 val)
AUR_MEM_UNDEFINED,
AUR_MEM_UNDEFINED);
- down_write(&gxp->vd_semaphore);
ret = gxp_vd_start(gxp->debugfs_client->vd);
up_write(&gxp->vd_semaphore);
if (ret) {
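
The debugfs change holds gxp->vd_semaphore across both the busy-check and the subsequent VD start, so no client can claim a core between the check and the use. A minimal sketch of that check-then-act pattern under a single down_write(); the demo_* names are invented:

#include <linux/errno.h>
#include <linux/rwsem.h>

#define DEMO_NUM_CORES 4

struct demo_vd;
static struct demo_vd *demo_core_to_vd[DEMO_NUM_CORES];
static DECLARE_RWSEM(demo_vd_semaphore);

static int demo_run_exclusive(void)
{
        unsigned int core;

        down_write(&demo_vd_semaphore); /* exclude concurrent VD starts */
        for (core = 0; core < DEMO_NUM_CORES; core++) {
                if (demo_core_to_vd[core]) {
                        up_write(&demo_vd_semaphore);
                        return -EBUSY;  /* a core is already claimed */
                }
        }
        /* ... reset state and start firmware, still under the lock ... */
        up_write(&demo_vd_semaphore);
        return 0;
}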
diff --git a/gxp-firmware.c b/gxp-firmware.c
index d9e6cc6..eb31f23 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -372,7 +372,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
}
dev_notice(gxp->dev, "Core %u is alive!\n", core);
-#ifndef CONFIG_GXP_GEM5
+#if !IS_ENABLED(CONFIG_GXP_GEM5)
/*
* FW reads the INT_MASK0 register (written by the driver) to
* validate TOP access. The value read is echoed back by the FW to
@@ -396,7 +396,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
return -EIO;
}
dev_notice(gxp->dev, "TOP access from core %u successful!\n", core);
-#endif // !CONFIG_GXP_GEM5
+#endif
/* Stop bus performance monitors */
gxp_bpm_stop(gxp, core);
@@ -709,7 +709,7 @@ static void gxp_firmware_wakeup_cores(struct gxp_dev *gxp, uint core_list)
for (core = 0; core < GXP_NUM_CORES; core++) {
if (!(core_list & BIT(core)))
continue;
-#ifndef CONFIG_GXP_GEM5
+#if !IS_ENABLED(CONFIG_GXP_GEM5)
gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core),
core);
#endif
@@ -721,7 +721,7 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
- int ret = 0;
+ int ret;
struct work_struct *work;
ret = gxp_firmware_handshake(gxp, core);
@@ -792,12 +792,14 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
int ret;
uint core, virt_core;
uint failed_cores = 0;
+ int failed_ret;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
ret = gxp_firmware_setup(gxp, core);
if (ret) {
failed_cores |= BIT(core);
+ failed_ret = ret;
dev_err(gxp->dev, "Failed to run firmware on core %u\n",
core);
}
@@ -816,9 +818,9 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
}
}
- goto out;
+ return failed_ret;
}
-#ifdef CONFIG_GXP_GEM5
+#if IS_ENABLED(CONFIG_GXP_GEM5)
/*
* GEM5 starts firmware after LPM is programmed, so we need to call
* gxp_doorbell_enable_for_core here to set GXP_REG_COMMON_INT_MASK_0
@@ -850,18 +852,20 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
if (failed_cores != 0) {
+ virt_core = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
if (core_list & BIT(core)) {
if (!(failed_cores & BIT(core))) {
gxp_firmware_stop_core(gxp, vd,
virt_core, core);
}
+ virt_core++;
}
}
}
/* Check if we need to set clock mux to low state as requested */
gxp_pm_resume_clkmux(gxp);
-out:
+
return ret;
}
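
The gxp-firmware.c fix re-derives the virtual-core index while walking the physical-core bitmask, advancing it for every core in core_list rather than only the successful ones. A sketch of that bitmask walk under the same assumptions; BIT() is the real <linux/bits.h> helper and demo_stop_core() is a placeholder:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_NUM_CORES 4

static void demo_stop_core(uint virt_core, uint core)
{
        /* ... stop firmware on (virt_core, core) ... */
}

static void demo_unwind(uint core_list, uint failed_cores)
{
        uint core, virt_core = 0;

        for (core = 0; core < DEMO_NUM_CORES; core++) {
                if (!(core_list & BIT(core)))
                        continue;       /* core not in this VD */
                if (!(failed_cores & BIT(core)))
                        demo_stop_core(virt_core, core);
                virt_core++;    /* advances for every listed core, failed or not */
        }
}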
diff --git a/gxp-platform.c b/gxp-platform.c
index 4988768..642d829 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -13,6 +13,7 @@
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/kthread.h>
@@ -183,6 +184,11 @@ static int gxp_release(struct inode *inode, struct file *file)
if (!client)
return 0;
+ if (client->enabled_telemetry_logging)
+ gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_LOGGING);
+ if (client->enabled_telemetry_tracing)
+ gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_TRACING);
+
mutex_lock(&client->gxp->client_list_lock);
list_del(&client->list_entry);
mutex_unlock(&client->gxp->client_list_lock);
@@ -986,6 +992,7 @@ static int gxp_enable_telemetry(struct gxp_client *client,
{
struct gxp_dev *gxp = client->gxp;
__u8 type;
+ int ret;
if (copy_from_user(&type, argp, sizeof(type)))
return -EFAULT;
@@ -994,13 +1001,25 @@ static int gxp_enable_telemetry(struct gxp_client *client,
type != GXP_TELEMETRY_TYPE_TRACING)
return -EINVAL;
- return gxp_telemetry_enable(gxp, type);
+ ret = gxp_telemetry_enable(gxp, type);
+
+ /*
+ * Record what telemetry types this client enabled so they can be
+ * cleaned-up if the client closes without disabling them.
+ */
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_telemetry_logging = true;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_telemetry_tracing = true;
+
+ return ret;
}
static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
{
struct gxp_dev *gxp = client->gxp;
__u8 type;
+ int ret;
if (copy_from_user(&type, argp, sizeof(type)))
return -EFAULT;
@@ -1009,7 +1028,14 @@ static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
type != GXP_TELEMETRY_TYPE_TRACING)
return -EINVAL;
- return gxp_telemetry_disable(gxp, type);
+ ret = gxp_telemetry_disable(gxp, type);
+
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_telemetry_logging = false;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_telemetry_tracing = false;
+
+ return ret;
}
static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
@@ -1063,9 +1089,8 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
goto out;
}
- if (client->tpu_mbx_allocated) {
- dev_err(gxp->dev, "%s: Mappings already exist for TPU mailboxes\n",
- __func__);
+ if (client->tpu_file) {
+ dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
ret = -EBUSY;
goto out_free;
}
@@ -1078,8 +1103,28 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
mbx_info);
if (ret) {
- dev_err(gxp->dev, "%s: Failed to allocate ext tpu mailboxes %d\n",
- __func__, ret);
+ dev_err(gxp->dev, "Failed to allocate ext TPU mailboxes %d",
+ ret);
+ goto out_free;
+ }
+ /*
+ * If someone is attacking us through this interface -
+ * it's possible that ibuf.tpu_fd here is already a different file from
+ * the one passed to edgetpu_ext_driver_cmd() (if the runtime closes the
+ * FD and opens another file exactly between the TPU driver call above
+ * and the fget below).
+ * But the worst consequence of this attack is we fget() ourselves (GXP
+ * FD), which only leads to memory leak (because the file object has a
+ * reference to itself). The race is also hard to hit so we don't insist
+ * on preventing it.
+ */
+ client->tpu_file = fget(ibuf.tpu_fd);
+ if (!client->tpu_file) {
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ NULL);
+ ret = -EINVAL;
goto out_free;
}
/* Align queue size to page size for iommu map. */
@@ -1089,8 +1134,9 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
ret = gxp_dma_map_tpu_buffer(gxp, client->vd, virtual_core_list,
phys_core_list, mbx_info);
if (ret) {
- dev_err(gxp->dev, "%s: failed to map TPU mailbox buffer %d\n",
- __func__, ret);
+ dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
@@ -1101,7 +1147,6 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
client->mbx_desc.virt_core_list = virtual_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
- client->tpu_mbx_allocated = true;
out_free:
kfree(mbx_info);
@@ -1138,9 +1183,8 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
goto out;
}
- if (!client->tpu_mbx_allocated) {
- dev_err(gxp->dev, "%s: No mappings exist for TPU mailboxes\n",
- __func__);
+ if (!client->tpu_file) {
+ dev_err(gxp->dev, "No mappings exist for TPU mailboxes");
ret = -EINVAL;
goto out;
}
@@ -1148,16 +1192,11 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- if (ret) {
- dev_err(gxp->dev, "%s: Failed to free ext tpu mailboxes %d\n",
- __func__, ret);
- goto out;
- }
- client->tpu_mbx_allocated = false;
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
out:
up_write(&client->semaphore);
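
gxp_enable_telemetry() now records, per client, which telemetry types it turned on, so gxp_release() can turn them back off if user space exits without doing so. A condensed sketch of that record-and-clean-up flow; the demo_* names are invented and copy_from_user() is the real uaccess helper:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_client {
        bool enabled_logging;
};

static int demo_hw_enable(__u8 type) { return 0; }      /* stand-in */
static void demo_hw_disable(void) { }                   /* stand-in */

static int demo_enable_ioctl(struct demo_client *client,
                             const __u8 __user *argp)
{
        __u8 type;
        int ret;

        if (copy_from_user(&type, argp, sizeof(type)))
                return -EFAULT;

        ret = demo_hw_enable(type);
        if (!ret)
                client->enabled_logging = true; /* remember for release() */
        return ret;
}

static void demo_release(struct demo_client *client)
{
        if (client->enabled_logging)    /* user space never disabled it */
                demo_hw_disable();
}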
diff --git a/gxp-pm.c b/gxp-pm.c
index 8a501b7..ca8c795 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -343,39 +343,56 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
}
if (state == AUR_OFF)
return 0;
-retry:
+
if (state != gxp->power_mgr->curr_state ||
low_clkmux_vote != gxp->power_mgr->last_scheduled_low_clkmux) {
mutex_lock(&gxp->power_mgr->set_acpm_state_work_lock);
+ /* Look for an available worker */
for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
if (!gxp->power_mgr->set_acpm_state_work[i].using)
break;
}
- /* The workqueue is full, wait for it */
+
+ /*
+ * If the workqueue is full, cancel the last scheduled worker
+ * and use it for this request instead.
+ */
if (i == AUR_NUM_POWER_STATE_WORKER) {
- dev_warn(
- gxp->dev,
- "The workqueue for power state transition is full");
- mutex_unlock(&gxp->power_mgr->set_acpm_state_work_lock);
- mutex_unlock(&gxp->power_mgr->pm_lock);
- flush_workqueue(gxp->power_mgr->wq);
- mutex_lock(&gxp->power_mgr->pm_lock);
- goto retry;
+ dev_dbg(gxp->dev,
+ "The workqueue for power state transition was full");
+ i = gxp->power_mgr->last_set_acpm_state_worker;
+ /*
+ * The last worker's `prev_state` and `prev_low_clkmux`
+ * fields are already set to the values this request
+ * will be changing from.
+ */
+ } else {
+ gxp->power_mgr->set_acpm_state_work[i].prev_state =
+ gxp->power_mgr->curr_state;
+ gxp->power_mgr->set_acpm_state_work[i].prev_low_clkmux =
+ gxp->power_mgr->last_scheduled_low_clkmux;
}
+
gxp->power_mgr->set_acpm_state_work[i].state = state;
gxp->power_mgr->set_acpm_state_work[i].low_clkmux =
low_clkmux_vote;
- gxp->power_mgr->set_acpm_state_work[i].prev_state =
- gxp->power_mgr->curr_state;
- gxp->power_mgr->set_acpm_state_work[i].prev_low_clkmux =
- gxp->power_mgr->last_scheduled_low_clkmux;
- gxp->power_mgr->set_acpm_state_work[i].using = true;
- queue_work(gxp->power_mgr->wq,
- &gxp->power_mgr->set_acpm_state_work[i].work);
+ /*
+ * Schedule work to request the change, if not reusing an
+ * already scheduled worker.
+ */
+ if (!gxp->power_mgr->set_acpm_state_work[i].using) {
+ gxp->power_mgr->set_acpm_state_work[i].using = true;
+ queue_work(
+ gxp->power_mgr->wq,
+ &gxp->power_mgr->set_acpm_state_work[i].work);
+ }
+
+ /* Change the internal state */
gxp->power_mgr->curr_state = state;
gxp->power_mgr->last_scheduled_low_clkmux = low_clkmux_vote;
+ gxp->power_mgr->last_set_acpm_state_worker = i;
mutex_unlock(&gxp->power_mgr->set_acpm_state_work_lock);
}
@@ -524,33 +541,44 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp,
"Cannot request memory power state when BLK is off\n");
return -EBUSY;
}
-retry:
+
if (state != gxp->power_mgr->curr_memory_state) {
mutex_lock(&gxp->power_mgr->req_pm_qos_work_lock);
+ /* Look for an available worker */
for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
if (!gxp->power_mgr->req_pm_qos_work[i].using)
break;
}
- /* The workqueue is full, wait for it */
+
+ /*
+ * If the workqueue is full, cancel the last scheduled worker
+ * and use it for this request instead.
+ */
if (i == AUR_NUM_POWER_STATE_WORKER) {
- dev_warn(
- gxp->dev,
- "The workqueue for memory power state transition is full");
- mutex_unlock(&gxp->power_mgr->req_pm_qos_work_lock);
- mutex_unlock(&gxp->power_mgr->pm_lock);
- flush_workqueue(gxp->power_mgr->wq);
- mutex_lock(&gxp->power_mgr->pm_lock);
- goto retry;
+ dev_dbg(gxp->dev,
+ "The workqueue for memory power state transition was full");
+ i = gxp->power_mgr->last_req_pm_qos_worker;
}
- gxp->power_mgr->curr_memory_state = state;
+
int_val = aur_memory_state2int_table[state];
mif_val = aur_memory_state2mif_table[state];
gxp->power_mgr->req_pm_qos_work[i].int_val = int_val;
gxp->power_mgr->req_pm_qos_work[i].mif_val = mif_val;
- gxp->power_mgr->req_pm_qos_work[i].using = true;
- queue_work(gxp->power_mgr->wq,
- &gxp->power_mgr->req_pm_qos_work[i].work);
+
+ /*
+ * Schedule work to request the change, if not reusing an
+ * already scheduled worker.
+ */
+ if (!gxp->power_mgr->req_pm_qos_work[i].using) {
+ gxp->power_mgr->req_pm_qos_work[i].using = true;
+ queue_work(gxp->power_mgr->wq,
+ &gxp->power_mgr->req_pm_qos_work[i].work);
+ }
+
+ /* Change the internal state */
+ gxp->power_mgr->curr_memory_state = state;
+ gxp->power_mgr->last_req_pm_qos_worker = i;
mutex_unlock(&gxp->power_mgr->req_pm_qos_work_lock);
}
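
The gxp-pm.c rework replaces "flush the workqueue and retry" with "overwrite the most recently scheduled worker": when every slot is busy, only the newest pending request still matters, so its slot is updated in place and no new work is queued. A minimal sketch under those assumptions; the demo_* names are invented and queue_work() is the real workqueue API:

#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_NUM_WORKERS 4

struct demo_req {
        struct work_struct work;
        int state;      /* payload consumed by the work function */
        bool using;
};

static struct demo_req demo_reqs[DEMO_NUM_WORKERS];
static uint demo_last_worker;

/* Caller must hold the lock that serializes access to demo_reqs[]. */
static void demo_request_state(struct workqueue_struct *wq, int state)
{
        uint i;

        for (i = 0; i < DEMO_NUM_WORKERS; i++)
                if (!demo_reqs[i].using)
                        break;

        if (i == DEMO_NUM_WORKERS)
                i = demo_last_worker;   /* pool full: overwrite newest request */

        demo_reqs[i].state = state;
        if (!demo_reqs[i].using) {      /* queue each slot only once */
                demo_reqs[i].using = true;
                queue_work(wq, &demo_reqs[i].work);
        }
        demo_last_worker = i;
}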
diff --git a/gxp-pm.h b/gxp-pm.h
index f9031ca..c214a8b 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -64,7 +64,7 @@ enum aur_power_cmu_mux_state {
#define AUR_MAX_ALLOW_STATE AUR_UD_PLUS
#define AUR_MAX_ALLOW_MEMORY_STATE AUR_MEM_MAX
-#define AUR_NUM_POWER_STATE_WORKER 16
+#define AUR_NUM_POWER_STATE_WORKER 4
struct gxp_pm_device_ops {
int (*pre_blk_powerup)(struct gxp_dev *gxp);
@@ -113,7 +113,9 @@ struct gxp_power_manager {
set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
/* Serializes searching for an open worker in set_acpm_state_work[] */
struct mutex set_acpm_state_work_lock;
+ uint last_set_acpm_state_worker;
struct gxp_req_pm_qos_work req_pm_qos_work[AUR_NUM_POWER_STATE_WORKER];
+ uint last_req_pm_qos_worker;
/* Serializes searching for an open worker in req_pm_qos_work[] */
struct mutex req_pm_qos_work_lock;
struct workqueue_struct *wq;
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
index 19351fd..93b301b 100644
--- a/gxp-telemetry.c
+++ b/gxp-telemetry.c
@@ -78,67 +78,51 @@ int gxp_telemetry_init(struct gxp_dev *gxp)
/* Wrapper struct to be used by the telemetry vma_ops. */
struct telemetry_vma_data {
struct gxp_dev *gxp;
- struct buffer_data *data;
+ struct buffer_data *buff_data;
u8 type;
+ refcount_t ref_count;
};
static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
{
- struct gxp_dev *gxp;
- struct buffer_data *data;
-
- gxp = ((struct telemetry_vma_data *)vma->vm_private_data)->gxp;
- data = ((struct telemetry_vma_data *)vma->vm_private_data)->data;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ struct gxp_dev *gxp = vma_data->gxp;
mutex_lock(&gxp->telemetry_mgr->lock);
- refcount_inc(&data->ref_count);
+ refcount_inc(&vma_data->ref_count);
mutex_unlock(&gxp->telemetry_mgr->lock);
}
-/*
- * Forward declaration of telemetry_disable_locked() so that
- * gxp_telemetry_vma_close() can invoke the locked version without having to
- * release `telemetry_mgr->lock` and calling gxp_telemetry_disable().
- */
-static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type);
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
{
- struct gxp_dev *gxp;
- struct buffer_data *data;
- u8 type;
- int i;
- uint virt_core;
- struct gxp_virtual_device *vd;
-
- gxp = ((struct telemetry_vma_data *)vma->vm_private_data)->gxp;
- data = ((struct telemetry_vma_data *)vma->vm_private_data)->data;
- type = ((struct telemetry_vma_data *)vma->vm_private_data)->type;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ struct gxp_dev *gxp = vma_data->gxp;
+ struct buffer_data *buff_data = vma_data->buff_data;
+ u8 type = vma_data->type;
mutex_lock(&gxp->telemetry_mgr->lock);
- down_read(&gxp->vd_semaphore);
- if (refcount_dec_and_test(&data->ref_count)) {
- if (data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED)
- telemetry_disable_locked(gxp, type);
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- vd = gxp->core_to_vd[i];
- if (vd != NULL) {
- virt_core =
- gxp_vd_phys_core_to_virt_core(vd, i);
- gxp_dma_free_coherent(gxp, vd, BIT(virt_core),
- data->size,
- data->buffers[i],
- data->buffer_daddrs[i]);
- } else {
- gxp_dma_free_coherent(gxp, NULL, 0, data->size,
- data->buffers[i],
- data->buffer_daddrs[i]);
- }
- }
+ if (!refcount_dec_and_test(&vma_data->ref_count))
+ goto out;
+
+ /*
+ * Free the telemetry buffers if they are no longer in use.
+ *
+ * If a client enabled telemetry, then closed their VMA without
+ * disabling it, firmware will still be expecting those buffers to be
+ * mapped. If this is the case, telemetry will be disabled, and the
+ * buffers freed, when the client is closed.
+ *
+ * We cannot disable telemetry here, since attempting to lock the
+ * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
+ */
+ if (refcount_dec_and_test(&buff_data->ref_count)) {
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
gxp->telemetry_mgr->logging_buff_data = NULL;
@@ -150,11 +134,12 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
dev_warn(gxp->dev, "%s called with invalid type %u\n",
__func__, type);
}
- kfree(data);
- kfree(vma->vm_private_data);
+ free_telemetry_buffers(gxp, buff_data);
}
- up_read(&gxp->vd_semaphore);
+ kfree(vma_data);
+
+out:
mutex_unlock(&gxp->telemetry_mgr->lock);
}
@@ -202,7 +187,6 @@ static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
* @size: The size of buffer to allocate for each core
*
* Caller must hold the telemetry_manager's lock.
- * Caller must hold gxp->vd_semaphore for reading.
*
* Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
*/
@@ -211,8 +195,10 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
{
struct buffer_data *data;
int i;
- uint virt_core;
- struct gxp_virtual_device *vd;
+ void *buf;
+ dma_addr_t daddr;
+
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -220,55 +206,57 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
/* Allocate cache-coherent buffers for logging/tracing to */
for (i = 0; i < GXP_NUM_CORES; i++) {
- /*
- * If the core is not allocated, we cannot map the buffer on
- * that core.
- */
- vd = gxp->core_to_vd[i];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, i);
- data->buffers[i] = gxp_dma_alloc_coherent(
- gxp, vd, BIT(virt_core), size,
- &data->buffer_daddrs[i], GFP_KERNEL, 0);
- } else {
- data->buffers[i] =
- gxp_dma_alloc_coherent(gxp, NULL, 0, size,
- &data->buffer_daddrs[i],
- GFP_KERNEL, 0);
- }
- if (!data->buffers[i])
+ /* Allocate a coherent buffer in the default domain */
+ buf = dma_alloc_coherent(gxp->dev, size, &daddr, GFP_KERNEL);
+ if (!buf) {
+ dev_err(gxp->dev,
+ "Failed to allocate coherent buffer\n");
goto err_alloc;
+ }
+ data->buffers[i] = buf;
+ data->buffer_daddrs[i] = daddr;
}
data->size = size;
refcount_set(&data->ref_count, 1);
+ data->is_enabled = false;
return data;
err_alloc:
- for (; i > 0; i--) {
- vd = gxp->core_to_vd[i-1];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, i);
- gxp_dma_free_coherent(gxp, vd, BIT(virt_core), size,
- data->buffers[i - 1],
- data->buffer_daddrs[i - 1]);
- } else {
- gxp_dma_free_coherent(gxp, NULL, 0, size,
- data->buffers[i - 1],
- data->buffer_daddrs[i - 1]);
- }
- }
+ while (i--)
+ dma_free_coherent(gxp->dev, size, data->buffers[i],
+ data->buffer_daddrs[i]);
kfree(data);
return NULL;
}
/**
+ * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
+ * @gxp: The GXP device the buffers were allocated for
+ * @data: The descriptor of the buffers to unmap and free
+ *
+ * Caller must hold the telemetry_manager's lock.
+ */
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
+{
+ int i;
+
+ lockdep_assert_held(&gxp->telemetry_mgr->lock);
+
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ dma_free_coherent(gxp->dev, data->size, data->buffers[i],
+ data->buffer_daddrs[i]);
+
+ kfree(data);
+}
+
+/**
* remap_telemetry_buffers() - Remaps a set of telemetry buffers into a
* user-space vm_area.
* @gxp: The GXP device the buffers were allocated for
* @vma: A vm area to remap the buffers into
- * @data: The data describing a set of telemetry buffers to remap
+ * @buff_data: The data describing a set of telemetry buffers to remap
*
* Caller must hold the telemetry_manager's lock.
*
@@ -278,7 +266,7 @@ err_alloc:
*/
static int remap_telemetry_buffers(struct gxp_dev *gxp,
struct vm_area_struct *vma,
- struct buffer_data *data)
+ struct buffer_data *buff_data)
{
unsigned long orig_pgoff = vma->vm_pgoff;
int i;
@@ -296,7 +284,7 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
* Remap each core's buffer a page at a time, in case it is not
* physically contiguous.
*/
- for (offset = 0; offset < data->size; offset += PAGE_SIZE) {
+ for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
/*
* `virt_to_phys()` does not work on memory allocated
* by `dma_alloc_coherent()`, so we have to use
@@ -307,9 +295,10 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
*/
phys = iommu_iova_to_phys(
iommu_get_domain_for_dev(gxp->dev),
- data->buffer_daddrs[i] + offset);
+ buff_data->buffer_daddrs[i] + offset);
ret = remap_pfn_range(
- vma, vma->vm_start + data->size * i + offset,
+ vma,
+ vma->vm_start + buff_data->size * i + offset,
phys >> PAGE_SHIFT, PAGE_SIZE,
vma->vm_page_prot);
if (ret)
@@ -331,10 +320,8 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
struct telemetry_vma_data *vma_data;
size_t total_size = vma->vm_end - vma->vm_start;
size_t size = total_size / GXP_NUM_CORES;
- struct buffer_data *data;
+ struct buffer_data *buff_data;
int i;
- struct gxp_virtual_device *vd;
- uint virt_core;
if (!gxp->telemetry_mgr)
return -ENODEV;
@@ -344,7 +331,6 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
return -EINVAL;
mutex_lock(&gxp->telemetry_mgr->lock);
- down_read(&gxp->vd_semaphore);
ret = check_telemetry_type_availability(gxp, type);
if (ret)
@@ -356,53 +342,43 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
goto err;
}
- data = allocate_telemetry_buffers(gxp, size);
- if (!data) {
+ buff_data = allocate_telemetry_buffers(gxp, size);
+ if (!buff_data) {
ret = -ENOMEM;
goto err_free_vma_data;
}
- ret = remap_telemetry_buffers(gxp, vma, data);
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
if (ret)
goto err_free_buffers;
vma_data->gxp = gxp;
- vma_data->data = data;
+ vma_data->buff_data = buff_data;
vma_data->type = type;
+ refcount_set(&vma_data->ref_count, 1);
vma->vm_private_data = vma_data;
/* Save book-keeping on the buffers in the telemetry manager */
if (type == GXP_TELEMETRY_TYPE_LOGGING)
- gxp->telemetry_mgr->logging_buff_data = data;
+ gxp->telemetry_mgr->logging_buff_data = buff_data;
else /* type == GXP_TELEMETRY_TYPE_TRACING */
- gxp->telemetry_mgr->tracing_buff_data = data;
+ gxp->telemetry_mgr->tracing_buff_data = buff_data;
- up_read(&gxp->vd_semaphore);
mutex_unlock(&gxp->telemetry_mgr->lock);
return 0;
err_free_buffers:
- for (i = 0; i < GXP_NUM_CORES; i++) {
- vd = gxp->core_to_vd[i];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, i);
- gxp_dma_free_coherent(gxp, vd, BIT(virt_core),
- data->size, data->buffers[i],
- data->buffer_daddrs[i]);
- } else {
- gxp_dma_free_coherent(gxp, NULL, 0, data->size,
- data->buffers[i],
- data->buffer_daddrs[i]);
- }
- }
- kfree(data);
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ dma_free_coherent(gxp->dev, buff_data->size,
+ buff_data->buffers[i],
+ buff_data->buffer_daddrs[i]);
+ kfree(buff_data);
err_free_vma_data:
kfree(vma_data);
err:
- up_read(&gxp->vd_semaphore);
mutex_unlock(&gxp->telemetry_mgr->lock);
return ret;
}
@@ -411,7 +387,8 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
{
struct buffer_data *data;
int ret = 0;
- uint core;
+ uint core, virt_core;
+ struct gxp_virtual_device *vd;
mutex_lock(&gxp->telemetry_mgr->lock);
@@ -432,20 +409,49 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
goto out;
}
+ /* Map the buffers for any cores already running */
+ down_read(&gxp->vd_semaphore);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ vd = gxp->core_to_vd[core];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, data->buffers[core], vd, BIT(virt_core),
+ data->size, data->buffer_daddrs[core], 0);
+ if (ret)
+ goto err;
+ }
+ }
+
/* Populate the buffer fields in firmware-data */
data->host_status |= GXP_TELEMETRY_HOST_STATUS_ENABLED;
gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
data->buffer_daddrs, data->size);
/* Notify any running cores that firmware-data was updated */
- down_read(&gxp->vd_semaphore);
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp_is_fw_running(gxp, core))
gxp_notification_send(gxp, core,
CORE_NOTIF_TELEMETRY_STATUS);
}
- up_read(&gxp->vd_semaphore);
+ refcount_inc(&data->ref_count);
+ data->is_enabled = true;
+
+ goto up_sem;
+err:
+ while (core--) {
+ vd = gxp->core_to_vd[core];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd, BIT(virt_core), data->size,
+ data->buffer_daddrs[core]);
+ }
+ }
+
+up_sem:
+ up_read(&gxp->vd_semaphore);
out:
mutex_unlock(&gxp->telemetry_mgr->lock);
@@ -529,7 +535,8 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
struct buffer_data *data;
int ret = 0;
dma_addr_t null_daddrs[GXP_NUM_CORES] = {0};
- uint core;
+ uint core, virt_core;
+ struct gxp_virtual_device *vd;
/* Cleanup telemetry manager's book-keeping */
switch (type) {
@@ -549,6 +556,8 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
if (!(data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED))
return 0;
+ data->is_enabled = false;
+
/* Clear the log buffer fields in firmware-data */
data->host_status &= ~GXP_TELEMETRY_HOST_STATUS_ENABLED;
gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
@@ -564,6 +573,28 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
"%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
__func__, core, type, ret);
}
+ vd = gxp->core_to_vd[core];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd, BIT(virt_core), data->size,
+ data->buffer_daddrs[core]);
+ }
+ }
+
+ if (refcount_dec_and_test(&data->ref_count)) {
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->telemetry_mgr->logging_buff_data = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->telemetry_mgr->tracing_buff_data = NULL;
+ break;
+ default:
+ /* NO-OP, we returned above if `type` was invalid */
+ break;
+ }
+ free_telemetry_buffers(gxp, data);
}
return 0;
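
The telemetry rework gives the buffers their own refcount_t, taken once by the mmap'ing VMA and once more while telemetry is enabled; whichever of vma_close or disable drops the count to zero frees the buffers. A small sketch of that shared-ownership idiom with the real <linux/refcount.h> API; demo_buf is invented:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_buf {
        refcount_t ref_count;
        /* ... per-core buffers ... */
};

static struct demo_buf *demo_buf_alloc(void)
{
        struct demo_buf *b = kzalloc(sizeof(*b), GFP_KERNEL);

        if (b)
                refcount_set(&b->ref_count, 1); /* first owner: the VMA */
        return b;
}

static void demo_buf_enable(struct demo_buf *b)
{
        refcount_inc(&b->ref_count);    /* second owner: the enabled state */
}

/* Called from both vma_close and disable; the last owner frees. */
static void demo_buf_put(struct demo_buf *b)
{
        if (refcount_dec_and_test(&b->ref_count))
                kfree(b);
}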
diff --git a/gxp-telemetry.h b/gxp-telemetry.h
index 92d12df..d2e63de 100644
--- a/gxp-telemetry.h
+++ b/gxp-telemetry.h
@@ -27,6 +27,7 @@ struct gxp_telemetry_manager {
dma_addr_t buffer_daddrs[GXP_NUM_CORES];
u32 size;
refcount_t ref_count;
+ bool is_enabled;
} *logging_buff_data, *tracing_buff_data;
/* Protects logging_buff_data and tracing_buff_data */
struct mutex lock;
diff --git a/gxp-vd.c b/gxp-vd.c
index c22e30d..70a4f18 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -159,56 +159,72 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
kfree(vd);
}
-static void map_telemetry_buffers(struct gxp_dev *gxp,
+static int map_telemetry_buffers(struct gxp_dev *gxp,
struct gxp_virtual_device *vd, uint virt_core,
uint core)
{
- if (gxp->telemetry_mgr->logging_buff_data)
- gxp_dma_map_allocated_coherent_buffer(
- gxp,
- gxp->telemetry_mgr->logging_buff_data->buffers[core],
- vd, BIT(virt_core),
- gxp->telemetry_mgr->logging_buff_data->size,
- gxp->telemetry_mgr->logging_buff_data
- ->buffer_daddrs[core],
- 0);
- if (gxp->telemetry_mgr->tracing_buff_data)
- gxp_dma_map_allocated_coherent_buffer(
- gxp,
- gxp->telemetry_mgr->tracing_buff_data->buffers[core],
- vd, BIT(virt_core),
- gxp->telemetry_mgr->tracing_buff_data->size,
- gxp->telemetry_mgr->tracing_buff_data
- ->buffer_daddrs[core],
- 0);
+ int ret = 0;
+ struct buffer_data *buff_data;
+
+ /* Map logging buffers if logging is enabled */
+ buff_data = gxp->telemetry_mgr->logging_buff_data;
+ if (buff_data && buff_data->is_enabled) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, buff_data->buffers[core], vd, BIT(virt_core),
+ buff_data->size, buff_data->buffer_daddrs[core], 0);
+ /* Don't bother checking tracing if logging fails */
+ if (ret)
+ return ret;
+ }
+
+ /* Map tracing buffers if tracing is enabled */
+ buff_data = gxp->telemetry_mgr->tracing_buff_data;
+ if (buff_data && buff_data->is_enabled) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, buff_data->buffers[core], vd, BIT(virt_core),
+ buff_data->size, buff_data->buffer_daddrs[core], 0);
+ /* If tracing fails, unmap logging if it was enabled */
+ if (ret) {
+ buff_data = gxp->telemetry_mgr->logging_buff_data;
+ if (buff_data && buff_data->is_enabled)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd, BIT(virt_core),
+ buff_data->size,
+ buff_data->buffer_daddrs[core]);
+ }
+ }
+
+ return ret;
}
static void unmap_telemetry_buffers(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
- if (gxp->telemetry_mgr->logging_buff_data)
+ struct buffer_data *buff_data;
+
+ buff_data = gxp->telemetry_mgr->logging_buff_data;
+ if (buff_data && buff_data->is_enabled)
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core),
- gxp->telemetry_mgr->logging_buff_data->size,
- gxp->telemetry_mgr->logging_buff_data
- ->buffer_daddrs[core]);
- if (gxp->telemetry_mgr->tracing_buff_data)
+ gxp, vd, BIT(virt_core), buff_data->size,
+ buff_data->buffer_daddrs[core]);
+
+ buff_data = gxp->telemetry_mgr->tracing_buff_data;
+ if (buff_data && buff_data->is_enabled)
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core),
- gxp->telemetry_mgr->tracing_buff_data->size,
- gxp->telemetry_mgr->tracing_buff_data
- ->buffer_daddrs[core]);
+ gxp, vd, BIT(virt_core), buff_data->size,
+ buff_data->buffer_daddrs[core]);
}
-static void map_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+static int map_debug_dump_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
{
+ /* If debug-dump is not enabled, nothing to map */
if (!gxp->debug_dump_mgr)
- return;
+ return 0;
- gxp_dma_map_allocated_coherent_buffer(
+ return gxp_dma_map_allocated_coherent_buffer(
gxp, gxp->debug_dump_mgr->buf.vaddr, vd, BIT(virt_core),
gxp->debug_dump_mgr->buf.size, gxp->debug_dump_mgr->buf.daddr,
0);
@@ -260,33 +276,53 @@ int gxp_vd_start(struct gxp_virtual_device *vd)
if (core_list & BIT(core)) {
gxp->core_to_vd[core] = vd;
cores_remaining--;
- gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
- gxp_dma_map_core_resources(gxp, vd, virt_core, core);
- map_telemetry_buffers(gxp, vd, virt_core, core);
- map_debug_dump_buffer(gxp, vd, virt_core, core);
+ ret = gxp_dma_domain_attach_device(gxp, vd, virt_core,
+ core);
+ if (ret)
+ goto err_clean_all_cores;
+ ret = gxp_dma_map_core_resources(gxp, vd, virt_core,
+ core);
+ if (ret)
+ goto err_detach_domain;
+ ret = map_telemetry_buffers(gxp, vd, virt_core, core);
+ if (ret)
+ goto err_unmap_res;
+ ret = map_debug_dump_buffer(gxp, vd, virt_core, core);
+ if (ret)
+ goto err_unmap_telem;
virt_core++;
}
}
ret = gxp_firmware_run(gxp, vd, core_list);
if (ret)
- goto error;
+ goto err_clean_all_cores;
vd->state = GXP_VD_RUNNING;
return ret;
-error:
- virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
+err_unmap_telem:
+ unmap_telemetry_buffers(gxp, vd, virt_core, core);
+err_unmap_res:
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+err_detach_domain:
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
+err_clean_all_cores:
+ gxp->core_to_vd[core] = NULL;
+ virt_core--;
+ while (core > 0) {
+ core--;
if (core_list & BIT(core)) {
unmap_debug_dump_buffer(gxp, vd, virt_core, core);
unmap_telemetry_buffers(gxp, vd, virt_core, core);
gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
gxp_dma_domain_detach_device(gxp, vd, virt_core);
gxp->core_to_vd[core] = NULL;
- virt_core++;
+ virt_core--;
}
}
+ gxp_fw_data_destroy_app(gxp, vd->fw_app);
+
return ret;
}
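
gxp_vd_start() now unwinds through a ladder of labels in strict reverse order: the failing core undoes only the steps it completed, then the loop rolls back every earlier core completely. A compact sketch of the same ladder; the demo_* steps are placeholders standing in for attach, resource mapping, and telemetry mapping:

#include <linux/types.h>

static int demo_attach(uint core) { return 0; }         /* step 1 */
static int demo_map_resources(uint core) { return 0; }  /* step 2 */
static int demo_map_telemetry(uint core) { return 0; }  /* step 3 */
static void demo_unmap_resources(uint core) { }
static void demo_detach(uint core) { }

static int demo_start_core(uint core)
{
        int ret;

        ret = demo_attach(core);
        if (ret)
                return ret;             /* nothing to undo yet */
        ret = demo_map_resources(core);
        if (ret)
                goto err_detach;
        ret = demo_map_telemetry(core);
        if (ret)
                goto err_unmap_resources;
        return 0;

err_unmap_resources:
        demo_unmap_resources(core);     /* undo step 2 */
err_detach:
        demo_detach(core);              /* undo step 1 */
        return ret;
}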
diff --git a/gxp.h b/gxp.h
index ed0a78d..a7ce8ee 100644
--- a/gxp.h
+++ b/gxp.h
@@ -156,7 +156,7 @@ struct gxp_virtual_device_ioctl {
* The client can request low frequency clkmux vote by this flag, which means
* the kernel driver will switch the CLKMUX clocks to save more power.
*
- * Note: The kernel driver keep seperate track of low frequency clkmux votes
+ * Note: The kernel driver keep separate track of low frequency clkmux votes
* and normal votes, and the low frequency clkmux votes will have lower priority
* than all normal votes.
* For example, if the kerenl driver has two votes, one is GXP_POWER_STATE_UUD