summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAurora pro automerger <aurora-pro-automerger@google.com>2022-04-22 17:10:21 +0800
committerSermin Aydin <sermin@google.com>2022-05-12 04:51:44 +0000
commitfa5cf5721220d5b97544ea56b91bd9f2590debac (patch)
tree649f29e1c371a1c02c87ec038290c507ff2e1b12
parent27bed782f3a828674c0f1584cf355bf592c382be (diff)
downloadgs201-fa5cf5721220d5b97544ea56b91bd9f2590debac.tar.gz
[Copybara Auto Merge] Merge branch 'gs201-release' into 'android13-gs-pixel-5.10'
Cherry-pick of Suspend/Resume support gxp: reset CMU regs on blk_off Bug: 231759324 gxp: detach all aux domains when we fail to resume Bug: 231707796 gxp: Hold cores failing to suspend in reset Bug: 231663916 gxp: fix multicore resume if blk is restarted Bug: 231681021 gxp: Release pm_lock before flushing pm workers Bug: 231266703 gxp: update minor version for suspend/resume Bug: 209083969 gxp: handle suspend/resume failure gxp: implement core suspend/resume gxp: reset CMU on PM init Bug: 231291191 gxp: protect mailbox registers from corruption Bug: 231265938 gxp: only set curr_state on non-OFF req Bug: 231291187 gxp: Log clients holding wakelocks on suspend Bug: 230931995 gxp: set boot mode to cold boot on firmware run Bug: 230818196 gxp: change default off LPM state from PS2 to PS3 Bug: 229801235 GitOrigin-RevId: 2520913f6599abeefee27275b056d73f15e2178d Change-Id: I1c15420e06771dff507f29053bc5bba010e46314
-rw-r--r--gxp-client.c3
-rw-r--r--gxp-client.h3
-rw-r--r--gxp-csrs.h1
-rw-r--r--gxp-dma-iommu-gem5.c6
-rw-r--r--gxp-firmware.c48
-rw-r--r--gxp-firmware.h6
-rw-r--r--gxp-host-device-structs.h44
-rw-r--r--gxp-hw-mailbox-driver.c117
-rw-r--r--gxp-internal.h2
-rw-r--r--gxp-lpm.c66
-rw-r--r--gxp-lpm.h27
-rw-r--r--gxp-mailbox-driver.h3
-rw-r--r--gxp-mailbox.c12
-rw-r--r--gxp-mailbox.h2
-rw-r--r--gxp-notification.h1
-rw-r--r--gxp-platform.c185
-rw-r--r--gxp-pm.c95
-rw-r--r--gxp-pm.h20
-rw-r--r--gxp-vd.c219
-rw-r--r--gxp-vd.h40
-rw-r--r--gxp-wakelock.c38
-rw-r--r--gxp.h2
22 files changed, 804 insertions, 136 deletions
diff --git a/gxp-client.c b/gxp-client.c
index 5838d82..5802287 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Google LLC
*/
+#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -50,7 +51,7 @@ void gxp_client_destroy(struct gxp_client *client)
gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
- if (client->has_vd_wakelock)
+ if (client->vd && client->vd->state != GXP_VD_OFF)
gxp_vd_stop(client->vd);
for (core = 0; core < GXP_NUM_CORES; core++) {
diff --git a/gxp-client.h b/gxp-client.h
index 3b0719b..c3dacf3 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -16,6 +16,7 @@
/* Holds state belonging to a client */
struct gxp_client {
+ struct list_head list_entry;
struct gxp_dev *gxp;
/*
@@ -38,6 +39,8 @@ struct gxp_client {
struct gxp_tpu_mbx_desc mbx_desc;
struct gxp_eventfd *mb_eventfds[GXP_NUM_CORES];
+
+ pid_t pid;
};
/*
diff --git a/gxp-csrs.h b/gxp-csrs.h
index 739e41f..d6a6e9f 100644
--- a/gxp-csrs.h
+++ b/gxp-csrs.h
@@ -50,6 +50,7 @@ enum gxp_csrs {
enum gxp_core_csrs {
GXP_REG_INST_BPM = 0x0000,
+ GXP_REG_BOOT_MODE = 0x2080,
GXP_REG_PROFILING_CONDITION = 0x4000,
GXP_REG_PROCESSOR_ID = 0x4004,
GXP_REG_ALT_RESET_VECTOR = 0x4008,
diff --git a/gxp-dma-iommu-gem5.c b/gxp-dma-iommu-gem5.c
index 86a2c06..8368dcb 100644
--- a/gxp-dma-iommu-gem5.c
+++ b/gxp-dma-iommu-gem5.c
@@ -371,6 +371,12 @@ void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
}
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
+int gxp_dma_ssmt_program(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
+{
+ /* NO-OP when aux domains are not supported */
+ return 0;
+}
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
struct gxp_virtual_device *vd, uint virt_core,
uint core)
diff --git a/gxp-firmware.c b/gxp-firmware.c
index df4e192..6e079fb 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -20,6 +20,7 @@
#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
#include "gxp-firmware.h"
+#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
@@ -215,9 +216,27 @@ gxp_firmware_load_authenticated(struct gxp_dev *gxp, const struct firmware *fw,
/* Forward declaration for usage inside gxp_firmware_load(..). */
static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
+static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core, bool verbose)
+{
+ u32 reset_vec;
+
+ reset_vec = gxp_read_32_core(gxp, core,
+ GXP_REG_ALT_RESET_VECTOR);
+ if (verbose)
+ dev_notice(gxp->dev,
+ "Current Aurora reset vector for core %u: 0x%x\n",
+ core, reset_vec);
+ gxp_write_32_core(gxp, core, GXP_REG_ALT_RESET_VECTOR,
+ gxp->fwbufs[core].daddr);
+ if (verbose)
+ dev_notice(gxp->dev,
+ "New Aurora reset vector for core %u: 0x%llx\n",
+ core, gxp->fwbufs[core].daddr);
+}
+
static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
{
- u32 reset_vec, offset;
+ u32 offset;
void __iomem *core_scratchpad_base;
int ret;
@@ -279,16 +298,6 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
"ELF loaded at virtual: %pK and physical: 0x%llx\n",
gxp->fwbufs[core].vaddr, gxp->fwbufs[core].paddr);
- /* Program reset vector */
- reset_vec = gxp_read_32_core(gxp, core,
- GXP_REG_ALT_RESET_VECTOR);
- dev_notice(gxp->dev, "Current Aurora reset vector for core %u: 0x%x\n",
- core, reset_vec);
- gxp_write_32_core(gxp, core, GXP_REG_ALT_RESET_VECTOR,
- gxp->fwbufs[core].daddr);
- dev_notice(gxp->dev, "New Aurora reset vector for core %u: 0x%llx\n",
- core, gxp->fwbufs[core].daddr);
-
/* Configure bus performance monitors */
gxp_bpm_configure(gxp, core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
gxp_bpm_configure(gxp, core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
@@ -491,8 +500,13 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
return ret;
}
+ /* Mark this as a cold boot */
+ gxp_write_32_core(gxp, core, GXP_REG_BOOT_MODE,
+ GXP_BOOT_MODE_REQUEST_COLD_BOOT);
+
gxp_doorbell_set_listening_core(gxp, CORE_WAKEUP_DOORBELL, core);
- ret = gxp_pm_core_on(gxp, core);
+ ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
+ /*verbose=*/true);
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
goto out_firmware_unload;
@@ -546,6 +560,13 @@ out_firmware_unload:
return ret;
}
+int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
+ bool verbose)
+{
+ gxp_program_reset_vector(gxp, core, verbose);
+ return gxp_pm_core_on(gxp, core, verbose);
+}
+
void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint virt_core, uint core)
{
@@ -563,6 +584,7 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
gxp->mailbox_mgr->mailboxes[core]);
dev_notice(gxp->dev, "Mailbox %u released\n", core);
- gxp_pm_core_off(gxp, core);
+ if (vd->state == GXP_VD_RUNNING)
+ gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 6b1dff0..775e83f 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -61,5 +61,11 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
*/
void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint virt_core, uint core);
+/*
+ * Re-program the reset vector and power on the core's LPM if the block had
+ * been shut down.
+ */
+int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
+ bool verbose);
#endif /* __GXP_FIRMWARE_H__ */
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index c8d4b44..8182138 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -25,6 +25,50 @@
/* There was an attempt to use the buffers but their content was invalid. */
#define GXP_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
+/* Definitions for host->device boot mode requests */
+/*
+ * Request that the core performs a normal cold boot on the next power-on event.
+ * This does not actually wake the core up, but it is required before powering the
+ * core up if cold boot is desired.
+ * Core power-on could be performed using any wake-up source like the doorbells.
+ */
+#define GXP_BOOT_MODE_REQUEST_COLD_BOOT 0
+
+/*
+ * Request that the core suspends on the next suspend signal arrival. This does
+ * not trigger a suspend operation. A subsequent mailbox command or notification
+ * is needed to trigger the actual transition.
+ */
+#define GXP_BOOT_MODE_REQUEST_SUSPEND 1
+
+/*
+ * Request that the core resumes on the next power-on event. This does not trigger a
+ * resume operation, but it is required before powering the core up if warm
+ * boot/resume is desired.
+ * Core power-on could be performed using any wake-up source like direct LPM
+ * transition into PS0.
+ */
+#define GXP_BOOT_MODE_REQUEST_RESUME 2
+
+/* Cold boot status definitions */
+#define GXP_BOOT_MODE_STATUS_COLD_BOOT_PENDING 0
+#define GXP_BOOT_MODE_STATUS_COLD_BOOT_COMPLETED 3
+
+/* Core suspend status definitions */
+#define GXP_BOOT_MODE_STATUS_SUSPEND_PENDING 1
+#define GXP_BOOT_MODE_STATUS_SUSPEND_STARTED 4
+#define GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED 5
+#define GXP_BOOT_MODE_STATUS_SUSPEND_ABORTED 6
+
+/* Core resume/warm boot status definitions */
+#define GXP_BOOT_MODE_STATUS_RESUME_PENDING 2
+#define GXP_BOOT_MODE_STATUS_RESUME_STARTED 7
+#define GXP_BOOT_MODE_STATUS_RESUME_COMPLETED 8
+#define GXP_BOOT_MODE_STATUS_RESUME_FAILED 9
+
+/* Invalid boot mode request code */
+#define GXP_BOOT_MODE_STATUS_INVALID_MODE 10
+
/* A structure describing the state of the doorbells on the system. */
struct gxp_doorbells_descriptor {
/* The app this descriptor belongs to. */
diff --git a/gxp-hw-mailbox-driver.c b/gxp-hw-mailbox-driver.c
index c07047f..8430a65 100644
--- a/gxp-hw-mailbox-driver.c
+++ b/gxp-hw-mailbox-driver.c
@@ -5,10 +5,12 @@
* Copyright (C) 2021 Google LLC
*/
+#include <asm/barrier.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/of_irq.h>
+#include <linux/spinlock.h>
#include "gxp-mailbox-driver.h"
#include "gxp-mailbox-regs.h"
@@ -111,11 +113,22 @@ static void unregister_irq(struct gxp_mailbox *mailbox)
void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
{
- register_irq(mailbox);
+ spin_lock_init(&mailbox->cmd_tail_resp_head_lock);
+ spin_lock_init(&mailbox->cmd_head_resp_tail_lock);
}
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
{
+ /* Nothing to cleanup */
+}
+
+void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox)
+{
+ register_irq(mailbox);
+}
+
+void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox)
+{
unregister_irq(mailbox);
}
@@ -139,6 +152,16 @@ void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
u32 int_mask)
{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling to the device to read from them. This avoids the scenario
+ * where the interrupt trigger write gets delivered to the MBX HW before
+ * the DRAM transactions made it to DRAM since they're Normal
+ * transactions and can be re-ordered and backed off behind other
+ * transfers.
+ */
+ wmb();
+
csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
}
@@ -177,72 +200,124 @@ void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
{
- u32 current_resp_head =
- data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- RESP_HEAD_MASK;
- u32 new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
+ u32 current_resp_head;
+ u32 new_cmd_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+ current_resp_head = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
+ RESP_HEAD_MASK;
+ new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
new_cmd_tail | current_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
}
void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
{
- u32 current_cmd_tail =
- data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- CMD_TAIL_MASK;
- u32 new_resp_head = (u32)val << RESP_HEAD_SHIFT;
+ u32 current_cmd_tail;
+ u32 new_resp_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+ current_cmd_tail = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
+ CMD_TAIL_MASK;
+ new_resp_head = (u32)val << RESP_HEAD_SHIFT;
data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
current_cmd_tail | new_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
}
u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
{
- u32 reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
}
u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
{
- u32 reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
}
void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
{
- u32 current_resp_tail =
- data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- RESP_TAIL_MASK;
- u32 new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
+ u32 current_resp_tail;
+ u32 new_cmd_head;
+ unsigned long flags;
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ current_resp_tail = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
+ RESP_TAIL_MASK;
+ new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
new_cmd_head | current_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
}
void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
{
- u32 current_cmd_head =
- data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- CMD_HEAD_MASK;
- u32 new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
+ u32 current_cmd_head;
+ u32 new_resp_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+ current_cmd_head = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
+ CMD_HEAD_MASK;
+ new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
current_cmd_head | new_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
}
u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
{
- u32 reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
}
u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
{
- u32 reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
}
diff --git a/gxp-internal.h b/gxp-internal.h
index 4b5bb7c..01df49e 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -100,6 +100,8 @@ struct gxp_dev {
*/
struct device *gsa_dev;
u32 memory_per_core;
+ struct list_head client_list;
+ struct mutex client_list_lock;
};
/* GXP device IO functions */
diff --git a/gxp-lpm.c b/gxp-lpm.c
index 8367375..348590f 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -19,12 +19,28 @@
#include "gxp-internal.h"
#include "gxp-lpm.h"
-static void enable_state(struct gxp_dev *gxp, uint psm, uint state)
+#define gxp_lpm_wait_until(lpm_state, condition) \
+ do { \
+ int i = 100000; \
+ while (i) { \
+ lpm_state = \
+ lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET) & \
+ PSM_CURR_STATE_MASK; \
+ if (condition) \
+ break; \
+ udelay(1 * GXP_TIME_DELAY_FACTOR); \
+ i--; \
+ } \
+ return i != 0; \
+ } while (0)
+
+void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state)
{
uint offset = LPM_REG_ENABLE_STATE_0 + (LPM_STATE_TABLE_SIZE * state);
/* PS0 should always be enabled */
- WARN_ON(state == 0);
+ if (state == 0)
+ return;
/* Disable all low power states */
lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_1, 0x0);
@@ -60,7 +76,7 @@ bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
return state == LPM_ACTIVE_STATE || state == LPM_CG_STATE;
}
-static uint get_state(struct gxp_dev *gxp, uint psm)
+uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm)
{
u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
@@ -88,16 +104,16 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
}
if (!i) {
- dev_err(gxp->dev, "Failed to switch to PS%u\n", target_state);
+ dev_err(gxp->dev, "Failed to switch PSM%u to PS%u\n", psm, target_state);
return -EIO;
}
return 0;
}
-static int set_state(struct gxp_dev *gxp, uint psm, uint target_state)
+int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state)
{
- uint curr_state = get_state(gxp, psm);
+ uint curr_state = gxp_lpm_get_state(gxp, psm);
if (curr_state == target_state)
return 0;
@@ -106,7 +122,7 @@ static int set_state(struct gxp_dev *gxp, uint psm, uint target_state)
target_state, psm,
lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
- enable_state(gxp, psm, target_state);
+ gxp_lpm_enable_state(gxp, psm, target_state);
if ((curr_state != LPM_ACTIVE_STATE)
&& (target_state != LPM_ACTIVE_STATE)) {
@@ -117,7 +133,7 @@ static int set_state(struct gxp_dev *gxp, uint psm, uint target_state)
set_state_internal(gxp, psm, target_state);
dev_warn(gxp->dev, "Finished forced transition on core %u. target: PS%u, actual: PS%u, status: %x\n",
- psm, target_state, get_state(gxp, psm),
+ psm, target_state, gxp_lpm_get_state(gxp, psm),
lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
/* Set HW sequencing mode */
@@ -133,8 +149,8 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
/* Return early if LPM is already initialized */
if (gxp_lpm_is_initialized(gxp, psm)) {
if (psm != LPM_TOP_PSM) {
- /* Ensure core is in PS2 */
- return set_state(gxp, psm, LPM_PG_W_RET_STATE);
+ /* Ensure core is in PS3 */
+ return gxp_lpm_set_state(gxp, psm, LPM_PG_STATE);
}
return 0;
@@ -173,7 +189,7 @@ void gxp_lpm_init(struct gxp_dev *gxp)
void gxp_lpm_destroy(struct gxp_dev *gxp)
{
/* (b/171063370) Put Top PSM in ACTIVE state before block shutdown */
- dev_notice(gxp->dev, "Kicking Top PSM out of ACG\n");
+ dev_dbg(gxp->dev, "Kicking Top PSM out of ACG\n");
/* Disable all low-power states for TOP */
lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
@@ -194,7 +210,7 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
dev_notice(gxp->dev, "Enabled\n");
/* Enable PS1 (Clk Gated) */
- enable_state(gxp, core, LPM_CG_STATE);
+ gxp_lpm_enable_state(gxp, core, LPM_CG_STATE);
gxp_bpm_start(gxp, core);
@@ -203,10 +219,12 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
void gxp_lpm_down(struct gxp_dev *gxp, uint core)
{
- /* Enable PS2 (Pwr Gated w/Ret) */
- enable_state(gxp, core, LPM_PG_W_RET_STATE);
+ if (gxp_lpm_get_state(gxp, core) == LPM_PG_STATE)
+ return;
+ /* Enable PS3 (Pwr Gated) */
+ gxp_lpm_enable_state(gxp, core, LPM_PG_STATE);
- /* Set wakeup doorbell to trigger an automatic transition to PS2 */
+ /* Set wakeup doorbell to trigger an automatic transition to PS3 */
gxp_doorbell_set_listening_core(gxp, CORE_WAKEUP_DOORBELL, core);
gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL);
msleep(25 * GXP_TIME_DELAY_FACTOR);
@@ -218,6 +236,20 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core)
gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, 0);
gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL);
- /* Ensure core is in PS2 */
- set_state(gxp, core, LPM_PG_W_RET_STATE);
+ /* Ensure core is in PS3 */
+ gxp_lpm_set_state(gxp, core, LPM_PG_STATE);
+}
+
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state)
+{
+ uint lpm_state;
+
+ gxp_lpm_wait_until(lpm_state, lpm_state != state);
+}
+
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state)
+{
+ uint lpm_state;
+
+ gxp_lpm_wait_until(lpm_state, lpm_state == state);
}
diff --git a/gxp-lpm.h b/gxp-lpm.h
index 0858104..dc87817 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -82,6 +82,33 @@ bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm);
*/
bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm);
+/*
+ * Wait for the specified @psm to be in any state other than @state
+ * Return whether the waiting is successful or the timeout occurs.
+ */
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state);
+
+/*
+ * Wait for the specified @psm to be in the specified @state
+ * Return whether the waiting is successful or the timeout occurs.
+ */
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state);
+
+/*
+ * Force a state transition on the specified PSM.
+ */
+int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state);
+
+/*
+ * Get current LPM state of the specified PSM.
+ */
+uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm);
+
+/*
+ * Enable a state on the specified PSM.
+ */
+void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state);
+
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
uint offset = GXP_LPM_BASE + reg_offset;
diff --git a/gxp-mailbox-driver.h b/gxp-mailbox-driver.h
index 6e10b18..9271694 100644
--- a/gxp-mailbox-driver.h
+++ b/gxp-mailbox-driver.h
@@ -12,6 +12,9 @@
void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox);
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox);
+void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox);
+void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox);
+
void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index);
void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index);
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 4b32aa4..b9c9c7e 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -460,6 +460,9 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
if (!mailbox->response_wq)
goto err_workqueue;
+ /* Initialize driver before interacting with its registers */
+ gxp_mailbox_driver_init(mailbox);
+
return mailbox;
err_workqueue:
@@ -499,6 +502,8 @@ static void enable_mailbox(struct gxp_mailbox *mailbox)
mutex_init(&mailbox->wait_list_lock);
INIT_WORK(&mailbox->response_work, gxp_mailbox_consume_responses_work);
+ /* Only enable interrupts once everything has been setup */
+ gxp_mailbox_driver_enable_interrupts(mailbox);
/* Enable the mailbox */
gxp_mailbox_write_status(mailbox, 1);
/* TODO(b/190868834) define interrupt bits */
@@ -537,12 +542,12 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
}
/*
- * Halt the mailbox driver.
 * Halt the mailbox driver by preventing any incoming requests.
* This must happen before the mailbox itself is cleaned-up/released
* to make sure the mailbox does not disappear out from under the
* mailbox driver. This also halts all incoming responses/interrupts.
*/
- gxp_mailbox_driver_exit(mailbox);
+ gxp_mailbox_driver_disable_interrupts(mailbox);
/* Halt and flush any traffic */
cancel_work_sync(&mailbox->response_work);
@@ -600,6 +605,9 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
+ /* Cleanup now that all mailbox interactions are finished */
+ gxp_mailbox_driver_exit(mailbox);
+
/*
* At this point all users of the mailbox have been halted or are
* waiting on gxp->vd_semaphore, which this function's caller has
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 6900607..486264f 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -128,6 +128,8 @@ struct gxp_mailbox {
void (*handle_irq)(struct gxp_mailbox *mailbox);
struct work_struct *interrupt_handlers[GXP_MAILBOX_INT_BIT_COUNT];
unsigned int interrupt_virq;
+ spinlock_t cmd_tail_resp_head_lock;
+ spinlock_t cmd_head_resp_tail_lock;
struct task_struct *to_host_poll_task;
/* Protects to_host_poll_task while it holds a sync barrier */
struct mutex polling_lock;
diff --git a/gxp-notification.h b/gxp-notification.h
index 1c18a30..a4e4fd3 100644
--- a/gxp-notification.h
+++ b/gxp-notification.h
@@ -22,6 +22,7 @@ enum gxp_notification_to_core_type {
CORE_NOTIF_MAILBOX_COMMAND = 0,
CORE_NOTIF_GENERATE_DEBUG_DUMP = 1,
CORE_NOTIF_TELEMETRY_STATUS = 2,
+ CORE_NOTIF_SUSPEND_REQUEST = 3,
CORE_NOTIF_MAX
};
diff --git a/gxp-platform.c b/gxp-platform.c
index 68f04d0..2bfdb28 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -44,6 +44,48 @@
#include "gxp-vd.h"
#include "gxp-wakelock.h"
+/* Caller needs to hold client->semaphore for reading */
+static bool check_client_has_available_vd(struct gxp_client *client,
+ char *ioctl_name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held_read(&client->semaphore);
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "%s requires the client allocate a VIRTUAL_DEVICE\n",
+ ioctl_name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ ioctl_name);
+ return false;
+ }
+ return true;
+}
+
+/* Caller needs to hold client->semaphore for reading */
+static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
+ char *ioctl_name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held_read(&client->semaphore);
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "%s requires the client hold a VIRTUAL_DEVICE wakelock\n",
+ ioctl_name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ ioctl_name);
+ return false;
+ }
+ return true;
+}
+
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static struct sscd_platform_data gxp_sscd_pdata;
@@ -83,8 +125,14 @@ static int gxp_open(struct inode *inode, struct file *file)
if (IS_ERR(client))
return PTR_ERR(client);
+ client->pid = current->pid;
+
file->private_data = client;
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
+
return 0;
}
@@ -98,6 +146,10 @@ static int gxp_release(struct inode *inode, struct file *file)
if (!client)
return 0;
+ mutex_lock(&client->gxp->client_list_lock);
+ list_del(&client->list_entry);
+ mutex_unlock(&client->gxp->client_list_lock);
+
/*
* TODO (b/184572070): Unmap buffers and drop mailbox responses
* belonging to the client
@@ -145,9 +197,7 @@ static int gxp_map_buffer(struct gxp_client *client,
down_read(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_MAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
+ if (!check_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
ret = -ENODEV;
goto out;
}
@@ -285,11 +335,10 @@ gxp_mailbox_command_compat(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -353,6 +402,7 @@ gxp_mailbox_command_compat(struct gxp_client *client,
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -394,11 +444,10 @@ static int gxp_mailbox_command(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -463,6 +512,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -484,9 +534,8 @@ static int gxp_mailbox_response(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_MAILBOX_RESPONSE requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_RESPONSE")) {
ret = -ENODEV;
goto out;
}
@@ -663,11 +712,10 @@ gxp_etm_trace_start_command(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_ETM_TRACE_START_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_START_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -689,6 +737,7 @@ gxp_etm_trace_start_command(struct gxp_client *client,
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -708,11 +757,10 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_ETM_TRACE_SW_STOP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_SW_STOP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -733,6 +781,7 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -752,11 +801,10 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_ETM_TRACE_CLEANUP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_CLEANUP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -777,6 +825,7 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -802,11 +851,10 @@ gxp_etm_get_trace_info_command(struct gxp_client *client,
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "GXP_ETM_GET_TRACE_INFO_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- up_read(&client->semaphore);
- return -ENODEV;
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_GET_TRACE_INFO_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semphore;
}
down_read(&gxp->vd_semaphore);
@@ -858,6 +906,7 @@ out_free_header:
out:
up_read(&gxp->vd_semaphore);
+out_unlock_client_semphore:
up_read(&client->semaphore);
return ret;
@@ -918,11 +967,9 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
down_write(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_MAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
- up_read(&client->semaphore);
- return -ENODEV;
+	if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
+		ret = -ENODEV;
+		goto out_unlock_client_semaphore;
 	}
 	down_read(&gxp->vd_semaphore);
@@ -992,6 +1039,7 @@ out_free:
 out:
 	up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
up_write(&client->semaphore);
return ret;
@@ -1166,6 +1214,12 @@ static int gxp_acquire_wake_lock_compat(
}
client->has_block_wakelock = true;
+
+ /*
+ * Update client's PID in case the process that opened /dev/gxp
+ * is not the one that called this IOCTL.
+ */
+ client->pid = current->pid;
}
/* Acquire a VIRTUAL_DEVICE wakelock if requested */
@@ -1178,9 +1232,19 @@ static int gxp_acquire_wake_lock_compat(
}
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev,
+ "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!client->has_vd_wakelock) {
down_write(&gxp->vd_semaphore);
- ret = gxp_vd_start(client->vd);
+ if (client->vd->state == GXP_VD_OFF)
+ ret = gxp_vd_start(client->vd);
+ else
+ ret = gxp_vd_resume(client->vd);
up_write(&gxp->vd_semaphore);
}
@@ -1286,9 +1350,19 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
}
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev,
+ "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
+ ret = -ENODEV;
+ goto err_acquiring_vd_wl;
+ }
+
if (!client->has_vd_wakelock) {
down_write(&gxp->vd_semaphore);
- ret = gxp_vd_start(client->vd);
+ if (client->vd->state == GXP_VD_OFF)
+ ret = gxp_vd_start(client->vd);
+ else
+ ret = gxp_vd_resume(client->vd);
up_write(&gxp->vd_semaphore);
}
@@ -1355,9 +1429,17 @@ static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
goto out;
}
- down_write(&gxp->vd_semaphore);
- gxp_vd_stop(client->vd);
- up_write(&gxp->vd_semaphore);
+		/*
+		 * Currently the VD state cannot be GXP_VD_UNAVAILABLE while
+		 * has_vd_wakelock is true. This check is defensive, in case
+		 * GXP_VD_UNAVAILABLE becomes reachable in more scenarios in
+		 * the future.
+		 */
+ if (client->vd->state != GXP_VD_UNAVAILABLE) {
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_suspend(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
client->has_vd_wakelock = false;
}
@@ -1416,9 +1498,7 @@ static int gxp_map_dmabuf(struct gxp_client *client,
down_read(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_MAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
+ if (!check_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
ret = -ENODEV;
goto out;
}
@@ -1487,9 +1567,7 @@ static int gxp_register_mailbox_eventfd(
down_write(&client->semaphore);
- if (!client->vd) {
- dev_err(client->gxp->dev,
- "GXP_REGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ if (!check_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
ret = -ENODEV;
goto out;
}
@@ -1912,6 +1990,9 @@ static int gxp_platform_probe(struct platform_device *pdev)
dev_err(dev, "Failed to init thermal driver\n");
dev_dbg(dev, "Probe finished\n");
+ INIT_LIST_HEAD(&gxp->client_list);
+ mutex_init(&gxp->client_list_lock);
+
return 0;
err_vd_destroy:
diff --git a/gxp-pm.c b/gxp-pm.c
index 29f6c7e..45590a5 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -17,6 +17,7 @@
#include <soc/google/exynos_pm_qos.h>
#include "gxp-bpm.h"
+#include "gxp-client.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
@@ -122,12 +123,20 @@ int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
static void set_cmu_noc_user_mux_state(struct gxp_dev *gxp, u32 val)
{
- writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
+ if (!IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
}
static void set_cmu_pll_aur_mux_state(struct gxp_dev *gxp, u32 val)
{
- writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
+ if (!IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
+}
+
+static void reset_cmu_mux_state(struct gxp_dev *gxp)
+{
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
}
void gxp_pm_force_cmu_noc_user_mux_normal(struct gxp_dev *gxp)
@@ -211,6 +220,7 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
/* Startup TOP's PSM */
gxp_lpm_init(gxp);
+ gxp->power_mgr->blk_switch_count++;
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -231,15 +241,16 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
mutex_unlock(&gxp->power_mgr->pm_lock);
return -EBUSY;
}
+	/*
+	 * This shouldn't happen unless blk_off was called twice without an
+	 * intervening blk_on.
+	 */
if (gxp->power_mgr->curr_state == AUR_OFF) {
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
- /* Reset MUX frequency from AUR_READY state */
- if (gxp->power_mgr->curr_state == AUR_READY) {
- set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
- set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
- }
+	/* The check above ensured the block is powered, so CMU register access is safe. */
+	reset_cmu_mux_state(gxp);
/* Shutdown TOP's PSM */
gxp_lpm_destroy(gxp);
@@ -251,6 +262,21 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return ret;
}
+int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp)
+{
+ int ret;
+
+ if (!gxp->power_mgr) {
+ dev_err(gxp->dev, "%s: No PM found\n", __func__);
+ return -ENODEV;
+ }
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp->power_mgr->blk_switch_count;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ return ret;
+}
+
int gxp_pm_get_blk_state(struct gxp_dev *gxp)
{
int ret;
@@ -266,7 +292,7 @@ int gxp_pm_get_blk_state(struct gxp_dev *gxp)
return ret;
}
-int gxp_pm_core_on(struct gxp_dev *gxp, uint core)
+int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
{
int ret = 0;
@@ -285,7 +311,8 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core)
mutex_unlock(&gxp->power_mgr->pm_lock);
- dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
+ if (verbose)
+ dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
return ret;
}
@@ -318,10 +345,9 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
}
if (state != gxp->power_mgr->curr_state ||
aggressor_vote != gxp->power_mgr->curr_aggressor_vote) {
- if (state == AUR_OFF) {
- dev_warn(gxp->dev,
- "It is not supported to request AUR_OFF\n");
- } else {
+ if (state != AUR_OFF) {
+ mutex_lock(&gxp->power_mgr->set_acpm_state_work_lock);
+
for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
if (!gxp->power_mgr->set_acpm_state_work[i]
.using)
@@ -332,7 +358,20 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
dev_warn(
gxp->dev,
"The workqueue for power state transition is full");
+ mutex_unlock(&gxp->power_mgr->pm_lock);
flush_workqueue(gxp->power_mgr->wq);
+ mutex_lock(&gxp->power_mgr->pm_lock);
+
+ /* Verify that a request is still needed */
+ if (state == gxp->power_mgr->curr_state &&
+ aggressor_vote ==
+ gxp->power_mgr->curr_aggressor_vote) {
+ mutex_unlock(
+ &gxp->power_mgr
+ ->set_acpm_state_work_lock);
+ return 0;
+ }
+
/*
* All set_acpm_state_work should be available
* now, pick the first one.
@@ -348,9 +387,12 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
queue_work(
gxp->power_mgr->wq,
&gxp->power_mgr->set_acpm_state_work[i].work);
+
+ gxp->power_mgr->curr_state = state;
+ gxp->power_mgr->curr_aggressor_vote = aggressor_vote;
+
+ mutex_unlock(&gxp->power_mgr->set_acpm_state_work_lock);
}
- gxp->power_mgr->curr_state = state;
- gxp->power_mgr->curr_aggressor_vote = aggressor_vote;
}
return 0;
@@ -475,7 +517,8 @@ static void gxp_pm_req_pm_qos_async(struct work_struct *work)
mutex_unlock(&req_pm_qos_work->gxp->power_mgr->pm_lock);
}
-static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_power_state state)
+static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp,
+ enum aur_memory_power_state state)
{
s32 int_val = 0, mif_val = 0;
uint i;
@@ -485,6 +528,8 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_p
return -EINVAL;
}
if (state != gxp->power_mgr->curr_memory_state) {
+ mutex_lock(&gxp->power_mgr->req_pm_qos_work_lock);
+
for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
if (!gxp->power_mgr->req_pm_qos_work[i].using)
break;
@@ -494,7 +539,17 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_p
dev_warn(
gxp->dev,
"The workqueue for memory power state transition is full");
+ mutex_unlock(&gxp->power_mgr->pm_lock);
flush_workqueue(gxp->power_mgr->wq);
+ mutex_lock(&gxp->power_mgr->pm_lock);
+
+ /* Verify that a request is still needed */
+ if (state == gxp->power_mgr->curr_memory_state) {
+ mutex_unlock(
+ &gxp->power_mgr->req_pm_qos_work_lock);
+ return 0;
+ }
+
/*
* All req_pm_qos_work should be available
* now, pick the first one.
@@ -507,7 +562,10 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_p
gxp->power_mgr->req_pm_qos_work[i].int_val = int_val;
gxp->power_mgr->req_pm_qos_work[i].mif_val = mif_val;
gxp->power_mgr->req_pm_qos_work[i].using = true;
- queue_work(gxp->power_mgr->wq, &gxp->power_mgr->req_pm_qos_work[i].work);
+ queue_work(gxp->power_mgr->wq,
+ &gxp->power_mgr->req_pm_qos_work[i].work);
+
+ mutex_unlock(&gxp->power_mgr->req_pm_qos_work_lock);
}
return 0;
@@ -635,9 +693,12 @@ int gxp_pm_init(struct gxp_dev *gxp)
INIT_WORK(&mgr->req_pm_qos_work[i].work,
gxp_pm_req_pm_qos_async);
}
+ mutex_init(&mgr->set_acpm_state_work_lock);
+ mutex_init(&mgr->req_pm_qos_work_lock);
gxp->power_mgr->wq =
create_singlethread_workqueue("gxp_power_work_queue");
gxp->power_mgr->force_noc_mux_normal_count = 0;
+	gxp->power_mgr->blk_switch_count = 0;
#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
pm_runtime_enable(gxp->dev);
diff --git a/gxp-pm.h b/gxp-pm.h
index 647f99a..25b4792 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -96,13 +96,19 @@ struct gxp_power_manager {
int curr_memory_state;
refcount_t blk_wake_ref;
struct gxp_pm_device_ops *ops;
- struct gxp_set_acpm_state_work set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
+ struct gxp_set_acpm_state_work
+ set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
+ /* Serializes searching for an open worker in set_acpm_state_work[] */
+ struct mutex set_acpm_state_work_lock;
struct gxp_req_pm_qos_work req_pm_qos_work[AUR_NUM_POWER_STATE_WORKER];
+ /* Serializes searching for an open worker in req_pm_qos_work[] */
+ struct mutex req_pm_qos_work_lock;
struct workqueue_struct *wq;
/* INT/MIF requests for memory bandwidth */
struct exynos_pm_qos_request int_min;
struct exynos_pm_qos_request mif_min;
int force_noc_mux_normal_count;
+ u64 blk_switch_count;
};
/**
@@ -136,15 +142,25 @@ int gxp_pm_blk_off(struct gxp_dev *gxp);
int gxp_pm_get_blk_state(struct gxp_dev *gxp);
/**
+ * gxp_pm_get_blk_switch_count() - Get the number of times the block has been powered on
+ * @gxp: The GXP device whose block switch count to read
+ *
+ * Return:
+ * * count - Number of block power-on events since module initialization.
+ */
+int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp);
+
+/**
* gxp_pm_core_on() - Turn on a core on GXP device
* @gxp: The GXP device to operate
* @core: The core ID to turn on
+ * @verbose: A boolean flag to indicate whether to print the log
*
* Return:
* * 0 - Core on process finished successfully
* * -ETIMEDOUT - Core on process timed-out.
*/
-int gxp_pm_core_on(struct gxp_dev *gxp, uint core);
+int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose);
/**
* gxp_pm_core_off() - Turn off a core on GXP device
diff --git a/gxp-vd.c b/gxp-vd.c
index 9c5c805..a413091 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -11,10 +11,21 @@
#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
+#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
+#include "gxp-lpm.h"
#include "gxp-mailbox.h"
+#include "gxp-notification.h"
+#include "gxp-pm.h"
#include "gxp-telemetry.h"
#include "gxp-vd.h"
+#include "gxp-wakelock.h"
+
+static inline void hold_core_in_reset(struct gxp_dev *gxp, uint core)
+{
+ gxp_write_32_core(gxp, core, GXP_REG_ETM_PWRCTL,
+ 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+}
int gxp_vd_init(struct gxp_dev *gxp)
{
@@ -58,6 +69,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->gxp = gxp;
vd->num_cores = requested_cores;
+ vd->state = GXP_VD_OFF;
vd->core_domains =
kcalloc(requested_cores, sizeof(*vd->core_domains), GFP_KERNEL);
@@ -238,6 +250,7 @@ int gxp_vd_start(struct gxp_virtual_device *vd)
ret = -EIO;
goto out_vd_stop;
}
+ vd->state = GXP_VD_RUNNING;
return ret;
@@ -253,15 +266,19 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
struct gxp_dev *gxp = vd->gxp;
uint core;
uint virt_core = 0;
+ uint lpm_state;
- /*
- * Put all cores in the VD into reset so they can not wake each other up
- */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- gxp_write_32_core(
- gxp, core, GXP_REG_ETM_PWRCTL,
- 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ if ((vd->state == GXP_VD_OFF || vd->state == GXP_VD_RUNNING) &&
+ gxp_pm_get_blk_state(gxp) != AUR_OFF) {
+ /*
+ * Put all cores in the VD into reset so they can not wake each other up
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ lpm_state = gxp_lpm_get_state(gxp, core);
+ if (lpm_state != LPM_PG_STATE)
+ hold_core_in_reset(gxp, core);
+ }
}
}
@@ -270,7 +287,8 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
gxp_firmware_stop(gxp, vd, virt_core, core);
unmap_telemetry_buffers(gxp, vd, virt_core, core);
gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
+ if (vd->state == GXP_VD_RUNNING)
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
gxp->core_to_vd[core] = NULL;
virt_core++;
}
@@ -282,6 +300,189 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
}
}
+/*
+ * Caller must have locked `gxp->vd_semaphore` for writing.
+ */
+void gxp_vd_suspend(struct gxp_virtual_device *vd)
+{
+ uint core;
+ struct gxp_dev *gxp = vd->gxp;
+ u32 boot_state;
+ uint failed_cores = 0;
+ uint virt_core;
+
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ if (vd->state == GXP_VD_SUSPENDED) {
+ dev_err(gxp->dev,
+ "Attempt to suspend a virtual device twice\n");
+ return;
+ }
+ gxp_pm_force_cmu_noc_user_mux_normal(gxp);
+ /*
+ * Start the suspend process for all of this VD's cores without waiting
+ * for completion.
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ if (!gxp_lpm_wait_state_ne(gxp, core, LPM_ACTIVE_STATE)) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(core);
+ hold_core_in_reset(gxp, core);
+ dev_err(gxp->dev, "Core %u stuck at LPM_ACTIVE_STATE", core);
+ continue;
+ }
+ /* Mark the boot mode as a suspend event */
+ gxp_write_32_core(gxp, core, GXP_REG_BOOT_MODE,
+ GXP_BOOT_MODE_REQUEST_SUSPEND);
+ /*
+ * Request a suspend event by sending a mailbox
+ * notification.
+ */
+ gxp_notification_send(gxp, core,
+ CORE_NOTIF_SUSPEND_REQUEST);
+ }
+ }
+ virt_core = 0;
+ /* Wait for all cores to complete core suspension. */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ if (!(failed_cores & BIT(core))) {
+ if (!gxp_lpm_wait_state_eq(gxp, core,
+ LPM_PG_STATE)) {
+ boot_state = gxp_read_32_core(
+ gxp, core, GXP_REG_BOOT_MODE);
+ if (boot_state !=
+ GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED) {
+ dev_err(gxp->dev,
+ "Suspension request on core %u failed (status: %u)",
+ core, boot_state);
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(core);
+ hold_core_in_reset(gxp, core);
+ }
+ } else {
+ /* Re-set PS1 as the default low power state. */
+ gxp_lpm_enable_state(gxp, core,
+ LPM_CG_STATE);
+ }
+ }
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
+ virt_core++;
+ }
+ }
+ if (vd->state == GXP_VD_UNAVAILABLE) {
+ /* shutdown all cores if virtual device is unavailable */
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (gxp->core_to_vd[core] == vd)
+ gxp_pm_core_off(gxp, core);
+ } else {
+ vd->blk_switch_count_when_suspended =
+ gxp_pm_get_blk_switch_count(gxp);
+ vd->state = GXP_VD_SUSPENDED;
+ }
+ gxp_pm_check_cmu_noc_user_mux(gxp);
+}
+
+/*
+ * Caller must have locked `gxp->vd_semaphore` for writing.
+ */
+int gxp_vd_resume(struct gxp_virtual_device *vd)
+{
+ int ret = 0;
+ uint core;
+ uint virt_core = 0;
+ uint timeout;
+ u32 boot_state;
+ struct gxp_dev *gxp = vd->gxp;
+ u64 curr_blk_switch_count;
+ uint failed_cores = 0;
+
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ if (vd->state != GXP_VD_SUSPENDED) {
+ dev_err(gxp->dev,
+ "Attempt to resume a virtual device which was not suspended\n");
+ return -EBUSY;
+ }
+ gxp_pm_force_cmu_noc_user_mux_normal(gxp);
+ curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
+ /*
+ * Start the resume process for all of this VD's cores without waiting
+ * for completion.
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
+ /*
+ * The comparison is to check if blk_switch_count is
+ * changed. If it's changed, it means the block is rebooted and
+ * therefore we need to set up the hardware again.
+ */
+ if (vd->blk_switch_count_when_suspended != curr_blk_switch_count) {
+ ret = gxp_firmware_setup_hw_after_block_off(
+ gxp, core, false);
+ if (ret) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(core);
+ virt_core++;
+ dev_err(gxp->dev, "Failed to power up core %u\n", core);
+ continue;
+ }
+ }
+ /* Mark this as a resume power-up event. */
+ gxp_write_32_core(gxp, core, GXP_REG_BOOT_MODE,
+ GXP_BOOT_MODE_REQUEST_RESUME);
+ /*
+ * Power on the core by explicitly switching its PSM to
+ * PS0 (LPM_ACTIVE_STATE).
+ */
+ gxp_lpm_set_state(gxp, core, LPM_ACTIVE_STATE);
+ virt_core++;
+ }
+ }
+ /* Wait for all cores to complete core resumption. */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ if (!(failed_cores & BIT(core))) {
+ /* in microseconds */
+ timeout = 1000000;
+ while (--timeout) {
+ boot_state = gxp_read_32_core(
+ gxp, core, GXP_REG_BOOT_MODE);
+ if (boot_state ==
+ GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)
+ break;
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
+ }
+ if (timeout == 0 &&
+ boot_state !=
+ GXP_BOOT_MODE_STATUS_RESUME_COMPLETED) {
+ dev_err(gxp->dev,
+ "Resume request on core %u failed (status: %u)",
+ core, boot_state);
+ ret = -EBUSY;
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(core);
+ }
+ }
+ }
+ }
+ if (vd->state == GXP_VD_UNAVAILABLE) {
+ /* shutdown all cores if virtual device is unavailable */
+ virt_core = 0;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
+ gxp_pm_core_off(gxp, core);
+ virt_core++;
+ }
+ }
+ } else {
+ vd->state = GXP_VD_RUNNING;
+ }
+ gxp_pm_check_cmu_noc_user_mux(gxp);
+ return ret;
+}
+
/* Caller must have locked `gxp->vd_semaphore` for reading */
int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
{
diff --git a/gxp-vd.h b/gxp-vd.h
index 2193d77..e973638 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -24,12 +24,32 @@ struct mailbox_resp_queue {
wait_queue_head_t waitq;
};
+enum gxp_virtual_device_state {
+ GXP_VD_OFF = 0,
+ GXP_VD_RUNNING = 1,
+ GXP_VD_SUSPENDED = 2,
+	/*
+	 * A virtual device in the unavailable state can never transition
+	 * back to any other state.
+	 * Note: this state is only entered on suspend/resume failure.
+	 */
+ GXP_VD_UNAVAILABLE = 3,
+};
+
struct gxp_virtual_device {
struct gxp_dev *gxp;
uint num_cores;
void *fw_app;
struct iommu_domain **core_domains;
struct mailbox_resp_queue *mailbox_resp_queues;
+ enum gxp_virtual_device_state state;
+ /*
+ * Record the gxp->power_mgr->blk_switch_count when the vd was
+ * suspended. Use this information to know whether the block has been
+ * restarted and therefore we need to re-program CSRs in the resume
+ * process.
+ */
+ u64 blk_switch_count_when_suspended;
};
/*
@@ -116,4 +136,24 @@ uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
*/
int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core);
+/**
+ * gxp_vd_suspend() - Suspend a running virtual device
+ * @vd: The virtual device to suspend
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ */
+void gxp_vd_suspend(struct gxp_virtual_device *vd);
+
+/**
+ * gxp_vd_resume() - Resume a suspended virtual device
+ * @vd: The virtual device to resume
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ETIMEDOUT - Fail to power on physical cores
+ */
+int gxp_vd_resume(struct gxp_virtual_device *vd);
+
#endif /* __GXP_VD_H__ */
diff --git a/gxp-wakelock.c b/gxp-wakelock.c
index 9344f21..7f0d392 100644
--- a/gxp-wakelock.c
+++ b/gxp-wakelock.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Google LLC
*/
+#include "gxp-client.h"
#include "gxp-dma.h"
#include "gxp-pm.h"
#include "gxp-wakelock.h"
@@ -92,13 +93,48 @@ int gxp_wakelock_suspend(struct gxp_dev *gxp)
{
struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
int ret;
+ struct gxp_client *client;
- mutex_lock(&mgr->lock);
+ if (!mutex_trylock(&mgr->lock))
+ return -EAGAIN;
/* Can't suspend if there are any active clients */
mgr->suspended = mgr->count == 0;
ret = mgr->suspended ? 0 : -EAGAIN;
+ /* Suspend successful. Can exit now. */
+ if (!ret)
+ goto out;
+
+ /* Log clients currently holding a wakelock */
+ if (!mutex_trylock(&gxp->client_list_lock)) {
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Unable to get client list lock on suspend failure\n");
+ goto out;
+ }
+
+ list_for_each_entry(client, &gxp->client_list, list_entry) {
+ if (!down_read_trylock(&client->semaphore)) {
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Unable to acquire client lock (pid=%d)\n",
+ client->pid);
+ continue;
+ }
+
+ if (client->has_block_wakelock)
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Cannot suspend with client holding wakelock (pid=%d)\n",
+ client->pid);
+
+ up_read(&client->semaphore);
+ }
+
+ mutex_unlock(&gxp->client_list_lock);
+
+out:
mutex_unlock(&mgr->lock);
return ret;
diff --git a/gxp.h b/gxp.h
index 9f08925..c320878 100644
--- a/gxp.h
+++ b/gxp.h
@@ -12,7 +12,7 @@
/* Interface Version */
#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 0
+#define GXP_INTERFACE_VERSION_MINOR 2
#define GXP_INTERFACE_VERSION_BUILD 0
/*