Diffstat (limited to 'gxp-mailbox.c')
-rw-r--r--  gxp-mailbox.c  1015
1 file changed, 299 insertions(+), 716 deletions(-)
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index aa28fc0..0e0b365 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -14,344 +14,28 @@
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-pm.h"
+#include "gxp.h"
+
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include <gcip/gcip-mailbox.h>
+#include <gcip/gcip-kci.h>
+
+#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
+#endif
/* Timeout of 1s by default */
int gxp_mbx_timeout = 1000;
module_param_named(mbx_timeout, gxp_mbx_timeout, int, 0660);
-/* Utilities of circular queue operations */
-
-#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
-#define CIRCULAR_QUEUE_INDEX_MASK (CIRCULAR_QUEUE_WRAP_BIT - 1)
-#define CIRCULAR_QUEUE_WRAPPED(idx) ((idx) & CIRCULAR_QUEUE_WRAP_BIT)
-#define CIRCULAR_QUEUE_REAL_INDEX(idx) ((idx) & CIRCULAR_QUEUE_INDEX_MASK)
-
-#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
-#define MBOX_CMD_QUEUE_SIZE \
- (sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES)
-
-#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
-#define MBOX_RESP_QUEUE_SIZE \
- (sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES)
-
-/*
- * Returns the number of elements in a circular queue given its @head, @tail,
- * and @queue_size.
- */
-static inline u32 circular_queue_count(u32 head, u32 tail, u32 queue_size)
-{
- if (CIRCULAR_QUEUE_WRAPPED(tail) != CIRCULAR_QUEUE_WRAPPED(head))
- return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head) +
- CIRCULAR_QUEUE_REAL_INDEX(tail);
- else
- return tail - head;
-}
-
-/* Increases @index of a circular queue by @inc. */
-static inline u32 circular_queue_inc(u32 index, u32 inc, u32 queue_size)
-{
- u32 new_index = CIRCULAR_QUEUE_REAL_INDEX(index) + inc;
-
- if (new_index >= queue_size)
- return (index + inc - queue_size) ^ CIRCULAR_QUEUE_WRAP_BIT;
- else
- return index + inc;
-}
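
For reference, a minimal userspace harness for the mirrored circular buffer arithmetic being removed here (hypothetical values and helper names; not part of the driver):

#include <assert.h>
#include <stdint.h>

#define WRAP_BIT  (1u << 15)            /* CIRCULAR_QUEUE_WRAP_BIT */
#define REAL(idx) ((idx) & (WRAP_BIT - 1))

/* Same arithmetic as circular_queue_count() above. */
static uint32_t count(uint32_t head, uint32_t tail, uint32_t size)
{
	return ((head ^ tail) & WRAP_BIT) ? size - REAL(head) + REAL(tail)
					  : tail - head;
}

/* Same arithmetic as circular_queue_inc() above. */
static uint32_t inc(uint32_t idx, uint32_t n, uint32_t size)
{
	return (REAL(idx) + n >= size) ? (idx + n - size) ^ WRAP_BIT
				       : idx + n;
}

int main(void)
{
	/* head = 1022 (wrap clear), tail = 2 (wrap set): 4 elements. */
	assert(count(0x03FE, 0x8002, 1024) == 4);
	/* Advancing head by 4 wraps it and toggles its wrap bit. */
	assert(inc(0x03FE, 4, 1024) == 0x8002);
	/* head == tail (same wrap bit): the queue is empty again. */
	assert(count(0x8002, 0x8002, 1024) == 0);
	return 0;
}
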
-
-/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
-static void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->cmd_queue_tail = value;
- gxp_mailbox_write_cmd_queue_tail(mailbox, value);
-}
-
-/* Sets mailbox->resp_queue_head and corresponding CSR on device. */
-static void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->resp_queue_head = value;
- gxp_mailbox_write_resp_queue_head(mailbox, value);
-}
-
-/*
- * Increases the command queue tail by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is advanced past the end of the queue, its wrap bit is
- * xor-ed.
- *
- * This method will update both mailbox->cmd_queue_tail and CSR on device.
- *
- * Returns 0 on success.
- * If the command queue tail would pass the command queue head after adding
- * @inc, -EBUSY is returned and all fields remain unchanged. The caller should
- * handle this case and implement a mechanism to wait until the consumer
- * consumes commands.
- *
- * Caller must hold cmd_queue_lock.
- */
-static int gxp_mailbox_inc_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 head;
- u32 remain_size;
- u32 new_tail;
-
- lockdep_assert_held(&mailbox->cmd_queue_lock);
-
- if (inc > mailbox->cmd_queue_size)
- return -EINVAL;
-
- head = gxp_mailbox_read_cmd_queue_head(mailbox);
- remain_size = mailbox->cmd_queue_size -
- circular_queue_count(head, mailbox->cmd_queue_tail,
- mailbox->cmd_queue_size);
-	/* not enough space left */
- if (inc > remain_size)
- return -EBUSY;
-
- new_tail = circular_queue_inc(mailbox->cmd_queue_tail, inc,
- mailbox->cmd_queue_size);
- gxp_mailbox_set_cmd_queue_tail(mailbox, new_tail);
- return 0;
-}
-
-/*
- * Increases the response queue head by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is advanced past the end of the queue, its wrap bit is
- * xor-ed.
- *
- * This method will update both mailbox->resp_queue_head and CSR on device.
- *
- * Returns 0 on success.
- * -EINVAL is returned if the queue head would pass the queue tail; no
- * fields or CSRs are updated in this case.
- *
- * Caller must hold resp_queue_lock.
- */
-static int gxp_mailbox_inc_resp_queue_head(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 tail;
- u32 size;
- u32 new_head;
-
- lockdep_assert_held(&mailbox->resp_queue_lock);
-
- if (inc > mailbox->resp_queue_size)
- return -EINVAL;
-
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- size = circular_queue_count(mailbox->resp_queue_head, tail,
- mailbox->resp_queue_size);
- if (inc > size)
- return -EINVAL;
- new_head = circular_queue_inc(mailbox->resp_queue_head, inc,
- mailbox->resp_queue_size);
- gxp_mailbox_set_resp_queue_head(mailbox, new_head);
-
- return 0;
-}
-
-struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
- uint num_cores)
-{
- struct gxp_mailbox_manager *mgr;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->gxp = gxp;
- mgr->num_cores = num_cores;
- mgr->get_mailbox_csr_base = gxp_mailbox_get_csr_base;
- mgr->get_mailbox_data_base = gxp_mailbox_get_data_base;
-
- mgr->mailboxes = devm_kcalloc(gxp->dev, mgr->num_cores,
- sizeof(*mgr->mailboxes), GFP_KERNEL);
- if (!mgr->mailboxes)
- return ERR_PTR(-ENOMEM);
-
- return mgr;
-}
-
-/*
- * Pops the wait_list until the sequence number of @resp is found, and copies
- * @resp to the found entry.
- *
- * Entries in wait_list should have sequence numbers in increasing order, but
- * responses may arrive and be handled out of order.
- *
- * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
- * 1. #cur->resp->seq > @resp->seq:
- * - Nothing to do, either @resp is invalid or its command timed out.
- * - We're done.
- * 2. #cur->resp->seq == @resp->seq:
- * - Copy @resp, pop the head.
- * - If #cur->resp has a destination queue, push it to that queue
- * - We're done.
- * 3. #cur->resp->seq < @resp->seq:
- * - @resp has arrived out of sequence order.
- * - Leave #cur->resp in the wait_list.
- * - Keep iterating unless the list is exhausted.
- */
-static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
- const struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- unsigned long flags;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * This response has already timed out and been removed
- * from the wait list (or this is an invalid response).
- * Drop it.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- memcpy(cur->resp, resp, sizeof(*resp));
- list_del(&cur->list);
- if (cur->is_async) {
- async_resp =
- container_of(cur->resp,
- struct gxp_async_response,
- resp);
-
- cancel_delayed_work(&async_resp->timeout_work);
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp,
- async_resp->gxp_power_state,
- async_resp->requested_low_clkmux,
- AUR_OFF, false,
- async_resp->memory_power_state,
- AUR_MEM_UNDEFINED);
-
- spin_lock_irqsave(async_resp->dest_queue_lock,
- flags);
-
- list_add_tail(&async_resp->list_entry,
- async_resp->dest_queue);
- /*
- * Marking the dest_queue as NULL indicates the
- * response was handled in case its timeout
- * handler fired between acquiring the
- * wait_list_lock and cancelling the timeout.
- */
- async_resp->dest_queue = NULL;
-
- /*
- * Don't release the dest_queue_lock until both
- * any eventfd has been signaled and any waiting
- * thread has been woken. Otherwise one thread
- * might consume and free the response before
- * this function is done with it.
- */
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
-
- spin_unlock_irqrestore(
- async_resp->dest_queue_lock, flags);
-
- }
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-/*
- * Fetches elements in the response queue.
- *
- * Returns a pointer to the fetched response elements.
- * @total_ptr will be set to the number of elements fetched.
- *
- * Returns ERR_PTR(-ENOMEM) if memory allocation fails.
- * Returns NULL if the response queue is empty.
- */
-static struct gxp_response *
-gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
-{
- u32 head;
- u32 tail;
- u32 count;
- u32 i;
- u32 j;
- u32 total = 0;
- const u32 size = mailbox->resp_queue_size;
- const struct gxp_response *queue = mailbox->resp_queue;
- struct gxp_response *ret = NULL;
- struct gxp_response *prev_ptr = NULL;
-
- mutex_lock(&mailbox->resp_queue_lock);
-
- head = mailbox->resp_queue_head;
-	/* loop until our head equals the CSR tail */
- while (1) {
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- count = circular_queue_count(head, tail, size);
- if (count == 0)
- break;
-
- prev_ptr = ret;
- ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
- GFP_KERNEL);
- /*
-		 * Out of memory: return the previously fetched responses
-		 * if any, or -ENOMEM otherwise.
- */
- if (!ret) {
- if (!prev_ptr)
- ret = ERR_PTR(-ENOMEM);
- else
- ret = prev_ptr;
- break;
- }
- /* copy responses */
- j = CIRCULAR_QUEUE_REAL_INDEX(head);
- for (i = 0; i < count; i++) {
- memcpy(&ret[total], &queue[j], sizeof(*queue));
- ret[total].status = GXP_RESP_OK;
- j = (j + 1) % size;
- total++;
- }
- head = circular_queue_inc(head, count, size);
- }
- gxp_mailbox_inc_resp_queue_head(mailbox, total);
-
- mutex_unlock(&mailbox->resp_queue_lock);
- /*
- * Now that the response queue has been drained, send an interrupt
- * to the device in case firmware was waiting for us to consume
- * responses.
- */
- if (total == size) {
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- }
-
- *total_ptr = total;
- return ret;
-}
-
/*
* Fetches and handles responses, then wakes up threads that are waiting for a
* response.
@@ -364,26 +48,21 @@ static void gxp_mailbox_consume_responses_work(struct kthread_work *work)
{
struct gxp_mailbox *mailbox =
container_of(work, struct gxp_mailbox, response_work);
- struct gxp_response *responses;
- u32 i;
- u32 count = 0;
-
- /* fetch responses and bump RESP_QUEUE_HEAD */
- responses = gxp_mailbox_fetch_responses(mailbox, &count);
- if (IS_ERR(responses)) {
- dev_err(mailbox->gxp->dev,
- "GXP Mailbox failed on fetching responses: %ld",
- PTR_ERR(responses));
- return;
- }
- for (i = 0; i < count; i++)
- gxp_mailbox_handle_response(mailbox, &responses[i]);
- /*
- * Responses handled, wake up threads that are waiting for a response.
- */
- wake_up(&mailbox->wait_list_waitq);
- kfree(responses);
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ gcip_kci_handle_irq(mailbox->mbx_impl.gcip_kci);
+ gxp_mcu_telemetry_irq_handler(
+ ((struct gxp_kci *)mailbox->data)->mcu);
+ break;
+ }
+#endif
}
/*
@@ -398,8 +77,8 @@ static inline void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
/* Priority level for realtime worker threads */
#define GXP_RT_THREAD_PRIORITY 2
-static struct task_struct *
-create_response_rt_thread(struct device *dev, void *data, int core_id)
+static struct task_struct *create_response_rt_thread(struct device *dev,
+ void *data, int core_id)
{
static const struct sched_param param = {
.sched_priority = GXP_RT_THREAD_PRIORITY,
@@ -420,66 +99,72 @@ create_response_rt_thread(struct device *dev, void *data, int core_id)
return task;
}
+static int gxp_mailbox_set_ops(struct gxp_mailbox *mailbox,
+ struct gxp_mailbox_ops *ops)
+{
+ if (!ops) {
+ dev_err(mailbox->gxp->dev, "Incomplete gxp_mailbox ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+static inline void gxp_mailbox_set_data(struct gxp_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
+
+ if (!args) {
+ dev_err(mgr->gxp->dev, "Incomplete gxp_mailbox args.\n");
+ ret = -EINVAL;
+ goto err_args;
+ }
mailbox = kzalloc(sizeof(*mailbox), GFP_KERNEL);
- if (!mailbox)
+ if (!mailbox) {
+ ret = -ENOMEM;
goto err_mailbox;
+ }
mailbox->core_id = core_id;
mailbox->gxp = mgr->gxp;
mailbox->csr_reg_base = mgr->get_mailbox_csr_base(mgr->gxp, core_id);
mailbox->data_reg_base = mgr->get_mailbox_data_base(mgr->gxp, core_id);
+ mailbox->type = args->type;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gxp_mailbox_set_data(mailbox, args->data);
+
+ ret = gxp_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_set_ops;
- /* Allocate and initialize the command queue */
- mailbox->cmd_queue = (struct gxp_command *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
- &(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->cmd_queue)
- goto err_cmd_queue;
+ ret = mailbox->ops->allocate_resources(mailbox, vd, virt_core);
+ if (ret)
+ goto err_allocate_resources;
- mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
- mailbox->cmd_queue_tail = 0;
mutex_init(&mailbox->cmd_queue_lock);
-
- /* Allocate and initialize the response queue */
- mailbox->resp_queue = (struct gxp_response *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
- &(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->resp_queue)
- goto err_resp_queue;
-
- mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
- mailbox->resp_queue_head = 0;
mutex_init(&mailbox->resp_queue_lock);
-
- /* Allocate and initialize the mailbox descriptor */
- mailbox->descriptor =
- (struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- &(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
- if (!mailbox->descriptor)
- goto err_descriptor;
-
- mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
- mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
- mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
- mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
-
kthread_init_worker(&mailbox->response_worker);
mailbox->response_thread = create_response_rt_thread(
mailbox->gxp->dev, &mailbox->response_worker, core_id);
- if (IS_ERR(mailbox->response_thread))
+ if (IS_ERR(mailbox->response_thread)) {
+ ret = -ENOMEM;
goto err_thread;
+ }
/* Initialize driver before interacting with its registers */
gxp_mailbox_driver_init(mailbox);
@@ -487,73 +172,236 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
return mailbox;
err_thread:
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
-err_descriptor:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
-err_resp_queue:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
-err_cmd_queue:
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+err_allocate_resources:
+err_set_ops:
kfree(mailbox);
err_mailbox:
- return ERR_PTR(-ENOMEM);
+err_args:
+ return ERR_PTR(ret);
+}
+
+static void release_mailbox(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd, uint virt_core)
+{
+ if (IS_GXP_TEST && !mailbox)
+ return;
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+ kthread_flush_worker(&mailbox->response_worker);
+ if (mailbox->response_thread)
+ kthread_stop(mailbox->response_thread);
+ kfree(mailbox);
+}
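
Queue allocation now lives behind `gxp_mailbox_ops` rather than inline in create_mailbox(). A rough sketch of what an implementation might look like; the ops field names come from this file, while everything else (names, bodies) is illustrative:

/* Hypothetical sketch; only the gxp_mailbox_ops fields are real. */
static int my_allocate_resources(struct gxp_mailbox *mailbox,
				 struct gxp_virtual_device *vd, uint virt_core)
{
	/*
	 * Allocate the command queue, response queue, and descriptor
	 * (e.g. with gxp_dma_alloc_coherent(), as the removed inline code
	 * did) and record them in mailbox->cmd_queue_buf,
	 * mailbox->resp_queue_buf, and mailbox->descriptor_buf so that
	 * enable_mailbox() can program descriptor_buf.dsp_addr.
	 */
	return 0;
}

static void my_release_resources(struct gxp_mailbox *mailbox,
				 struct gxp_virtual_device *vd, uint virt_core)
{
	/* Free everything allocated by my_allocate_resources(). */
}

static const struct gxp_mailbox_ops my_mbx_ops = {
	.allocate_resources = my_allocate_resources,
	.release_resources = my_release_resources,
	/* With !GXP_USE_LEGACY_MAILBOX, also set .gcip_ops.mbx or .kci. */
};
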
+
+#if !GXP_USE_LEGACY_MAILBOX
+static int init_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_mailbox_args args = {
+ .dev = mailbox->gxp->dev,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .cmd_elem_size = mailbox->cmd_elem_size,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .resp_elem_size = mailbox->resp_elem_size,
+ .timeout = MAILBOX_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.mbx,
+ .data = mailbox,
+ .ignore_seq_order = mailbox->ignore_seq_order,
+ };
+ struct gcip_mailbox *gcip_mbx;
+ int ret;
+
+ gcip_mbx = kzalloc(sizeof(*gcip_mbx), GFP_KERNEL);
+ if (!gcip_mbx)
+ return -ENOMEM;
+
+ /* Initialize gcip_mailbox */
+ ret = gcip_mailbox_init(gcip_mbx, &args);
+ if (ret) {
+ kfree(gcip_mbx);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_mbx = gcip_mbx;
+
+ return 0;
+}
+
+static void release_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ struct gcip_mailbox *gcip_mbx = mailbox->mbx_impl.gcip_mbx;
+
+ if (gcip_mbx == NULL)
+ return;
+
+ gcip_mailbox_release(gcip_mbx);
+ kfree(gcip_mbx);
+ mailbox->mbx_impl.gcip_mbx = NULL;
+}
+
+static int init_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_kci_args args = {
+ .dev = mailbox->gxp->dev,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .rkci_buffer_size = GXP_REVERSE_KCI_BUFFER_SIZE,
+ .timeout = GXP_KCI_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.kci,
+ .data = mailbox,
+ };
+ struct gcip_kci *gcip_kci;
+ int ret;
+
+ gcip_kci = kzalloc(sizeof(*gcip_kci), GFP_KERNEL);
+ if (!gcip_kci)
+ return -ENOMEM;
+
+ ret = gcip_kci_init(gcip_kci, &args);
+ if (ret) {
+ kfree(gcip_kci);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_kci = gcip_kci;
+
+ return 0;
+}
+
+static void release_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ struct gcip_kci *gcip_kci = mailbox->mbx_impl.gcip_kci;
+
+ if (gcip_kci == NULL)
+ return;
+
+ gcip_kci_cancel_work_queues(gcip_kci);
+ gcip_kci_release(gcip_kci);
+ kfree(gcip_kci);
+ mailbox->mbx_impl.gcip_kci = NULL;
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
+/*
+ * Initializes @mailbox->mbx_impl to start waiting for and consuming responses.
+ * This initializes the GCIP mailbox module that matches the type of @mailbox:
+ * - GENERAL: initializes @mailbox->mbx_impl.gcip_mbx
+ * - KCI: initializes @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it initializes @mailbox itself,
+ * as the queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static int init_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+ int ret;
+
+#if GXP_USE_LEGACY_MAILBOX
+ if (mailbox->type != GXP_MBOX_TYPE_GENERAL)
+ return -EOPNOTSUPP;
+
+ ret = gxp_mailbox_init_consume_responses(mailbox);
+ if (ret)
+ return ret;
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ ret = init_gcip_mailbox(mailbox);
+ if (ret)
+ return ret;
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ ret = init_gcip_kci(mailbox);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+#endif /* GXP_USE_LEGACY_MAILBOX */
+
+ return 0;
}
-static void enable_mailbox(struct gxp_mailbox *mailbox)
+static int enable_mailbox(struct gxp_mailbox *mailbox)
{
- gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_device_addr);
+ int ret;
+
+ gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_buf.dsp_addr);
gxp_mailbox_write_cmd_queue_head(mailbox, 0);
gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
gxp_mailbox_write_resp_queue_head(mailbox, 0);
gxp_mailbox_write_resp_queue_tail(mailbox, 0);
+ ret = init_mailbox_impl(mailbox);
+ if (ret)
+ return ret;
+
mailbox->handle_irq = gxp_mailbox_handle_irq;
- mailbox->cur_seq = 0;
- init_waitqueue_head(&mailbox->wait_list_waitq);
- INIT_LIST_HEAD(&mailbox->wait_list);
mutex_init(&mailbox->wait_list_lock);
- kthread_init_work(&mailbox->response_work, gxp_mailbox_consume_responses_work);
+ kthread_init_work(&mailbox->response_work,
+ gxp_mailbox_consume_responses_work);
/* Only enable interrupts once everything has been setup */
gxp_mailbox_driver_enable_interrupts(mailbox);
/* Enable the mailbox */
gxp_mailbox_write_status(mailbox, 1);
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+
+ return 0;
}
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
- mailbox = create_mailbox(mgr, vd, virt_core, core_id);
+ mailbox = create_mailbox(mgr, vd, virt_core, core_id, args);
if (IS_ERR(mailbox))
return mailbox;
- enable_mailbox(mailbox);
+ ret = enable_mailbox(mailbox);
+ if (ret) {
+ release_mailbox(mailbox, vd, virt_core);
+ return ERR_PTR(ret);
+ }
return mailbox;
}
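
With the new interface, every property of the mailbox arrives through `gxp_mailbox_args`. A hypothetical caller, reusing `my_mbx_ops` from the sketch above (all values illustrative):

static struct gxp_mailbox *my_open_mailbox(struct gxp_mailbox_manager *mgr,
					   struct gxp_virtual_device *vd,
					   uint virt_core, u8 core_id)
{
	const struct gxp_mailbox_args args = {
		.type = GXP_MBOX_TYPE_GENERAL,
		.ops = &my_mbx_ops,
		.queue_wrap_bit = BIT(15),	/* e.g. the legacy wrap bit */
		.cmd_elem_size = sizeof(struct gxp_command),
		.resp_elem_size = sizeof(struct gxp_response),
		.ignore_seq_order = false,
		.data = NULL,			/* per-client context */
	};

	return gxp_mailbox_alloc(mgr, vd, virt_core, core_id, &args);
}
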
+/*
+ * Releases @mailbox->mbx_impl, flushing all pending responses in the wait
+ * list.
+ * This releases the GCIP mailbox module that matches the type of @mailbox:
+ * - GENERAL: releases @mailbox->mbx_impl.gcip_mbx
+ * - KCI: releases @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it releases @mailbox itself,
+ * as the queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static void release_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_release_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ release_gcip_mailbox(mailbox);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ release_gcip_kci(mailbox);
+ break;
+ }
+#endif
+}
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox)
{
int i;
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- struct list_head resps_to_flush;
- unsigned long flags;
if (!mailbox) {
dev_err(mgr->gxp->dev,
@@ -576,51 +424,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
cancel_work_sync(mailbox->interrupt_handlers[i]);
}
- /*
- * At this point only async responses should be pending. Flush them all
- * from the `wait_list` at once so any remaining timeout workers
- * waiting on `wait_list_lock` will know their responses have been
- * handled already.
- */
- INIT_LIST_HEAD(&resps_to_flush);
- mutex_lock(&mailbox->wait_list_lock);
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- list_del(&cur->list);
- if (cur->is_async) {
- list_add_tail(&cur->list, &resps_to_flush);
- /*
- * Clear the response's destination queue so that if the
- * timeout worker is running, it won't try to process
- * this response after `wait_list_lock` is released.
- */
- async_resp = container_of(
- cur->resp, struct gxp_async_response, resp);
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(async_resp->dest_queue_lock,
- flags);
-
- } else {
- dev_warn(
- mailbox->gxp->dev,
- "Unexpected synchronous command pending on mailbox release\n");
- kfree(cur);
- }
- }
- mutex_unlock(&mailbox->wait_list_lock);
-
- /*
-	 * Cancel the timeout timers of, and free, any responses that were
-	 * still in the `wait_list` above.
- */
- list_for_each_entry_safe(cur, nxt, &resps_to_flush, list) {
- list_del(&cur->list);
- async_resp = container_of(cur->resp, struct gxp_async_response,
- resp);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- kfree(cur);
- }
+ release_mailbox_impl(mailbox);
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
@@ -637,21 +441,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
mgr->mailboxes[mailbox->core_id] = NULL;
/* Clean up resources */
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
- kthread_flush_worker(&mailbox->response_worker);
- kthread_stop(mailbox->response_thread);
- kfree(mailbox);
+ release_mailbox(mailbox, vd, virt_core);
}
void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
@@ -659,241 +449,6 @@ void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
dev_notice(mailbox->gxp->dev, "%s not yet implemented\n", __func__);
}
-/*
- * Adds @resp to @mailbox->wait_list.
- *
- * wait_list is a FIFO queue, with sequence number in increasing order.
- *
- * Returns 0 on success, or -ENOMEM if allocation fails.
- */
-static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp, bool is_async)
-{
- struct gxp_mailbox_wait_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry)
- return -ENOMEM;
- entry->resp = resp;
- entry->is_async = is_async;
- mutex_lock(&mailbox->wait_list_lock);
- list_add_tail(&entry->list, &mailbox->wait_list);
- mutex_unlock(&mailbox->wait_list_lock);
-
- return 0;
-}
-
-/*
- * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
- *
- * This is used when the kernel gives up waiting for the response.
- */
-static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry(cur, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * Sequence numbers in wait_list are in increasing
- * order. This case implies no entry in the list
- * matches @resp's sequence number.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- list_del(&cur->list);
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct gxp_response *resp,
- bool resp_is_async)
-{
- int ret;
- u32 tail;
-
- mutex_lock(&mailbox->cmd_queue_lock);
-
- cmd->seq = mailbox->cur_seq;
- /*
- * The lock ensures mailbox->cmd_queue_tail cannot be changed by
- * other processes (this method should be the only one to modify the
- * value of tail), therefore we can remember its value here and use it
- * in various places below.
- */
- tail = mailbox->cmd_queue_tail;
-
- /*
- * If the cmd queue is full, it's up to the caller to retry.
- */
- if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
- (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
- ret = -EAGAIN;
- goto out;
- }
-
- if (resp) {
- /*
- * Add @resp to the wait_list only if the cmd can be pushed
- * successfully.
- */
- resp->seq = cmd->seq;
- resp->status = GXP_RESP_WAITING;
- ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
- if (ret)
- goto out;
- }
- /* size of cmd_queue is a multiple of sizeof(*cmd) */
- memcpy(mailbox->cmd_queue + CIRCULAR_QUEUE_REAL_INDEX(tail), cmd,
- sizeof(*cmd));
- gxp_mailbox_inc_cmd_queue_tail(mailbox, 1);
- /* triggers doorbell */
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- /* bumps sequence number after the command is sent */
- mailbox->cur_seq++;
- ret = 0;
-out:
- mutex_unlock(&mailbox->cmd_queue_lock);
- if (ret)
- dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
-
- return ret;
-}
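
The full-queue test in gxp_mailbox_enqueue_cmd() above relies on the wrap bit: the queue is full exactly when head and tail share a real index but differ in their wrap bits. A worked example with hypothetical values:

/*
 * queue_size = 1024, head = 0x0002, tail = 0x8002:
 *   tail ^ CIRCULAR_QUEUE_WRAP_BIT = 0x0002 == head
 * Same real index, opposite wrap bits: the producer has lapped the
 * consumer once, so the queue holds 1024 - 2 + 2 = 1024 elements and
 * no slot is free; the enqueue returns -EAGAIN.
 */
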
-
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp)
-{
- int ret;
-
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
- /* resp_is_async = */ false);
- if (ret)
- return ret;
- ret = wait_event_timeout(mailbox->wait_list_waitq,
- resp->status != GXP_RESP_WAITING,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (!ret) {
- dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
- __func__);
- gxp_mailbox_del_wait_resp(mailbox, resp);
- return -ETIMEDOUT;
- }
- if (resp->status != GXP_RESP_OK) {
- dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
- resp->status);
- return -ENOMSG;
- }
-
- return resp->retval;
-}
-
-static void async_cmd_timeout_work(struct work_struct *work)
-{
- struct gxp_async_response *async_resp = container_of(
- work, struct gxp_async_response, timeout_work.work);
- unsigned long flags;
-
- /*
- * This function will acquire the mailbox wait_list_lock. This means if
- * response processing is in progress, it will complete before this
- * response can be removed from the wait list.
- *
- * Once this function has the wait_list_lock, no future response
- * processing will begin until this response has been removed.
- */
- gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
-
- /*
- * Check if this response still has a valid destination queue, in case
- * an in-progress call to `gxp_mailbox_handle_response()` completed
- * the response while `gxp_mailbox_del_wait_resp()` was waiting for
- * the wait_list_lock.
- */
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- if (async_resp->dest_queue) {
- async_resp->resp.status = GXP_RESP_CANCELLED;
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
-
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp, async_resp->gxp_power_state,
- async_resp->requested_low_clkmux, AUR_OFF, false,
- async_resp->memory_power_state, AUR_MEM_UNDEFINED);
-
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
- } else {
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
- }
-}
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- uint gxp_power_state, uint memory_power_state,
- bool requested_low_clkmux,
- struct gxp_eventfd *eventfd)
-{
- struct gxp_async_response *async_resp;
- int ret;
-
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
- return -ENOMEM;
-
- async_resp->mailbox = mailbox;
- async_resp->dest_queue = resp_queue;
- async_resp->dest_queue_lock = queue_lock;
- async_resp->dest_queue_waitq = queue_waitq;
- async_resp->gxp_power_state = gxp_power_state;
- async_resp->memory_power_state = memory_power_state;
- async_resp->requested_low_clkmux = requested_low_clkmux;
- if (eventfd && gxp_eventfd_get(eventfd))
- async_resp->eventfd = eventfd;
- else
- async_resp->eventfd = NULL;
-
- INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
-
- gxp_pm_update_requested_power_states(
- mailbox->gxp, AUR_OFF, false, gxp_power_state,
- requested_low_clkmux, AUR_MEM_UNDEFINED, memory_power_state);
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
- /* resp_is_async = */ true);
- if (ret)
- goto err_free_resp;
-
- return 0;
-
-err_free_resp:
- gxp_pm_update_requested_power_states(mailbox->gxp, gxp_power_state,
- requested_low_clkmux, AUR_OFF, false,
- memory_power_state,
- AUR_MEM_UNDEFINED);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- return ret;
-}
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler)
@@ -908,7 +463,7 @@ int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
}
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
- u32 int_bit)
+ u32 int_bit)
{
/* Bit 0 is reserved for incoming mailbox responses */
if (int_bit == 0 || int_bit >= GXP_MAILBOX_INT_BIT_COUNT)
@@ -918,3 +473,31 @@ int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
return 0;
}
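
For custom interrupt bits, a handler is just a work item. A hypothetical registration, assuming <linux/workqueue.h> is available (the names and the chosen bit are illustrative):

static void my_doorbell_work(struct work_struct *work)
{
	/* Runs in process context when the registered bit fires. */
}

static int my_register(struct gxp_mailbox *mailbox)
{
	static DECLARE_WORK(my_work, my_doorbell_work);

	/* Bit 0 is reserved for incoming mailbox responses. */
	return gxp_mailbox_register_interrupt_handler(mailbox, 1, &my_work);
}
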
+
+#if !GXP_USE_LEGACY_MAILBOX
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_send_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp);
+ case GXP_MBOX_TYPE_KCI:
+ return gcip_kci_send_cmd(mailbox->mbx_impl.gcip_kci, cmd);
+ }
+ return -EOPNOTSUPP;
+}
+
+struct gcip_mailbox_resp_awaiter *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_put_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp, data);
+ default:
+ break;
+ }
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */