Diffstat (limited to 'gcip-kernel-driver/include')
 gcip-kernel-driver/include/gcip/gcip-alloc-helper.h        |  50 +
 gcip-kernel-driver/include/gcip/gcip-common-image-header.h |  67 +
 gcip-kernel-driver/include/gcip/gcip-domain-pool.h         |  49 +
 gcip-kernel-driver/include/gcip/gcip-firmware.h            |  49 +
 gcip-kernel-driver/include/gcip/gcip-image-config.h        | 153 +
 gcip-kernel-driver/include/gcip/gcip-kci.h                 | 387 +
 gcip-kernel-driver/include/gcip/gcip-mailbox.h             | 538 +
 gcip-kernel-driver/include/gcip/gcip-mem-pool.h            |  71 +
 gcip-kernel-driver/include/gcip/gcip-telemetry.h           | 123 +
 9 files changed, 1487 insertions(+), 0 deletions(-)
diff --git a/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
new file mode 100644
index 0000000..3d2c110
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP helpers for memory allocation.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_ALLOC_HELPER_H__
+#define __GCIP_ALLOC_HELPER_H__
+
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+/*
+ * The actual return value of gcip_alloc_noncontiguous().
+ * The user should only care about @sgt; @mem is used internally for freeing the memory.
+ */
+struct gcip_sgt_handle {
+ struct sg_table sgt;
+ void *mem;
+};
+
+/*
+ * Allocates non-contiguous memory with size @size bytes.
+ *
+ * @dev: Pointer to the device structure; used for logging and as the NUMA node for page
+ *       allocation.
+ * @size: Total size in bytes. Will be page-aligned.
+ * @gfp: The GFP flags used when allocating internal structures.
+ *
+ * Returns the SG table that represents the non-contiguous region.
+ * Returns NULL on any error.
+ */
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp);
+/* Frees the memory allocated by gcip_alloc_noncontiguous. */
+void gcip_free_noncontiguous(struct sg_table *sgt);
+
+/*
+ * Returns the virtual memory address that was used to allocate @sgt.
+ *
+ * @sgt must be a pointer returned by gcip_alloc_noncontiguous.
+ */
+static inline void *gcip_noncontiguous_sgt_to_mem(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ return sh->mem;
+}
+
+#endif /* __GCIP_ALLOC_HELPER_H__ */
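
A minimal usage sketch of the helpers above (illustrative only, not part of this change; example_alloc and the 1 MiB size are made up): allocate a possibly non-contiguous region, touch it through the kernel mapping behind the SG table, then free it.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <gcip/gcip-alloc-helper.h>

static int example_alloc(struct device *dev)
{
	struct sg_table *sgt;
	void *mem;

	/* Page-aligned, possibly non-contiguous allocation. */
	sgt = gcip_alloc_noncontiguous(dev, SZ_1M, GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	/* Recover the kernel virtual mapping used to build @sgt. */
	mem = gcip_noncontiguous_sgt_to_mem(sgt);
	memset(mem, 0, SZ_1M);

	gcip_free_noncontiguous(sgt);
	return 0;
}
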
diff --git a/gcip-kernel-driver/include/gcip/gcip-common-image-header.h b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
new file mode 100644
index 0000000..d986fbc
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common authenticated image format for Google SoCs
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_COMMON_IMAGE_HEADER_H__
+#define __GCIP_COMMON_IMAGE_HEADER_H__
+
+#include <linux/types.h>
+
+#include "gcip-image-config.h"
+
+#define GCIP_FW_HEADER_SIZE (0x1000)
+
+struct gcip_common_image_sub_header_common {
+ uint32_t magic;
+ uint32_t generation;
+ uint32_t rollback_info;
+ uint32_t length;
+ uint8_t flags[16];
+};
+
+struct gcip_common_image_sub_header_gen1 {
+ uint8_t body_hash[32];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_sub_header_gen2 {
+ uint8_t body_hash[64];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_header {
+ uint8_t sig[512];
+ uint8_t pub[512];
+ struct {
+ struct gcip_common_image_sub_header_common common;
+ union {
+ struct gcip_common_image_sub_header_gen1 gen1;
+ struct gcip_common_image_sub_header_gen2 gen2;
+ };
+ };
+};
+
+/*
+ * Returns the image config field from a common image header,
+ * or NULL if the header has an invalid generation identifier.
+ */
+static inline struct gcip_image_config *
+get_image_config_from_hdr(struct gcip_common_image_header *hdr)
+{
+ switch (hdr->common.generation) {
+ case 1:
+ return &hdr->gen1.image_config;
+ case 2:
+ return &hdr->gen2.image_config;
+ }
+ return NULL;
+}
+
+#endif /* __GCIP_COMMON_IMAGE_HEADER_H__ */
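
A sketch of how a loader might consume this header (illustrative; example_image_config is hypothetical): interpret the start of a firmware image as the common header and pull out the embedded image config, rejecting images that are too small.

#include <linux/types.h>

#include <gcip/gcip-common-image-header.h>

/* Returns the image config of @image, or NULL if @image is smaller than
 * the fixed header or carries an unknown generation. */
static struct gcip_image_config *example_image_config(void *image, size_t size)
{
	struct gcip_common_image_header *hdr = image;

	if (size < GCIP_FW_HEADER_SIZE)
		return NULL;
	return get_image_config_from_hdr(hdr);
}
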
diff --git a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
new file mode 100644
index 0000000..b740bf9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_DOMAIN_POOL_H__
+#define __GCIP_DOMAIN_POOL_H__
+
+#include <linux/idr.h>
+#include <linux/iommu.h>
+
+struct gcip_domain_pool {
+ struct ida idp; /* ID allocator to keep track of used domains. */
+ /*
+ * Size of the pool. Can be set to 0, in which case the implementation will fall back to
+ * dynamic domain allocation using the IOMMU API directly.
+ */
+ unsigned int size;
+ struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
+ struct device *dev; /* The device used for logging warnings/errors. */
+};
+
+/*
+ * Initializes a domain pool.
+ *
+ * @dev: pointer to device structure.
+ * @pool: caller-allocated pool structure.
+ * @size: size of the pre-allocated domains pool.
+ * Set to zero to fall back to dynamically allocated domains.
+ *
+ * Returns 0 on success or a negative error value.
+ */
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size);
+
+/*
+ * Allocates a domain from the pool.
+ * Returns NULL on error.
+ */
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool);
+
+/* Releases a domain back to the pool. */
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain);
+
+/* Cleans up all resources used by the domain pool. */
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool);
+
+#endif /* __GCIP_DOMAIN_POOL_H__ */
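
An illustrative lifetime sketch (the pool size of 4 and example_domain_pool are made up): initialize a pool of pre-allocated domains, take one, and tear everything down.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

#include <gcip/gcip-domain-pool.h>

static int example_domain_pool(struct device *dev)
{
	struct gcip_domain_pool pool;
	struct iommu_domain *domain;
	int ret;

	/* Pass 0 instead of 4 to fall back to dynamic allocation. */
	ret = gcip_domain_pool_init(dev, &pool, 4);
	if (ret)
		return ret;

	domain = gcip_domain_pool_alloc(&pool);
	if (!domain) {
		gcip_domain_pool_destroy(&pool);
		return -ENOMEM;
	}

	/* ... attach the domain and map memory here ... */

	gcip_domain_pool_free(&pool, domain);
	gcip_domain_pool_destroy(&pool);
	return 0;
}
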
diff --git a/gcip-kernel-driver/include/gcip/gcip-firmware.h b/gcip-kernel-driver/include/gcip/gcip-firmware.h
new file mode 100644
index 0000000..b856e5c
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-firmware.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_FIRMWARE_H__
+#define __GCIP_FIRMWARE_H__
+
+#include <linux/types.h>
+
+enum gcip_fw_status {
+ /* No firmware loaded yet, or last firmware failed to run. */
+ GCIP_FW_INVALID = 0,
+ /* Load in progress. */
+ GCIP_FW_LOADING = 1,
+ /* Current firmware is valid and can be restarted. */
+ GCIP_FW_VALID = 2,
+};
+
+/* Firmware flavors returned via KCI FIRMWARE_INFO command. */
+enum gcip_fw_flavor {
+ /* Unused value for extending enum storage type. */
+ GCIP_FW_FLAVOR_ERROR = -1,
+ /* Used by the host when it cannot determine the flavor. */
+ GCIP_FW_FLAVOR_UNKNOWN = 0,
+ /* Second-stage bootloader (no longer used). */
+ GCIP_FW_FLAVOR_BL1 = 1,
+ /* Systest app image. */
+ GCIP_FW_FLAVOR_SYSTEST = 2,
+ /* Default production app image. */
+ GCIP_FW_FLAVOR_PROD_DEFAULT = 3,
+ /* Custom image produced by other teams. */
+ GCIP_FW_FLAVOR_CUSTOM = 4,
+};
+
+/* Firmware info filled out via KCI FIRMWARE_INFO command. */
+struct gcip_fw_info {
+ uint64_t fw_build_time; /* BuildData::Timestamp() */
+ uint32_t fw_flavor; /* enum gcip_fw_flavor */
+ uint32_t fw_changelist; /* BuildData::Changelist() */
+ uint32_t spare[10];
+};
+
+/* Returns the name of @fw_flavor as a string. */
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor);
+
+#endif /* __GCIP_FIRMWARE_H__ */
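
A sketch of consuming struct gcip_fw_info (illustrative; how the struct is filled by the FIRMWARE_INFO KCI command is chip-specific and not shown here):

#include <linux/device.h>

#include <gcip/gcip-firmware.h>

static void example_log_fw_info(struct device *dev, const struct gcip_fw_info *info)
{
	/* fw_flavor travels as a u32; interpret it as the enum. */
	enum gcip_fw_flavor flavor = info->fw_flavor;

	dev_info(dev, "firmware flavor=%s changelist=%u build_time=%llu\n",
		 gcip_fw_flavor_str(flavor), info->fw_changelist,
		 info->fw_build_time);
}
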
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
new file mode 100644
index 0000000..a995188
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_IMAGE_CONFIG_H__
+#define __GCIP_IMAGE_CONFIG_H__
+
+#include <linux/types.h>
+
+#define GCIP_FW_NUM_VERSIONS 4
+#define GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS 22
+#define GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS 5
+
+#define GCIP_FW_PRIV_LEVEL_GSA 0
+#define GCIP_FW_PRIV_LEVEL_TZ 1
+#define GCIP_FW_PRIV_LEVEL_NS 2
+
+/*
+ * The image configuration attached to the signed firmware.
+ */
+struct gcip_image_config {
+ __u32 carveout_base;
+ __u32 firmware_base;
+ __u32 firmware_size;
+ __u32 firmware_versions[GCIP_FW_NUM_VERSIONS];
+ __u32 config_version;
+ __u32 privilege_level;
+ __u32 remapped_region_start;
+ __u32 remapped_region_size;
+ __u32 num_iommu_mappings;
+ struct {
+ /* Device virtual address */
+ __u32 virt_address;
+ /*
+ * Encodes a 12-bit aligned address and the corresponding size
+ * into a 32-bit value.
+ * Detailed encoding method is defined in gcip-image-config.c.
+ */
+ __u32 image_config_value;
+ } iommu_mappings[GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS];
+ __u32 remapped_data_start;
+ __u32 remapped_data_size;
+ __u32 num_ns_iommu_mappings;
+ __u32 ns_iommu_mappings[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
+} __packed;
+
+#define GCIP_IMAGE_CONFIG_FLAGS_SECURE (1u << 0)
+
+struct gcip_image_config_ops {
+ /*
+ * Adds an IOMMU mapping from @daddr to @paddr with size @size.
+ *
+ * It is ensured that there is no overflow on @paddr + @size before calling this function.
+ *
+ * @flags is a bit-field with the following attributes:
+ * [0:0] - Security. 1 for secure and 0 for non-secure.
+ * [31:1] - Reserved.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ * Mandatory.
+ */
+ int (*map)(void *data, dma_addr_t daddr, phys_addr_t paddr, size_t size,
+ unsigned int flags);
+ /*
+ * Removes the IOMMU mapping previously added by @map.
+ *
+ * Mandatory.
+ */
+ void (*unmap)(void *data, dma_addr_t daddr, size_t size, unsigned int flags);
+};
+
+struct gcip_image_config_parser {
+ struct device *dev;
+ void *data; /* User-specified data, will be passed to ops. */
+ const struct gcip_image_config_ops *ops;
+ /* The last image config being successfully parsed. */
+ struct gcip_image_config last_config;
+};
+
+/*
+ * Initializes the image configuration parser.
+ *
+ * @dev is only used for logging.
+ * @data will be passed to operations.
+ *
+ * Returns 0 on success. Returns -EINVAL if any mandatory operation is NULL.
+ */
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data);
+
+/*
+ * Parses the image configuration and adds specified IOMMU mappings by calling pre-registered
+ * operations.
+ *
+ * The number of mappings to be added differs according to the value of
+ * @config->privilege_level:
+ * - GCIP_FW_PRIV_LEVEL_NS:
+ *   Both @iommu_mappings and @ns_iommu_mappings will be added, because GCIP_FW_PRIV_LEVEL_NS means
+ *   the firmware will run in non-secure mode and all transactions will go through the non-secure
+ *   IOMMU.
+ * - Otherwise:
+ *   Only @ns_iommu_mappings are considered. TZ/GSA is responsible for programming the secure IOMMU
+ *   with the secure mappings in @iommu_mappings.
+ *
+ * Before parsing the newly passed @config, the mappings of the last config (stored internally by
+ * @parser) are reverted. If any mapping in the new config fails to be mapped, the just-reverted
+ * last config is mapped again, i.e. on error this function keeps the mapping state the same as it
+ * was before the call. If the IOMMU state is somehow corrupted and rolling back to the last image
+ * config also fails, only an error is logged. See the pseudo code below:
+ *
+ * gcip_image_config_parse(config):
+ * unmap(last_image_config)
+ * if ret = map(config) fails:
+ * LOG("Failed to map image config, rolling back to the last image config.")
+ * if map(last_image_config) fails:
+ * LOG("Failed to roll back the last image config.")
+ * return ret
+ * else:
+ * last_image_config = config
+ * return SUCCESS
+ *
+ * As a special case, if the content of @config is identical to the last successfully parsed image
+ * config, this function returns 0 immediately without removing or adding any mappings.
+ *
+ * Returns 0 on success. Otherwise a negative errno, usually the one returned by
+ * gcip_image_config_ops.map. On error, no new mappings specified in @config are added.
+ */
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config);
+
+/*
+ * Clears the mappings specified in the last image config.
+ *
+ * It is valid to call this function before any image config has been successfully parsed, or when
+ * the last image config has already been cleared, in which case this function is a no-op.
+ */
+void gcip_image_config_clear(struct gcip_image_config_parser *parser);
+
+/*
+ * Returns whether the privilege level specified by @config is non-secure.
+ */
+static inline bool gcip_image_config_is_ns(struct gcip_image_config *config)
+{
+ return config->privilege_level == GCIP_FW_PRIV_LEVEL_NS;
+}
+
+#endif /* __GCIP_IMAGE_CONFIG_H__ */
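
A minimal sketch of a gcip_image_config_ops implementation backed directly by the IOMMU API (illustrative; example_ctx is hypothetical, and the five-argument iommu_map() of pre-6.3 kernels is assumed). Secure mappings are skipped since, per the comments above, TZ/GSA programs the secure IOMMU:

#include <linux/iommu.h>

#include <gcip/gcip-image-config.h>

struct example_ctx {
	struct iommu_domain *domain;
};

static int example_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
		       size_t size, unsigned int flags)
{
	struct example_ctx *ctx = data;

	if (flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE)
		return 0; /* Programmed by TZ/GSA, not by the kernel. */
	return iommu_map(ctx->domain, daddr, paddr, size,
			 IOMMU_READ | IOMMU_WRITE);
}

static void example_unmap(void *data, dma_addr_t daddr, size_t size,
			  unsigned int flags)
{
	struct example_ctx *ctx = data;

	if (!(flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE))
		iommu_unmap(ctx->domain, daddr, size);
}

static const struct gcip_image_config_ops example_ops = {
	.map = example_map,
	.unmap = example_unmap,
};

The ops would then be registered with gcip_image_config_parser_init(&parser, &example_ops, dev, &ctx) and applied with gcip_image_config_parse(&parser, config).
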
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
new file mode 100644
index 0000000..bda1b40
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_KCI_H__
+#define __GCIP_KCI_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_KCI_STATUS_OK GCIP_MAILBOX_STATUS_OK
+/*
+ * gcip_kci#mailbox.wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_KCI_STATUS_WAITING_RESPONSE GCIP_MAILBOX_STATUS_WAITING_RESPONSE
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_handle_response() for details.
+ */
+#define GCIP_KCI_STATUS_NO_RESPONSE GCIP_MAILBOX_STATUS_NO_RESPONSE
+
+/*
+ * Command/response sequence numbers are capped at half of the 64-bit value range. The second half
+ * is reserved for incoming requests from firmware, which are tagged with the MSB set.
+ */
+#define GCIP_KCI_REVERSE_FLAG (0x8000000000000000ull)
+
+/* Command/response queue elements for KCI. */
+
+struct gcip_kci_dma_descriptor {
+ u64 address;
+ u32 size;
+ u32 flags;
+};
+
+struct gcip_kci_command_element {
+ /*
+ * Set by gcip_kci_push_cmd() in case of KCI cmd and copied from the RKCI cmd in case of
+ * RKCI response.
+ */
+ u64 seq;
+ u16 code;
+ u16 reserved[3]; /* Explicit padding, does not affect alignment. */
+ struct gcip_kci_dma_descriptor dma;
+} __packed;
+
+struct gcip_kci_response_element {
+ u64 seq;
+ u16 code;
+ /*
+ * Reserved for host use - firmware can't touch this.
+ * If a value is written here it will be discarded and overwritten during response
+ * processing. However, when repurposed as an RKCI command, the FW can set this field.
+ */
+ u16 status;
+ /*
+ * Return value is not currently needed by KCI command responses.
+ * For reverse KCI commands this is set as value2.
+ */
+ u32 retval;
+} __packed;
+
+/*
+ * Definition of code in command elements.
+ * Code for KCI is a 16-bit unsigned integer.
+ */
+enum gcip_kci_code {
+ GCIP_KCI_CODE_ACK = 0,
+ GCIP_KCI_CODE_UNMAP_BUFFER = 1,
+ GCIP_KCI_CODE_MAP_LOG_BUFFER = 2,
+ GCIP_KCI_CODE_JOIN_GROUP = 3,
+ GCIP_KCI_CODE_LEAVE_GROUP = 4,
+ GCIP_KCI_CODE_MAP_TRACE_BUFFER = 5,
+ GCIP_KCI_CODE_SHUTDOWN = 7,
+ GCIP_KCI_CODE_GET_DEBUG_DUMP = 8,
+ GCIP_KCI_CODE_OPEN_DEVICE = 9,
+ GCIP_KCI_CODE_CLOSE_DEVICE = 10,
+ GCIP_KCI_CODE_FIRMWARE_INFO = 11,
+ GCIP_KCI_CODE_GET_USAGE = 12,
+ GCIP_KCI_CODE_NOTIFY_THROTTLING = 13,
+ GCIP_KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
+ GCIP_KCI_CODE_ALLOCATE_VMBOX = 15,
+ GCIP_KCI_CODE_RELEASE_VMBOX = 16,
+ GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17,
+ GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18,
+
+ GCIP_KCI_CODE_RKCI_ACK = 256,
+};
+
+/*
+ * Definition of reverse KCI request code ranges.
+ * Code for reverse KCI is a 16-bit unsigned integer.
+ * The first half is reserved for the chip specific codes and the generic codes can use the
+ * second half.
+ */
+enum gcip_reverse_kci_code {
+ GCIP_RKCI_CHIP_CODE_FIRST = 0,
+ GCIP_RKCI_CHIP_CODE_LAST = 0x7FFF,
+ GCIP_RKCI_GENERIC_CODE_FIRST = 0x8000,
+ GCIP_RKCI_FIRMWARE_CRASH = GCIP_RKCI_GENERIC_CODE_FIRST + 0,
+ GCIP_RKCI_JOB_LOCKUP = GCIP_RKCI_GENERIC_CODE_FIRST + 1,
+ GCIP_RKCI_GENERIC_CODE_LAST = 0xFFFF,
+};
+
+/*
+ * Definition of code in response elements.
+ * It is a 16-bit unsigned integer.
+ */
+enum gcip_kci_error {
+ GCIP_KCI_ERROR_OK = 0, /* Not an error; returned on success. */
+ GCIP_KCI_ERROR_CANCELLED = 1,
+ GCIP_KCI_ERROR_UNKNOWN = 2,
+ GCIP_KCI_ERROR_INVALID_ARGUMENT = 3,
+ GCIP_KCI_ERROR_DEADLINE_EXCEEDED = 4,
+ GCIP_KCI_ERROR_NOT_FOUND = 5,
+ GCIP_KCI_ERROR_ALREADY_EXISTS = 6,
+ GCIP_KCI_ERROR_PERMISSION_DENIED = 7,
+ GCIP_KCI_ERROR_RESOURCE_EXHAUSTED = 8,
+ GCIP_KCI_ERROR_FAILED_PRECONDITION = 9,
+ GCIP_KCI_ERROR_ABORTED = 10,
+ GCIP_KCI_ERROR_OUT_OF_RANGE = 11,
+ GCIP_KCI_ERROR_UNIMPLEMENTED = 12,
+ GCIP_KCI_ERROR_INTERNAL = 13,
+ GCIP_KCI_ERROR_UNAVAILABLE = 14,
+ GCIP_KCI_ERROR_DATA_LOSS = 15,
+ GCIP_KCI_ERROR_UNAUTHENTICATED = 16,
+};
+
+/* Type of the chip of the offload vmbox to be linked. */
+enum gcip_kci_offload_chip_type {
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU = 0,
+};
+
+/*
+ * Reason for triggering the CMD doorbell.
+ * The CMD doorbell is triggered either when a CMD is pushed or when a RESP that might block the FW
+ * is consumed.
+ */
+enum gcip_kci_doorbell_reason {
+ GCIP_KCI_PUSH_CMD,
+ GCIP_KCI_CONSUME_RESP,
+};
+
+/* Struct to hold a circular buffer for incoming KCI responses. */
+struct gcip_reverse_kci {
+ /* Reverse kci buffer head. */
+ unsigned long head;
+ /* Reverse kci buffer tail. */
+ unsigned long tail;
+ /*
+ * Maximum number of outstanding KCI requests from firmware.
+ * This is used to size a circular buffer, so it must be a power of 2.
+ */
+ u32 buffer_size;
+ struct gcip_kci_response_element *buffer;
+ /* Lock to push elements in the buffer from the interrupt handler. */
+ spinlock_t producer_lock;
+ /* Lock to pop elements from the buffer in the worker. */
+ spinlock_t consumer_lock;
+ /* Worker to handle responses. */
+ struct work_struct work;
+};
+
+struct gcip_kci;
+
+/*
+ * KCI operators.
+ * For in_interrupt() context, see the implementation of gcip_kci_handle_irq for details.
+ */
+struct gcip_kci_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_kci *kci, u32 inc);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_kci *kci);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_kci *kci, u32 inc);
+ /*
+ * Rings the doorbell.
+ * Context: normal.
+ */
+ void (*trigger_doorbell)(struct gcip_kci *kci, enum gcip_kci_doorbell_reason);
+
+ /* Optional. */
+ /*
+ * Reverse KCI handler called by the worker. Only required if reverse kci is enabled.
+ * Context: normal.
+ */
+ void (*reverse_kci_handle_response)(struct gcip_kci *kci,
+ struct gcip_kci_response_element *resp);
+ /*
+ * Usage updater called by the worker.
+ * Context: normal.
+ */
+ int (*update_usage)(struct gcip_kci *kci);
+};
+
+struct gcip_kci {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+ /* Mailbox used by KCI. */
+ struct gcip_mailbox mailbox;
+ /* Protects cmd_queue. */
+ struct mutex cmd_queue_lock;
+ /* Protects resp_queue. */
+ spinlock_t resp_queue_lock;
+ /* Queue for waiting for the response doorbell to be rung. */
+ wait_queue_head_t resp_doorbell_waitq;
+ /* Protects wait_list. */
+ spinlock_t wait_list_lock;
+ /* Worker of consuming responses. */
+ struct work_struct work;
+ /* Handler for reverse (firmware -> kernel) requests. */
+ struct gcip_reverse_kci rkci;
+ /* Worker that sends update usage KCI. */
+ struct work_struct usage_work;
+ /* KCI operators. */
+ const struct gcip_kci_ops *ops;
+ /* Private data. */
+ void *data;
+};
+
+/*
+ * Arguments for gcip_kci_init.
+ *
+ * For the following arguments, see struct gcip_kci and struct gcip_reverse_kci for details:
+ * `dev`, `rkci_buffer_size`, `ops` and `data`.
+ *
+ * For the following arguments, see struct gcip_mailbox for details; they will be passed to the
+ * struct gcip_mailbox via struct gcip_mailbox_args internally:
+ * `dev`, `cmd_queue`, `resp_queue`, `queue_wrap_bit` and `timeout`.
+ */
+struct gcip_kci_args {
+ struct device *dev;
+ void *cmd_queue;
+ void *resp_queue;
+ u32 queue_wrap_bit;
+ u32 rkci_buffer_size;
+ u32 timeout;
+ const struct gcip_kci_ops *ops;
+ void *data;
+};
+
+/* Initializes a KCI object. */
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);
+
+/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci);
+
+/*
+ * Releases KCI.
+ * Caller must call gcip_kci_cancel_work_queues before calling gcip_kci_release.
+ */
+void gcip_kci_release(struct gcip_kci *kci);
+
+/*
+ * Pushes an element to the cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of the response, or a negative errno on error.
+ */
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd);
+
+/*
+ * Pushes an element to the cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of the response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp);
+
+/*
+ * Interrupt handler.
+ * This function should be called when the interrupt of KCI mailbox is fired.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci);
+
+/*
+ * Schedules a usage update worker.
+ *
+ * For functions that don't require the usage to be updated immediately, use this function instead
+ * of update_usage in struct gcip_kci_ops.
+ */
+void gcip_kci_update_usage_async(struct gcip_kci *kci);
+
+/* Gets the KCI private data. */
+static inline void *gcip_kci_get_data(struct gcip_kci *kci)
+{
+ return kci->data;
+}
+
+/* Returns the element size according to @type. */
+static inline u32 gcip_kci_queue_element_size(enum gcip_mailbox_queue_type type)
+{
+ if (type == GCIP_MAILBOX_CMD_QUEUE)
+ return sizeof(struct gcip_kci_command_element);
+ else
+ return sizeof(struct gcip_kci_response_element);
+}
+
+static inline u64 gcip_kci_get_cur_seq(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_cur_seq(&kci->mailbox);
+}
+
+static inline struct gcip_kci_command_element *gcip_kci_get_cmd_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_command_element *)gcip_mailbox_get_cmd_queue(&kci->mailbox);
+}
+
+static inline struct gcip_kci_response_element *gcip_kci_get_resp_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_response_element *)gcip_mailbox_get_resp_queue(&kci->mailbox);
+}
+
+static inline u64 gcip_kci_get_queue_wrap_bit(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_queue_wrap_bit(&kci->mailbox);
+}
+
+static inline struct list_head *gcip_kci_get_wait_list(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_wait_list(&kci->mailbox);
+}
+
+static inline u32 gcip_kci_get_timeout(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_timeout(&kci->mailbox);
+}
+
+static inline unsigned long gcip_rkci_get_head(struct gcip_kci *kci)
+{
+ return kci->rkci.head;
+}
+
+static inline unsigned long gcip_rkci_get_tail(struct gcip_kci *kci)
+{
+ return kci->rkci.tail;
+}
+
+#endif /* __GCIP_KCI_H__ */
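
A short sketch of issuing a KCI command synchronously (illustrative; assumes @kci was initialized with gcip_kci_init() and that the return value is the response code, as documented above):

#include <linux/errno.h>

#include <gcip/gcip-kci.h>

static int example_shutdown(struct gcip_kci *kci)
{
	struct gcip_kci_command_element cmd = {
		/* @seq is filled in by gcip_kci_push_cmd() internally. */
		.code = GCIP_KCI_CODE_SHUTDOWN,
	};
	int ret;

	ret = gcip_kci_send_cmd(kci, &cmd);
	if (ret < 0)
		return ret; /* e.g. -ETIMEDOUT if the FW never answered. */
	if (ret != GCIP_KCI_ERROR_OK)
		return -EIO; /* The firmware rejected the command. */
	return 0;
}
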
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
new file mode 100644
index 0000000..e81cfb9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MAILBOX_H__
+#define __GCIP_MAILBOX_H__
+
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define CIRC_QUEUE_WRAPPED(idx, wrap_bit) ((idx) & (wrap_bit))
+#define CIRC_QUEUE_INDEX_MASK(wrap_bit) ((wrap_bit) - 1)
+#define CIRC_QUEUE_VALID_MASK(wrap_bit) (CIRC_QUEUE_INDEX_MASK(wrap_bit) | (wrap_bit))
+#define CIRC_QUEUE_REAL_INDEX(idx, wrap_bit) ((idx) & CIRC_QUEUE_INDEX_MASK(wrap_bit))
+
+#define CIRC_QUEUE_MAX_SIZE(wrap_bit) ((wrap_bit) - 1)
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_MAILBOX_STATUS_OK (0)
+/*
+ * gcip_mailbox#wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_MAILBOX_STATUS_WAITING_RESPONSE (1)
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_consume_wait_list() for details.
+ */
+#define GCIP_MAILBOX_STATUS_NO_RESPONSE (2)
+
+/* To specify the operation is toward cmd or resp queue. */
+enum gcip_mailbox_queue_type { GCIP_MAILBOX_CMD_QUEUE, GCIP_MAILBOX_RESP_QUEUE };
+
+/* Utilities of circular queue operations */
+
+/*
+ * Returns the number of elements in a circular queue given its @head, @tail,
+ * @queue_size, and @wrap_bit.
+ */
+static inline u32 gcip_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit)
+{
+ u32 ret;
+
+ if (CIRC_QUEUE_WRAPPED(tail, wrap_bit) != CIRC_QUEUE_WRAPPED(head, wrap_bit))
+ ret = queue_size - CIRC_QUEUE_REAL_INDEX(head, wrap_bit) +
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit);
+ else
+ ret = tail - head;
+
+ if (unlikely(ret > queue_size))
+ return 0;
+
+ return ret;
+}
+
+/* Increases @index of a circular queue by @inc. */
+static inline u32 gcip_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit)
+{
+ u32 new_index = CIRC_QUEUE_REAL_INDEX(index, wrap_bit) + inc;
+
+ if (unlikely(new_index >= queue_size))
+ return (index + inc - queue_size) ^ wrap_bit;
+ else
+ return index + inc;
+}
+
+/*
+ * Checks if @size is a valid circular queue size, which should be a positive
+ * number and less than or equal to CIRC_QUEUE_MAX_SIZE(wrap_bit).
+ */
+static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
+{
+ if (!size || size > CIRC_QUEUE_MAX_SIZE(wrap_bit))
+ return false;
+ return true;
+}
+
+struct gcip_mailbox;
+
+/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
+struct gcip_mailbox_resp_awaiter {
+ /* Response. */
+ void *resp;
+ /* The work which will be executed when the timeout occurs. */
+ struct delayed_work timeout_work;
+ /*
+ * If this response times out, this pointer to the owning mailbox is
+ * needed to delete this response from the list of pending responses.
+ */
+ struct gcip_mailbox *mailbox;
+ /* User-defined data. */
+ void *data;
+ /* Reference count. */
+ refcount_t refs;
+ /*
+ * The callback for releasing the @data.
+ * It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
+ */
+ void (*release_data)(void *data);
+};
+
+/*
+ * Mailbox operators.
+ * For in_interrupt() context, see the implementation of gcip_mailbox_handle_irq for details.
+ */
+struct gcip_mailbox_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of cmd_queue. If @try is true, "_trylock" variants may be used (though
+ * @try can also be ignored). Returns 1 on success and 0 on failure. This callback will be
+ * called in the following situations:
+ * - Enqueuing a command to the cmd_queue.
+ * The lock can be a mutex lock or a spin lock, and it will be released by calling the
+ * `release_cmd_queue_lock` callback.
+ * Context: normal.
+ */
+ int (*acquire_cmd_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of cmd_queue which is acquired by calling `acquire_cmd_queue_lock`.
+ * Context: normal.
+ */
+ void (*release_cmd_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ u64 (*get_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * Sets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ void (*set_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd, u64 seq);
+ /*
+ * Gets the code of @cmd queue element.
+ * Context: normal.
+ */
+ u32 (*get_cmd_elem_code)(struct gcip_mailbox *mailbox, void *cmd);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of resp_queue. If @try is true, "_trylock" variants may be used (though
+ * @try can also be ignored). Returns 1 on success and 0 on failure. This callback will be
+ * called in the following situations:
+ * - Fetching response(s) from the resp_queue.
+ * The lock can be a mutex lock or a spin lock. However, if @try is honored and a "_trylock"
+ * variant is used, the lock must be a spin lock.
+ * The lock will be released by calling `release_resp_queue_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ int (*acquire_resp_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of resp_queue which is acquired by calling `acquire_resp_queue_lock`.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_resp_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u64 (*get_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp, u64 seq);
+ /*
+ * Gets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u16 (*get_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp, u16 status);
+
+ /*
+ * Acquires the lock of wait_list. If @irqsave is true, "_irqsave" variants may be used to
+ * save the irq state to @flags (though @irqsave can also be ignored).
+ * This callback will be called in the following situations:
+ * - Pushing a waiting response to the @mailbox->wait_list.
+ * - Deleting a waiting response from the @mailbox->wait_list.
+ * - Handling an arrived response and deleting it from the @mailbox->wait_list.
+ * - Flushing the asynchronous responses in the @mailbox->wait_list when releasing the @mailbox.
+ * The lock can be a mutex lock or a spin lock. However, if @irqsave is honored and an
+ * "_irqsave" variant is used, the lock must be a spin lock.
+ * The lock will be released by calling `release_wait_list_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ void (*acquire_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags);
+ /*
+ * Releases the lock of wait_list which is acquired by calling `acquire_wait_list_lock`.
+ * If @irqrestore is true, restores the irq state from the @flags saved by
+ * `acquire_wait_list_lock`. It can be ignored if @irqsave was not honored there.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags);
+
+ /* Optional. */
+ /*
+ * Waits until the cmd queue of @mailbox has space available for the command. If the queue
+ * has space, returns 0. Otherwise, returns a non-zero error. Depending on the
+ * implementation, it is okay to return an error right away when the queue is full. If this
+ * callback returns an error, the `gcip_mailbox_send_cmd` or `gcip_mailbox_put_cmd` function
+ * will return that error too. This callback is called with the `cmd_queue_lock` being held.
+ *
+ * Note: if this callback is NULL, the mailbox simply checks the fullness of cmd_queue and
+ * returns -EAGAIN right away if it is full. Please refer to the implementation of the
+ * `gcip_mailbox_enqueue_cmd` function.
+ *
+ * Context: normal.
+ */
+ int (*wait_for_cmd_queue_not_full)(struct gcip_mailbox *mailbox);
+ /*
+ * This callback will be called after putting the @cmd into the command queue. It can be
+ * used for triggering the doorbell. Also, @mailbox->cur_seq will be increased by the return
+ * value. On error, it returns a negative value and @mailbox->cur_seq will not be changed.
+ * If this callback is not defined, @mailbox->cur_seq will be increased by 1 each time a cmd
+ * enters the queue. This is called with the `cmd_queue_lock` being held.
+ * Context: normal.
+ */
+ int (*after_enqueue_cmd)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * This callback will be called after fetching responses. It can be used to signal threads
+ * that are waiting for the response queue to be consumed. This is called without holding
+ * any locks.
+ * - @num_resps: the number of fetched responses.
+ * Context: normal and in_interrupt().
+ */
+ void (*after_fetch_resps)(struct gcip_mailbox *mailbox, u32 num_resps);
+ /*
+ * This callback will be called before handling each fetched response. If this callback is
+ * not defined or returns true, the mailbox will handle the @resp normally. If the @resp
+ * should not be handled, it should return false. This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ bool (*before_handle_resp)(struct gcip_mailbox *mailbox, const void *resp);
+ /*
+ * Handles an asynchronous response which arrives successfully. How to handle it depends on
+ * the chip implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called with the `wait_list_lock` being held.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Handles the timed out asynchronous response. How to handle it depends on the chip
+ * implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_timedout)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Cleans up an asynchronous response which has neither arrived nor timed out.
+ * The @awaiter should be marked as unprocessable so that it will not be processed by
+ * the `handle_awaiter_arrived` or `handle_awaiter_timedout` callbacks in race
+ * conditions. @awaiter does not have to be released by calling the
+ * `gcip_mailbox_release_awaiter` function; it will be released internally. This is
+ * called with the `wait_list_lock` being held.
+ * Context: normal.
+ */
+ void (*flush_awaiter)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Releases the @data which was passed to the `gcip_mailbox_put_cmd` function. This is
+ * called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_awaiter_data)(void *data);
+};
+
+struct gcip_mailbox {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+ /* Wrap bit for both cmd and resp queue. */
+ u64 queue_wrap_bit;
+ /* Cmd sequence number. */
+ u64 cur_seq;
+
+ /* Cmd queue pointer. */
+ void *cmd_queue;
+ /* Size of element of cmd queue. */
+ u32 cmd_elem_size;
+
+ /* Resp queue pointer. */
+ void *resp_queue;
+ /* Size of element of resp queue. */
+ u32 resp_elem_size;
+
+ /* List of commands that need to wait for responses. */
+ struct list_head wait_list;
+ /* Queue for waiting for the wait_list to be consumed. */
+ wait_queue_head_t wait_list_waitq;
+
+ /* Mailbox timeout in milliseconds. */
+ u32 timeout;
+ /* Mailbox operators. */
+ const struct gcip_mailbox_ops *ops;
+ /* User-defined data. */
+ void *data;
+
+ /*
+ * The flag to specify that sequence numbers of command responses are not
+ * required to be in order.
+ */
+ bool ignore_seq_order;
+};
+
+/* Arguments for gcip_mailbox_init. See struct gcip_mailbox for details. */
+struct gcip_mailbox_args {
+ struct device *dev;
+ u32 queue_wrap_bit;
+
+ void *cmd_queue;
+ u32 cmd_elem_size;
+
+ void *resp_queue;
+ u32 resp_elem_size;
+
+ u32 timeout;
+ const struct gcip_mailbox_ops *ops;
+ void *data;
+
+ bool ignore_seq_order;
+};
+
+/* Initializes a mailbox object. */
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args);
+
+/* Releases a mailbox object which was initialized by gcip_mailbox_init. */
+void gcip_mailbox_release(struct gcip_mailbox *mailbox);
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ * To consume response queue and get responses, this function should be used as deferred work
+ * such as `struct work_struct` or `struct kthread_work`.
+ *
+ * Note: this worker is scheduled in the IRQ handler. To prevent use-after-free or race-condition
+ * bugs, cancel all works before freeing the mailbox.
+ */
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox);
+
+/*
+ * Pushes an element to the cmd queue and waits for the response (synchronous).
+ * Returns -ETIMEDOUT if no response is received within mailbox->timeout msecs.
+ *
+ * Returns the code of the response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes @cmd command asynchronously. This function returns an instance of
+ * `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
+ * The implementation side can cancel the asynchronous response by calling the
+ * `gcip_mailbox_cancel_awaiter` or `gcip_mailbox_cancel_awaiter_timeout` function with it.
+ *
+ * Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
+ * asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
+ * will pass the @awaiter as a parameter which is the same with the return of this function.
+ * The response can be accessed from `resp` member of it. Also, the @data passed to this function
+ * can be accessed from `data` member variable of it. The @awaiter must be released by calling
+ * the `gcip_mailbox_release_awaiter` function when it is not needed anymore.
+ *
+ * If the mailbox is released before the response arrives, all the waiting asynchronous responses
+ * will be flushed. In this case, the `flush_awaiter` callback will be called for that response
+ * and @awaiter doesn't have to be released by the implementation side.
+ * (i.e., the `gcip_mailbox_release_awaiter` function will be called internally.)
+ *
+ * The caller defines the way of cleaning up the @data to the `release_awaiter_data` callback.
+ * This callback will be called when the `gcip_mailbox_release_awaiter` function is called or
+ * the response is flushed.
+ *
+ * If this function fails to request the command, it will return an error pointer. In this case,
+ * the caller should free @data explicitly. (i.e., the `release_awaiter_data` callback will not
+ * be called.)
+ *
+ * Note: the asynchronous responses fetched from @resp_queue should be released by calling the
+ * `gcip_mailbox_release_awaiter` function.
+ *
+ * Note: if the life cycle of the mailbox is longer than that of the caller, make sure that the
+ * callbacks don't access the caller's variables after the caller has been released.
+ *
+ * Note: if you don't need the result of the response (e.g., if you pass @resp as NULL), you
+ * can release the returned awaiter right away by calling the `gcip_mailbox_release_awaiter`
+ * function.
+ */
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data);
+
+/*
+ * Cancels awaiting the asynchronous response.
+ * This function removes @awaiter from the waiting list so that it will not be handled by the
+ * arrived callback. Also, it cancels the timeout work of @awaiter synchronously. Therefore,
+ * AFTER this function returns, it is guaranteed that neither the arrived nor the timedout
+ * callback will be called for @awaiter.
+ *
+ * However, note that due to a race condition, the arrived or timedout callback can be executed
+ * BEFORE this function returns. (i.e., this function and the arrived/timedout callback are called
+ * at the same time, but the callback acquires the lock earlier.)
+ *
+ * Note: this function will cancel or wait for the completion of arrived or timedout callbacks
+ * synchronously. Therefore, make sure that the caller side doesn't hold any locks which can be
+ * acquired by the arrived or timedout callbacks.
+ *
+ * If you already got a response of @awaiter and want to ensure that timedout handler is finished,
+ * you can use the `gcip_mailbox_cancel_awaiter_timeout` function instead.
+ */
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Cancels the timeout work of the asynchronous response. Normally, either the response arrives and
+ * the timeout is canceled, or the response times out and the timeout handler executes. However,
+ * rarely, the response handler cancels the timeout handler while it is already in progress.
+ * To handle this and ensure any in-process timeout handler has been able to exit cleanly, it is
+ * recommended to call this function after fetching the asynchronous response even though the
+ * response arrived successfully.
+ *
+ * Note: this function will cancel or wait for the completion of timedout callbacks synchronously.
+ * Therefore, make sure that the caller side doesn't hold any locks which can be acquired by the
+ * timedout callbacks.
+ *
+ * If you haven't received a response for @awaiter yet and want to prevent it from being processed
+ * by the arrived and timedout callbacks, use the `gcip_mailbox_cancel_awaiter` function.
+ */
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Releases @awaiter. Every fetched (arrived or timed out) asynchronous responses should be
+ * released by calling this. It will call the `release_awaiter_data` callback internally.
+ */
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Consumes one response and handles it. This can be used to consume one response quickly and then
+ * schedule the `gcip_mailbox_consume_responses_work` work in the IRQ handler of the mailbox.
+ */
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp);
+
+/* Getters for member variables of the `struct gcip_mailbox`. */
+
+static inline u64 gcip_mailbox_get_cur_seq(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cur_seq;
+}
+
+static inline void *gcip_mailbox_get_cmd_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_queue;
+}
+
+static inline u32 gcip_mailbox_get_cmd_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_elem_size;
+}
+
+static inline void *gcip_mailbox_get_resp_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_queue;
+}
+
+static inline u32 gcip_mailbox_get_resp_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_elem_size;
+}
+
+static inline u64 gcip_mailbox_get_queue_wrap_bit(struct gcip_mailbox *mailbox)
+{
+ return mailbox->queue_wrap_bit;
+}
+
+static inline struct list_head *gcip_mailbox_get_wait_list(struct gcip_mailbox *mailbox)
+{
+ return &mailbox->wait_list;
+}
+
+static inline u32 gcip_mailbox_get_timeout(struct gcip_mailbox *mailbox)
+{
+ return mailbox->timeout;
+}
+
+static inline void *gcip_mailbox_get_data(struct gcip_mailbox *mailbox)
+{
+ return mailbox->data;
+}
+
+#endif /* __GCIP_MAILBOX_H__ */
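
A sketch of the asynchronous command flow (illustrative; the void pointers stand in for chip-specific command/response element types): push a command with an awaiter and let the arrived/timedout callbacks complete it.

#include <linux/err.h>

#include <gcip/gcip-mailbox.h>

static int example_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
			   void *resp, void *data)
{
	struct gcip_mailbox_resp_awaiter *awaiter;

	awaiter = gcip_mailbox_put_cmd(mailbox, cmd, resp, data);
	if (IS_ERR(awaiter))
		return PTR_ERR(awaiter); /* Caller frees @data itself here. */
	/*
	 * From now on the response is reported through the
	 * handle_awaiter_arrived / handle_awaiter_timedout ops, which
	 * release the awaiter with gcip_mailbox_release_awaiter() once
	 * they are done with it.
	 */
	return 0;
}
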
diff --git a/gcip-kernel-driver/include/gcip/gcip-mem-pool.h b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
new file mode 100644
index 0000000..c770300
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A simple memory allocator to help allocating reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MEM_POOL_H__
+#define __GCIP_MEM_POOL_H__
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/types.h>
+
+struct gcip_mem_pool {
+ struct device *dev;
+ struct gen_pool *gen_pool;
+ phys_addr_t base_paddr;
+ size_t granule;
+};
+
+/*
+ * Initializes the memory pool object.
+ *
+ * @pool: The memory pool object to be initialized.
+ * @dev: Used for logging only.
+ * @base_paddr: The base physical address of the pool. Must be greater than 0 and a multiple of
+ * @granule.
+ * @size: The size of the pool. @size should be a multiple of @granule.
+ * @granule: The granule when invoking the allocator. Should be a power of 2.
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ *
+ * Call gcip_mem_pool_exit() to release the resources of @pool.
+ */
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, phys_addr_t base_paddr,
+ size_t size, size_t granule);
+/*
+ * Releases resources of @pool.
+ *
+ * Note: you must release (by calling gcip_mem_pool_free) all allocations before calling this
+ * function.
+ */
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool);
+
+/*
+ * Allocates and returns the allocated physical address.
+ *
+ * @size: Size to be allocated.
+ *
+ * Returns the allocated address. Returns 0 on allocation failure.
+ */
+phys_addr_t gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size);
+/*
+ * Returns the address previously allocated by gcip_mem_pool_alloc() back to the pool.
+ *
+ * The size and address must match what was previously passed to / returned by
+ * gcip_mem_pool_alloc().
+ */
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, phys_addr_t paddr, size_t size);
+
+/*
+ * Returns the offset between @paddr and @base_paddr passed to gcip_mem_pool_init().
+ *
+ * @paddr must be a value returned by gcip_mem_pool_alloc().
+ */
+static inline size_t gcip_mem_pool_offset(struct gcip_mem_pool *pool, phys_addr_t paddr)
+{
+ return paddr - pool->base_paddr;
+}
+
+#endif /* __GCIP_MEM_POOL_H__ */
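
An illustrative allocation round-trip (the carveout @base and @size would typically come from a reserved-memory node; the SZ_4K granule and SZ_16K allocation are made up):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#include <gcip/gcip-mem-pool.h>

static int example_mem_pool(struct device *dev, phys_addr_t base, size_t size)
{
	struct gcip_mem_pool pool;
	phys_addr_t paddr;
	int ret;

	ret = gcip_mem_pool_init(&pool, dev, base, size, SZ_4K);
	if (ret)
		return ret;

	paddr = gcip_mem_pool_alloc(&pool, SZ_16K);
	if (!paddr) {
		gcip_mem_pool_exit(&pool);
		return -ENOMEM;
	}
	dev_dbg(dev, "allocated at offset %zu\n",
		gcip_mem_pool_offset(&pool, paddr));

	gcip_mem_pool_free(&pool, paddr, SZ_16K);
	gcip_mem_pool_exit(&pool);
	return 0;
}
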
diff --git a/gcip-kernel-driver/include/gcip/gcip-telemetry.h b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
new file mode 100644
index 0000000..4556291
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_TELEMETRY_H__
+#define __GCIP_TELEMETRY_H__
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/mutex.h>
+#include <linux/rwlock_types.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Log level codes used by gcip firmware. */
+#define GCIP_FW_LOG_LEVEL_VERBOSE (2)
+#define GCIP_FW_LOG_LEVEL_DEBUG (1)
+#define GCIP_FW_LOG_LEVEL_INFO (0)
+#define GCIP_FW_LOG_LEVEL_WARN (-1)
+#define GCIP_FW_LOG_LEVEL_ERROR (-2)
+
+#define GCIP_FW_DMESG_LOG_LEVEL (GCIP_FW_LOG_LEVEL_WARN)
+
+/* When log data arrives, recheck for more log data after this delay. */
+#define GCIP_TELEMETRY_LOG_RECHECK_DELAY 200 /* ms */
+
+enum gcip_telemetry_state {
+ GCIP_TELEMETRY_DISABLED = 0,
+ GCIP_TELEMETRY_ENABLED = 1,
+ GCIP_TELEMETRY_INVALID = -1,
+};
+
+/* To specify the target of operation. */
+enum gcip_telemetry_type {
+ GCIP_TELEMETRY_LOG = 0,
+ GCIP_TELEMETRY_TRACE = 1,
+};
+
+struct gcip_telemetry_header {
+ u32 head;
+ u32 size;
+ u32 reserved0[14]; /* Place head and tail into different cache lines */
+ u32 tail;
+ u32 entries_dropped; /* Number of entries dropped due to buffer full */
+ u32 reserved1[14]; /* Pad to 128 bytes in total */
+};
+
+struct gcip_log_entry_header {
+ s16 code;
+ u16 length;
+ u64 timestamp;
+ u16 crc16;
+} __packed;
+
+struct gcip_telemetry {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+
+ /*
+ * State transitioning is to prevent races with IRQ handlers, e.g. an interrupt arriving while
+ * the kernel is releasing buffers.
+ */
+ enum gcip_telemetry_state state;
+ spinlock_t state_lock; /* protects state */
+
+ struct gcip_telemetry_header *header;
+
+ struct eventfd_ctx *ctx; /* signal this to notify the runtime */
+ rwlock_t ctx_lock; /* protects ctx */
+ const char *name; /* for debugging */
+
+ struct work_struct work; /* worker for handling data */
+ /* Fallback function to call for default log/trace handling. */
+ void (*fallback_fn)(struct gcip_telemetry *tel);
+ struct mutex mmap_lock; /* protects mmapped_count */
+ long mmapped_count; /* number of VMAs that are mapped to this telemetry buffer */
+};
+
+struct gcip_kci;
+
+struct gcip_telemetry_kci_args {
+ struct gcip_kci *kci;
+ u64 addr;
+ u32 size;
+};
+
+/* Sends telemetry KCI through send kci callback and args. */
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args);
+/* Sets the eventfd for telemetry. */
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd);
+/* Unsets the eventfd for telemetry. */
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel);
+/* Fallback to log messages from the host CPU to dmesg. */
+void gcip_telemetry_fw_log(struct gcip_telemetry *log);
+/* Fallback to consume the trace buffer. */
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace);
+/* Interrupt handler to schedule the worker when the buffer is not empty. */
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel);
+/* Increases the telemetry mmap count by @dif. */
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif);
+/* Mmaps the telemetry buffer through mmap callback and args. */
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args);
+/*
+ * Initializes struct gcip_telemetry.
+ *
+ * @vaddr: Virtual address of the queue buffer.
+ * @size: Size of the queue buffer. Must be a power of 2 and greater than the size of struct
+ * gcip_telemetry_header.
+ * @fallback_fn: Fallback function to call for default log/trace handling.
+ */
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *));
+/* Exits and sets the telemetry state to GCIP_TELEMETRY_INVALID. */
+void gcip_telemetry_exit(struct gcip_telemetry *tel);
+
+#endif /* __GCIP_TELEMETRY_H__ */
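
A setup sketch tying the pieces together (illustrative; @log_buf is assumed to be a firmware-shared queue buffer whose size is a power of 2): initialize a log telemetry with the dmesg fallback and attach an eventfd for the runtime.

#include <linux/device.h>

#include <gcip/gcip-telemetry.h>

static int example_telemetry_setup(struct device *dev, struct gcip_telemetry *tel,
				   void *log_buf, size_t log_buf_size, u32 eventfd)
{
	int ret;

	ret = gcip_telemetry_init(dev, tel, "log", log_buf, log_buf_size,
				  gcip_telemetry_fw_log);
	if (ret)
		return ret;

	/* Signal the runtime through this eventfd when data arrives. */
	ret = gcip_telemetry_set_event(tel, eventfd);
	if (ret) {
		gcip_telemetry_exit(tel);
		return ret;
	}
	return 0;
}

The interrupt path would then call gcip_telemetry_irq_handler(tel) to schedule the worker, and teardown would call gcip_telemetry_unset_event() followed by gcip_telemetry_exit().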