path: root/gcip-kernel-driver/drivers
author    Aurora pro automerger <aurora-pro-automerger@google.com>  2023-01-06 06:59:41 +0000
committer davidchiang <davidchiang@google.com>  2023-01-06 08:08:49 +0000
commit    8a4b0d93e1f8b9ae23fb476eef4d43f18bc3cc1d (patch)
tree      77a55bc81515aae0827a3cdb0f2ef0d68a46f677 /gcip-kernel-driver/drivers
parent    9cc533bf4e533f90504fe355b5fcc726c1c549e0 (diff)
download  gs201-8a4b0d93e1f8b9ae23fb476eef4d43f18bc3cc1d.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'gs201-u' into 'android13-gs-pixel-5.10-udc'
gxp: amalthea uses different edgetpu path Bug: 263918299
gxp: fix uninit warn of gxp_debugfs_mailbox
gcip: style fix up
gcip: implement noncontiguous alloc Bug: 262684159
gcip: Change hard-coded magic numbers to MACROs Bug: 257300340
gcip: Update the size encoding of image config Bug: 257300340 (repeat)
gcip: unittests: implement timeout race trigger Bug: 261822585
gcip: implement reference count to the awaiter Bug: 261822585 (repeat)
gcip: implement gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat)
gcip: fix log style in gcip domain pool
gcip: Add domain pool Bug: 228907682
gcip: init ignore_seq_order of KCI
gcip: Use term awaiter instead of async_resp Bug: 249642792
gcip: constantize gcip_kci_args
gcip: use rate limiting for reverse KCI failure warning
gcip: remote unnecessary pointer cast
gcip: Check telemetry buffer size
gcip: Remove redundant error
gcip: support arbitrary seq order commands Bug: 247414706
gcip: Add gcip-telemetry Bug: 239374826
gcip: use pad/pap instead of llx for logging
gcip: implement gcip image config utilities Bug: 243500340
gcip: Don't pass mailbox param when calling the release_async_resp_data callback Bug: 239804137
gcip: Call release_data callback instead of ops->release_async_resp_data Bug: 239804137 (repeat)
gcip: fix typo 'timedout'
gcip: Implement gcip-firmware.c Bug: 239637765
gcip: Free arrived or timedout, but not handled async_resp Bug: 239010918
gcip: Set base_paddr of gcip_mem_pool Bug: 236673496
gcip: Adopt gcip-mailbox to gcip-kci Bug: 236679300
gcip: Implement the generalized mailbox (gcip-mailbox.c) Bug: 236679300 (repeat)
gcip: Make gcip_kci_push_cmd static Bug: 223764481
gcip: implement gcip-mem-pool Bug: 236673496 (repeat)
gcip: Add KCI support Bug: 223764481 (repeat)
gcip: unittests: Add initial Kconfig and Makefile Bug: 222416109
gcip: Add initial Kconfig and Makefile Bug: 222416109 (repeat)
GCIP_MAIN_REV_ID: a1895a61a873d1f52896f955e09d2e263b22a0e8
gxp: bump GXP version to 1.7
gxp: remove debugfs earlier than before_remove Bug: 263830035
gxp: adopt updated gxp_client_allocate_virtual_device
gxp: debugfs utilizes UCI in the MCU mode Bug: 263830035 (repeat)
gxp: call gxp_client_* funcs from firmware_run debugfs Bug: 263830035 (repeat)
gxp: apply clang-format to gxp-debugfs.c
gxp: update the interface of execute_cmd callback Bug: 263830035 (repeat)
gxp: power off core on mbox allocation failure Bug: 264184974
gxp: Set special client ID to secure VD Bug: 263685745
gxp: Allocate secure VD Bug: 263836991
gxp: Add a secure VD field to struct gxp_dev Bug: 263685535
gxp: Add 'flags' to struct gxp_virtual_device_ioctl Bug: 263836981
gxp: Remove warnings during compilation Bug: 264010198
gxp: cleanup common platform probe Bug: 263844135
gxp: amalthea use GCIP Bug: 263918299 (repeat)
gxp: update Makefile for kleaf support
gxp: use PMU register Bug: 263830026
gxp: map private firmware data region Bug: 261797596
gxp: introduce gxp_dma_map_iova_sgt Bug: 262825536
gxp: allocate per VD firmware data Bug: 261797596 (repeat)
gxp: Correct the lpm offsets calculations Bug: 263239197
gxp: Move shareability config with block power configuration Bug: 263310466
gxp: Enable new telemetry Bug: 247955426
gxp: enable dynamic slice index Bug: 255706432
gxp: Add support for core_boot insmod parameter Bug: 251612313
gxp: Add first_open support to VMBox Bug: 263296400
gxp: new UCI ioctl interfaces Bug: 261667704
gxp: remove unused virt_core in vd_suspend
gxp: move system memory rates to config files Bug: 262964769
gxp: increase shared buffer size from 192K to 512K Bug: 262686729
gxp: introduce wait_queue for waiting responses Bug: 261822585 (repeat)
gxp: rename queue to dest_queue Bug: 261822585 (repeat)
gxp: adopt gxp-mcu-platform Bug: 249918544
gxp: revert tpu mapped checking logic
gxp: Add telemetry buffer size fields to specs ioctl Bug: 259404526
gxp: fix error handling of gxp_map_tpu_mbx_queue
gxp: fix logic of checking tpu_file when destroying clients
gxp: europa: add mailbox array csrs Bug: 261150717
gxp: Implement mmap for new telemetry flow. Bug: 259404466
gxp: add 'features' field to specs Bug: 256073820
gxp: fix up reserved fields in gxp_specs_ioctl
gxp: Add missing include Bug: 261550238
gxp: Apply gcip domain pool Bug: 228907682 (repeat)
gxp: Move mailbox CSR based calls to chip specific file Bug: 261150717 (repeat)
gxp: Make mailbox csr offset chip specific Bug: 261150717 (repeat)
gxp: Allocate telemetry buffers during init Bug: 247955426 (repeat)
gxp: introduce gxp-mcu-fs Bug: 249918544 (repeat)
gxp: move client_has_vd check to client.c Bug: 249918544 (repeat)
gxp: use entry point in elf header instead of fixed IOVA Bug: 260647260
gxp: Add _legacy postfix to telemetry function Bug: 247955426 (repeat)
gxp: Refactor LPM to accommodate new chips Bug: 254168276
gxp: introduce gxp_pm_update_pm_qos Bug: 239638427
gxp: map tpu mbx queue even in MCU mode Bug: 237624453
gxp: fetch segment boundaries from ELF header Bug: 259197130
gxp: add ifdef guard for edgetpu_ext interface Bug: 259357745
gxp: set VD credit to 256 Bug: 257137038
gxp: Indroduce config-pwr-state.h Bug: 258154981
gxp: add cmd credit to virtual device Bug: 257137038 (repeat)
gxp: fix the default domain cache
gxp: revert ignore_seq_order settings
gxp: Map TPU mbx only in the direct mode Bug: 249440369
gxp: Forward the thermal request to MCU when possible Bug: 255613035
gxp: Introduce gxp_wakelock_acquire_if_powered Bug: 255613035 (repeat)
gxp: Add GXP_HAS_MCU config
gxp: Roll back aur_power_state2rate changes in thermal
gxp: Use aur_power_state2rate for thermal
gxp: Fix reader/writer lock for vd allocation Bug: 257049139
gxp: Add a static debug pointer to driver state Bug: 255451381
gxp: fixup headers in platform.c
gxp: remove obsolete ACPI support Bug: b/230701592
gxp: Add namespace import for DMA_BUF for 5.16 Bug: 232003048
gxp: Handle Core TelemetryRead rcki Bug: 249096610
gxp: Add GXP_HAS_LAP to config Bug: 249227451
gxp: remove explicit values of LPM PSM enum
gxp: temporarily set slice_index to 0 Bug: 255706432 (repeat) Bug: 242011394
gxp: do power votes in VD wakelock acquisition Bug: 253990922, 253555787
gxp: Remove redundant state settings Bug: 189396709
gxp: LPM interfaces accepts enum psm Bug: 254168276 (repeat)
gxp: Reduce mailbox timeout to 1s Bug: 250265514
gxp: Adopt awaiter of GCIP Bug: 249642792 (repeat)
gxp: disable core interrupts before core releasing Bug: 252915360
gxp: remove response consuming callbacks Bug: 245455607
gxp: abstract GCIP send/put funcs from gxp-mailbox Bug: 245455607 (repeat)
gxp: re-purpose GXP_LEGACY_MAILBOX Bug: 245455607 (repeat)
gxp: use const args for mailbox init
gxp: bump uapi version to 1.5
gxp: remove unnecessary coherent buf cast
gxp: fix typo an core
gxp: Call MCU telemetry irq handler when KCI irq is fired BUg: 237099103
gxp: move DVFS macro out from lpm.h
gxp: use BIT() for shifting operations
gxp: introduce coherent buffer data structure Bug: 248436918
gxp: add europa platform driver Bug: 235918085
gxp: address review comments on gdomain alloc
gxp: store client IDs in gxp_virtual_device Bug: 246520556
gxp: clang-format gxp.h
gxp: introduce {after,before}_{map,unmap}_tpu_mbx_queue callbacks Bug: 246520556 (repeat)
gxp: call GCIP consume funcs from gxp-mailbox Bug: 245455607 (repeat)
gxp: manage gcip_{mailbox,kci} from gxp_mailbox Bug: 245455607 (repeat)
gxp: pass data size and wrap bit to gxp_mailbox_args Bug: 245455607 (repeat)
gxp: pass GCIP operators to gxp_mailbox Bug: 245455607 (repeat)
gxp: introduce enum gxp_mailbox_type Bug: 245455607 (repeat)
gxp: call exposed response consuming funcs of gxp-mailbox-impl.h Bug: 245455607 (repeat)
gxp: expose response consuming functions to gxp-mailbox-impl.h Bug: 245455607 (repeat)
gxp: Add MCU telemetry support Bug: 237099103 (repeat)
gxp: Add chip specific mmap handler Bug: 237099103 (repeat)
gxp: Prefix the telemetry with core Bug: 237099103 (repeat)
gxp: Rename gxp-telemetry to gxp-core-telemetry Bug: 237099103 (repeat)
gxp: add wrappers around iommu domain Bug: 248436918 (repeat)
gxp: introduce gxp-mailbox-impl.[c|h] Bug: 237908534
gxp: Hide Amalthea only things of gxp-mailbox from Callisto Bug: 237908534 (repeat)
gxp: introduce GXP_LEGACY_MAILBOX Bug: 245455607 (repeat)
gxp: return error when gxp_fw_data_create_app fails Bug: 249402363
gxp: fix memory leak on VD allocate resp queues Bug: 247662695
gxp: Wait for PS0 before powering down BLK_AUR Bug: 247273478
gxp: Enable best-fit IOVA allocator Bug: 241190719
gxp: remove compat ioctl interfaces
gxp: remove cache invalidate of telemetry buffers Bug: 247772036 Bug: 245238253
gxp: cache invalidate on signal telemetry eventfd Bug: 247772036 (repeat)
gxp: fix passing a NULL pointer to the gxp_vd_block_ready Bug: 247660434
gxp: Sort the GXP_IOCTL defines
gxp: accept finalizing non-initalized modules Bug: 245690393
gxp: introduce IS_GXP_TEST Bug: 245690393 (repeat)
gxp: define chip_rev module param
gxp: sync with the fake core firmware Bug: 245270826
gxp: invalidate cache before fetching responses Bug: 242326098
gxp: make load_dsp_firmware return error properly Bug: 245270826 (repeat)
gxp: support VD suspend/resume in Zuma direct mode Bug: 244699959
gxp: bump version to 1.4
Revert "gxp: Update gxp.h to reflect OFF being allowed in mbox IOCTL"
Revert "gxp: remove GXP_POWER_STATE_OFF check in mbox cmd"
gxp: Removing printing the log buff Bug: 244270315
gxp: support both coherent/non-coherent mappings Bug: 205831251
gxp: fix the unused warning on gem5 platform
gxp: Update gxp.h to reflect OFF being allowed in mbox IOCTL Bug: 243737206
gxp: add wakelock_{after,before}_blk_{on,off} Bug: 241044848
gxp: Attach device after iommu domain allocated Bug: 243328707
gxp: Introduce {after,before}_vd_block_{ready,unready} callbacks Bug: 241057541
gxp: Rollback {after,before}_{allocate,release}_vd callbacks Bug: 241057541 (repeat)
gxp: Introduce the `gxp_vd_block_unready` function. Bug: 241057541 (repeat)
gxp: Make the `gxp_vd_stop` can work with READY state Bug: 241057541 (repeat)
gxp: wrap power states into a struct Bug: 242155424
gxp: fix various typos
gxp: Protect telemetry status in vd alloc/release Bug: 242145264
gxp: Fail to allocate VD if initial mapping fails Bug: 242145264 (repeat)
gxp: Use one gxp_mailbox_create_manager Bug: 242939166
gxp: Extract mailbox manager from gxp-mailbox.h Bug: 242939166 (repeat)
gxp: Implement the mailbox manager operator setting function of UCI Bug: 242178774
gxp: Add init functions for the legacy mailbox and DCI Bug: 242178774 (repeat)
gxp: Introduce `GXP_HAS_DCI` definition Bug: 242964051
gxp: fix error handling on probing
gxp: append internal headers after EXTRA_CFLAGS Bug: 242960640
gxp: remove GXP_POWER_STATE_OFF check in mbox cmd
gxp: Add power states requests callback Bug: 241782481
gxp: Allocate resources on VD creation Bug: 241206240
gxp: handle VD allocation in gxp-client.c Bug: 241206240 (repeat)
gxp: handle wakelock request in gxp-client.c Bug: 241206240 (repeat)
gxp: gxp_mapping_create accepts domain Bug: 240415495
gxp: split out gxp_vd & gxp_fw module init/destroy Bug: 193180931
gxp: fixup DVFS requests Bug: 242785262
gxp: Only (un)map telem if enabled on vd start/stop Bug: 241090227
gxp: release TPU file after VD stop Bug: 241085004
gxp: increase the ref to TPU FD on TPU buffer map Bug: 241085004 (repeat)
gxp: misc_register as the last step of probe Bug: 241718472
gxp: remove unneeded devm_kfree Bug: 241517691
gxp: Cancel last worker when power req queue is full Bug: 240533763
gxp: Map slice of shared buffer to IOMMU domain instead of whole of it Bug: 240128962
gxp: Allocate slice_index when allocating virtual device Bug: 240128962 (repeat)
gxp: Add ID allocator for the slice index of shared buffer Bug: 240128962 (repeat)
gxp: Introduce slice_index of the virtual device Bug: 240128962 (repeat)
gxp: unittests: Introduce gxp-debug-dump-test.c and enable debug dump test Bug: 241086197 Bug: 234892966
gxp: remove domain attach on VD creation Bug: 241057799
gxp: Add `num_cores` in command ioctls for UCI Bug: 232310140, 237660465
gxp: Disallow /d/gxp/firmware_run if VDs are running Bug: 240764261
gxp: remove unneeded warnings from in PM
gxp: Review feedback from 7/21 release Bug: 240315433
gxp: work around thermal_cdev_update Bug: 235433985
gxp: remove virt_core_list from gxp_dmabuf_map Bug: 240661491
gxp: remove virt_core_list from gxp_mapping Bug: 240661491 (repeat)
gxp: add gxp_vd_phys_core_list
gxp: remove ZEBU_SYSMMU_WORKAROUND
gxp: Replace (resp_queue, eventfd) params with (client, virt_core) of async ops
gxp: remove virt_core_list from tpu_mbx_desc
gxp: check VD is non-null in gxp_client_destroy
gxp: remove core_domains from vd Bug: 240415495 (repeat)
gxp: remove gxp_vd_phys_core_to_virt_core
gxp: clean up gxp-dma.h descriptions Bug: 240415495 (repeat)
gxp: dma_map_tpu_buffer accepts domain Bug: 240415495 (repeat)
gxp: gxp_dma_alloc_coherent takes domain Bug: 240415495 (repeat)
gxp: map_allocated_coherent_buffer takes domain Bug: 240415495 (repeat)
gxp: dma_map_dmabuf accepts domain as the arg Bug: 240415495 (repeat)
gxp: dma_map_sg accepts domain as arg Bug: 240415495 (repeat)
gxp: dma_map_core_resources accepts domain Bug: 240415495 (repeat)
gxp: dma_domain_attach_device accepts domain Bug: 240415495 (repeat)
gxp: Add callbacks for after allocating / before releasing the vd Bug: 237955391
gxp: program SSMT to the first domain Bug: 240415495 (repeat)
gxp: deprecate virtual_core_list Bug: 240514360
gxp: map telemetry buffers to all domains Bug: 240415495 (repeat)
gxp: map all core resources to all domains in VD Bug: 240415495 (repeat)
gxp: remove the core parameter from dbg dump buf
gxp: Map telemetry buffs before writing descriptor Bug: 239640408
gxp: Decide generating interrupt after allocating mailbox according to interface
gxp: Add common operators of gcip-mailbox-ops to the gxp-mailbox-driver
gxp: Merge resp_queue wait macro into the gxp-mailbox.h
gxp: fix typos in comments Bug: 240315433 (repeat)
gxp: rename callisto.ko to gxp.ko Bug: 240212593
gxp: Cleanup FW data on VD creation failure Bug: 240192343
gxp: remove the core number patch on gem5 platform Bug: 239908693
gxp: edgetpu symbol path according to target chip Bug: 239766974
gxp: warn deprecate pwr states only once Bug: 237337595
gxp: don't modify clkmux state during core booting Bug: 238960149
gxp: Map mailboxes for IOVA of KCI/UCI Bug: 228401855
gxp: Introduce gxp-usage-stats.c and complete update_usage KCI command Bug: 237967242
gxp: Introduce gxp-kci.c Bug: 228401855 (repeat)
gxp: Introduce gxp-dci.c Bug: 236332988
gxp: Remove the dependency of gcip-mailbox from Amalthea Bug: 237908672
gxp: Introduce gxp_mailbox_args Bug: 236332988 (repeat)
gxp: Introduce gxp_mailbox_ops Bug: 236332988 (repeat)
gxp: Apply gcip-mailbox to gxp-mailbox Bug: 236679300 (repeat)
gxp: Apply abstracted mailbox functions Bug: 237908672 (repeat)
gxp: Abstracts mailbox related functions into the mailbox manager Bug: 237908672 (repeat)
gxp: temporarily attach a domain while VD creating Bug: 232310140
gxp: add interface to override ioctl handlers
gxp: implement UCI Bug: 232310140 (repeat)
gxp: fix OOT build for amalthea
gxp: increase the waiting time for cores booting Bug: 237378056
gxp: fix deadlock on power states queue full Bug: 236087752
gxp: parallelize the firmware startup process Bug: 207036666
gxp: Fix bug when clearing FW buffers on auth fail Bug: 237789581
gxp: firmware load should use configured name
gxp: add gcip include for GKI kernel
gxp: deprecate NON_AGGRESSOR / add LOW_FREQ_CLKMUX Bug: 237337595 (repeat) Bug: 237378056 (repeat)
gxp: Disable telemetry before free Bug: 235771175
gxp: hold mmap lock around call to find_extend_vma Bug: 237404338
gxp: Don't change doorbell mask for running cores Bug: 235447772
gxp: compile gcip as an object Bug: 234674944
gxp: map shared buffer region Bug: 237262124
gxp: add gcip support to Makefile Bug: 234674944 (repeat)
gxp: move firmware default name to chip config Bug: 234261504
gxp: Rename gxp-hw-mailbox-driver.c to gxp-mailbox-driver.c Bug: 236431904
gxp: Move increasing queue head / tail functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat)
gxp: Move setting queue head / tail functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat)
gxp: Move circ queue cnt / inc functions to gxp-hw-mailbox-driver.c Bug: 236431904 (repeat)
gxp: Apply clang-format on the mailbox sources Bug: 236431904 (repeat)
gxp: propagate GXP_PLATFORM option to Kbuild
gxp: optional chip scratchpad region
gxp: gxp-platform adopt common-platform.c Bug: 232894415
gxp: correct MAILBOX_DEVICE_INTERFACE_OFFSET for gem5
gxp: define ZEBU_SYSMMU_WORKAROUND in GEM5 and ZEBU_IP platform
gxp: adjust Makefile for gem5 & kernel 5.13
gxp: add a parameter to specify callisto mode Bug: 235193368
gxp: introduce GXP_NUM_MAILBOXES config Bug: 235173180
gxp: remove redundant checks for mailbox mgr
gxp: remove unused DMA functions Bug: 235212179
gxp: avoid holding semaphore during mmap Bug: 232183143
gxp: move iova.h to config files Bug: 235173809
gxp: Remove CONFIG_GXP_TEST from the SSMT related codes Bug: 201505925
gxp: lpm enable PS1 only for core CSRs
gxp: add an MCU structure Bug: 229587136
gxp: define core CSR addresses as macros Bug: 234705757
gxp: Add option to disable DSP FW auth Bug: 235135800
gxp: Fix hangs and OOB writes when auth fails Bug: 234947988
gxp: Adjust the per-core FW size from 16MB to 1MB Bug: 228277106
gxp: Add enable_debug_dump argument for insmod Bug: 234529356
gxp: Dynamically allocate memory for debug dump buffer Bug: 234529355
gxp: Trigger debug dump only when firmware is up and running Bug: 233660431 Bug: 233607168
gxp: remove support for unsigned firmware Bug: 220246540
gxp: Expose additional DVFS states in power APIs Bug: 233929549
gxp: introduce a gxp_firmware_manager Bug: 230682809
gxp: remove CONFIG_GXP_CLOUDRIPPER
gitignore: ignore .repo and gcip-kernel-driver Bug: 234674944 (repeat)
gxp: set VID for both SSMTs and possible SIDs
gxp: move LPM_TOP_PSM to config files Bug: 232471681
gxp: move SID of core macros to config files Bug: 233989804
gxp: different mailbox offset for platforms Bug: 233887617
gxp: authenticate firmware after requesting it Bug: 232715929
gxp: dma-iommu adopts gxp-ssmt interface Bug: 233989804 (repeat)
gxp: add standalone ssmt driver Bug: 233989804 (repeat)
gxp: add ifdef guard to disable suspend Bug: 234096867
gxp: set VD state in vd_stop Bug: 234096867 (repeat)
gxp: Add lockdep checks to gxp-telemetry.c
gxp: add log on probe success Bug: 233887617 (repeat)
gxp: add tgid to client tracking and suspend block reporting Bug: 230656700
Revert "gxp: send UUD request before shutting down AUR_BLK"
gxp: send UUD request before shutting down AUR_BLK Bug: 233584605
gxp: add parse_dt callback for device probe Bug: 232894415 (repeat)
gxp: remove unrequired CORE_SCRATCHPAD_BASE offset Bug: 233381187
gxp: Switch the boot mode storage from CSRs into the scratchpad space Bug: 233381187 (repeat)
gxp: Refactor FW boot mode into its own functions Bug: 233381187 (repeat)
gxp: Fix locking in gxp_telemetry_disable() Bug: 232876605
gxp: initial gxp mcu firmware support Bug: 229587136 (repeat)
gxp: add callbacks for common probing / removal Bug: 232894415 (repeat)
gxp: introduce gxp-common-platform.c Bug: 232894415 (repeat)
gxp: remove mm-backport.h
gxp: Add NULL check of doorbells and barriers allocation Bug: 232612591
gxp: enhance Makefile for future chip support Bug: 230702544
gxp: debugfs use cmu.vaddr for accessing CMU CSRs
gcip: add gcip-alloc-helper.h Bug: 262684159 (repeat)
gcip: Update the comments in gcip-image-config for new encoding Bug: 257300340 (repeat)
gcip: add reference count to the awaiter Bug: 261822585 (repeat)
gcip: introduce gcip_mailbox_cancel_awaiter Bug: 261822585 (repeat)
gcip: introduce gcip_kci_offload_chip_type Bug: 260690355
gcip: add {link,unlink}_offload_vmbox KCI codes Bug: 260690355 (repeat)
gcip: Add domain pool Bug: 228907682 (repeat)
gcip: Add remapped_data_{start,size} to image config Bug: 257212385
gcip: image header use unsigned fields
gcip: add common authenticated image format header
gcip: Use term awaiter instead of async_resp Bug: 249642792 (repeat)
gcip: constantize gcip_kci_args
gcip: support arbitrary seq order commands Bug: 247414706 (repeat)
gcip: Add gcip-telemetry Bug: 239374826 (repeat)
gcip: add gcip-image-config.h Bug: 243500340 (repeat)
gcip: Add linux/workqueue.h as header
gcip: Remove mailbox param from the release_async_resp_data callback Bug: 239804137 (repeat)
gcip: Add release_data callback to the gcip_mailbox_async_response Bug: 239804137 (repeat)
gcip: fix typo in gcip-mailbox.h
gcip: fix various typos
gcip: Fix reverse KCI codes Bug: 223764481 (repeat)
gcip: Introduce gcip-firmware.h Bug: 239637765 (repeat)
gcip: add gcip_mem_pool_offset
gcip: Add KCI codes Bug: 223764481 (repeat)
gcip: Remove the variables of gcip-kci.h which are moved to gcip-mailbox.h Bug: 236679300 (repeat)
gcip: Add mailbox related structures and functions (gcip-mailbox.h) Bug: 236679300 (repeat)
gcip: Add getters for gcip_kci Bug: 237785687
gcip: Make gcip_kci_push_cmd static Bug: 223764481 (repeat)
gcip: add gcip memory pool Bug: 236673496 (repeat)
gcip: Add KCI related structures and functions Bug: 223764481 (repeat)
gcip: Add mailbox macro and enum Bug: 223764481 (repeat)
gcip: Add circular queue mailbox helper functions Bug: 223764481 (repeat)
GCIP_HEADERS_REV_ID: 37a282fd7aad536dc4521a908468bc9557911a19

gxp: Add a static debug pointer to driver state

To assist with debugging of ramdumps where the GXP driver is not in the backstack, add a static symbol containing a pointer to the driver state so it can be located quickly in memory.

Bug: 255451381 (repeat)

gxp: Reduce mailbox timeout to 1s

Reduce GXP timeout to allow for faster notification of failure since the longest any workloads are expected to run is ~500ms.

Bug: 250265514 (repeat)

GitOrigin-RevId: d7c38381aeae2ecc8b3b3f84abf45f1fe26edc4b
Change-Id: Id0718e8bff32a18aff796dfd7779e2d61a6c4a64
Diffstat (limited to 'gcip-kernel-driver/drivers')
-rw-r--r--  gcip-kernel-driver/drivers/gcip/Makefile            | 22
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c | 93
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c  | 101
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-firmware.c     | 25
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-image-config.c | 220
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-kci.c          | 525
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mailbox.c      | 680
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c     | 69
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-telemetry.c    | 267
9 files changed, 2002 insertions, 0 deletions
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
new file mode 100644
index 0000000..c3424ee
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for GCIP framework.
+#
+
+CONFIG_GCIP ?= m
+obj-$(CONFIG_GCIP) += gcip.o
+
+gcip-objs := gcip-alloc-helper.o gcip-domain-pool.o gcip-firmware.o \
+ gcip-image-config.o gcip-kci.o gcip-mailbox.o gcip-mem-pool.o \
+ gcip-telemetry.o
+
+CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+ccflags-y += -I$(CURRENT_DIR)/../../include
+
+ifdef CONFIG_GCIP_TEST
+obj-y += unittests/
+endif
+
+modules modules_install clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
new file mode 100644
index 0000000..33c95e2
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP helpers for allocating memory.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-alloc-helper.h>
+
+/*
+ * Set @pages to the pages @mem represents.
+ * @mem must be a pointer returned by vmalloc.
+ *
+ * Returns 0 on success, -ENOMEM when any page is NULL.
+ */
+static int gcip_vmalloc_to_pages(void *mem, size_t count, struct page **pages)
+{
+ size_t i = 0;
+
+ while (count--) {
+ pages[i] = vmalloc_to_page(mem);
+ if (!pages[i])
+ return -ENOMEM;
+ i++;
+ mem += PAGE_SIZE;
+ }
+ return 0;
+}
+
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct gcip_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
+ void *mem;
+ struct page **pages;
+ size_t count;
+ int ret;
+
+ if (!sh)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+ mem = vzalloc_node(size, dev_to_node(dev));
+ if (!mem) {
+ dev_err(dev, "GCIP noncontiguous alloc size=%#zx failed", size);
+ goto err_free_sh;
+ }
+
+ pages = kmalloc_array(count, sizeof(*pages), gfp);
+ if (!pages) {
+ dev_err(dev, "GCIP alloc pages array count=%zu failed", count);
+ goto err_free_mem;
+ }
+
+ if (gcip_vmalloc_to_pages(mem, count, pages)) {
+ dev_err(dev, "convert memory to pages failed");
+ goto err_free_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(&sh->sgt, pages, count, 0, size, gfp);
+ if (ret) {
+ dev_err(dev, "alloc SG table with size=%#zx failed: %d", size, ret);
+ goto err_free_pages;
+ }
+
+ kfree(pages);
+ sh->mem = mem;
+ return &sh->sgt;
+
+err_free_pages:
+ kfree(pages);
+err_free_mem:
+ vfree(mem);
+err_free_sh:
+ kfree(sh);
+ return NULL;
+}
+
+void gcip_free_noncontiguous(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ sg_free_table(&sh->sgt);
+ vfree(sh->mem);
+ kfree(sh);
+}
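For reference, here is a minimal caller sketch (not part of this change; the probe context, buffer size, and DMA direction are assumptions) showing how the two helpers above pair up: gcip_alloc_noncontiguous() returns an sg_table backed by vzalloc'd pages, and gcip_free_noncontiguous() recovers the owning gcip_sgt_handle via container_of() to release both the table and the memory.

#include <linux/dma-mapping.h>
#include <gcip/gcip-alloc-helper.h>

/* Sketch: allocate a noncontiguous buffer and map it for device DMA. */
static int example_noncontiguous_alloc(struct device *dev)
{
	struct sg_table *sgt;
	int ret;

	sgt = gcip_alloc_noncontiguous(dev, 4 * PAGE_SIZE, GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		gcip_free_noncontiguous(sgt);
		return ret;
	}

	/* ... hand the sg_table to the device ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	gcip_free_noncontiguous(sgt);
	return 0;
}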
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
new file mode 100644
index 0000000..2341b52
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-domain-pool.h>
+
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size)
+{
+ unsigned int i;
+ struct iommu_domain *domain;
+
+ pool->size = size;
+ pool->dev = dev;
+
+ if (!size)
+ return 0;
+
+ dev_dbg(pool->dev, "Initializing domain pool with %u domains\n", size);
+
+ ida_init(&pool->idp);
+ pool->array = vzalloc(sizeof(*pool->array) * size);
+ if (!pool->array) {
+ ida_destroy(&pool->idp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i++) {
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(pool->dev, "Failed to allocate iommu domain %d of %u\n", i + 1,
+ size);
+ gcip_domain_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ pool->array[i] = domain;
+ }
+ return 0;
+}
+
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
+{
+ int id;
+
+ if (!pool->size)
+ return iommu_domain_alloc(pool->dev->bus);
+
+ id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+
+ if (id < 0) {
+ dev_err(pool->dev, "No more domains available from pool of size %u\n", pool->size);
+ return NULL;
+ }
+
+ dev_dbg(pool->dev, "Allocated domain from pool with id = %d\n", id);
+
+ return pool->array[id];
+}
+
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain)
+{
+ int id;
+
+ if (!pool->size) {
+ iommu_domain_free(domain);
+ return;
+ }
+ for (id = 0; id < pool->size; id++) {
+ if (pool->array[id] == domain) {
+ dev_dbg(pool->dev, "Released domain from pool with id = %d\n", id);
+ ida_free(&pool->idp, id);
+ return;
+ }
+ }
+ dev_err(pool->dev, "Domain not found in pool\n");
+}
+
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool)
+{
+ int i;
+
+ if (!pool->size)
+ return;
+
+ dev_dbg(pool->dev, "Destroying domain pool with %u domains\n", pool->size);
+
+ for (i = 0; i < pool->size; i++) {
+ if (pool->array[i])
+ iommu_domain_free(pool->array[i]);
+ }
+
+ ida_destroy(&pool->idp);
+ vfree(pool->array);
+}
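The pool is a fixed array of pre-allocated domains handed out through an IDA; with size 0 it degenerates to plain iommu_domain_alloc()/iommu_domain_free() on demand. A hedged usage sketch (the device and the pool size of 4 are assumptions):

#include <gcip/gcip-domain-pool.h>

static int example_domain_pool(struct device *dev)
{
	struct gcip_domain_pool pool;
	struct iommu_domain *domain;
	int ret;

	/* Pre-allocate 4 domains; pass 0 to allocate on demand instead. */
	ret = gcip_domain_pool_init(dev, &pool, 4);
	if (ret)
		return ret;

	domain = gcip_domain_pool_alloc(&pool);
	if (domain) {
		/* ... attach the domain and map buffers ... */
		gcip_domain_pool_free(&pool, domain);
	}

	gcip_domain_pool_destroy(&pool);
	return 0;
}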
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-firmware.c b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
new file mode 100644
index 0000000..0b0225c
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <gcip/gcip-firmware.h>
+
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor)
+{
+ switch (fw_flavor) {
+ case GCIP_FW_FLAVOR_BL1:
+ return "stage 2 bootloader";
+ case GCIP_FW_FLAVOR_SYSTEST:
+ return "test";
+ case GCIP_FW_FLAVOR_PROD_DEFAULT:
+ return "prod";
+ case GCIP_FW_FLAVOR_CUSTOM:
+ return "custom";
+ case GCIP_FW_FLAVOR_UNKNOWN:
+ default:
+ return "unknown";
+ }
+}
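As a usage note, a hypothetical call site (the device pointer is a stand-in; the flavor value would normally come from the firmware handshake) might log the reported flavor:

	dev_info(dev, "loaded %s firmware",
		 gcip_fw_flavor_str(GCIP_FW_FLAVOR_PROD_DEFAULT));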
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
new file mode 100644
index 0000000..312bbdc
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-image-config.h>
+
+#define ADDR_SHIFT 12
+#define SIZE_MODE_BIT BIT(ADDR_SHIFT - 1)
+#define SECURE_SIZE_MASK (SIZE_MODE_BIT - 1u)
+#define NS_SIZE_MASK (BIT(ADDR_SHIFT) - 1u)
+#define ADDR_MASK ~(BIT(ADDR_SHIFT) - 1u)
+
+/* used by ns_iommu_mappings */
+#define CONFIG_TO_MBSIZE(a) (((a) & NS_SIZE_MASK) << 20)
+
+/* used by iommu_mappings */
+static inline __u32 config_to_size(__u32 cfg)
+{
+ __u32 page_size;
+
+ if (cfg & SIZE_MODE_BIT)
+ page_size = cfg & SECURE_SIZE_MASK;
+ else
+ page_size = BIT(cfg & SECURE_SIZE_MASK);
+
+ return page_size << PAGE_SHIFT;
+}
+
+static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int i, ret;
+ dma_addr_t daddr;
+ size_t size;
+ phys_addr_t paddr;
+
+ for (i = 0; i < config->num_iommu_mappings; i++) {
+ daddr = config->iommu_mappings[i].virt_address;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ paddr = config->iommu_mappings[i].image_config_value & ADDR_MASK;
+
+ dev_dbg(parser->dev, "Image config adding IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size,
+ GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ if (ret) {
+ dev_err(parser->dev,
+ "Unable to Map: %d dma_addr: %pad phys_addr: %pap size: %#lx\n",
+ ret, &daddr, &paddr, size);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+ return ret;
+}
+
+static void clear_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_iommu_mappings - 1; i >= 0; i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = config_to_size(config->iommu_mappings[i].image_config_value);
+ dev_dbg(parser->dev, "Image config removing IOMMU mapping: %pad size=%#lx", &daddr,
+ size);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+}
+
+static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int ret, i;
+ phys_addr_t paddr = 0;
+
+ for (i = 0; i < config->num_ns_iommu_mappings; i++) {
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ dev_dbg(parser->dev, "Image config adding NS IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size, 0);
+ if (ret)
+ goto err;
+ paddr += size;
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+ return ret;
+}
+
+static void clear_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_ns_iommu_mappings - 1; i >= 0; i--) {
+ size = CONFIG_TO_MBSIZE(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & ADDR_MASK;
+ dev_dbg(parser->dev, "Image config removing NS IOMMU mapping: %pad size=%#lx",
+ &daddr, size);
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+}
+
+static int map_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret = setup_ns_iommu_mappings(parser, config);
+
+ if (ret)
+ return ret;
+ if (gcip_image_config_is_ns(config)) {
+ ret = setup_iommu_mappings(parser, config);
+ if (ret)
+ clear_ns_iommu_mappings(parser, config);
+ }
+ return ret;
+}
+
+static void unmap_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ if (gcip_image_config_is_ns(config))
+ clear_iommu_mappings(parser, config);
+ clear_ns_iommu_mappings(parser, config);
+}
+
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data)
+{
+ if (!ops->map || !ops->unmap) {
+ dev_err(dev, "Missing mandatory operations for image config parser");
+ return -EINVAL;
+ }
+ parser->dev = dev;
+ parser->data = data;
+ parser->ops = ops;
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+ return 0;
+}
+
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret;
+
+ if (!memcmp(config, &parser->last_config, sizeof(*config)))
+ return 0;
+ unmap_image_config(parser, &parser->last_config);
+ ret = map_image_config(parser, config);
+ if (ret) {
+ dev_err(parser->dev, "Map image config failed: %d", ret);
+ /*
+ * This is unexpected, as the mappings in the last config were just removed - it might
+ * happen if the IOMMU driver state is corrupted. We cannot do anything to rescue it,
+ * so simply log a message.
+ */
+ if (unlikely(map_image_config(parser, &parser->last_config)))
+ dev_err(parser->dev, "Failed to roll back the last image config");
+ return ret;
+ }
+ memcpy(&parser->last_config, config, sizeof(parser->last_config));
+ return 0;
+}
+
+void gcip_image_config_clear(struct gcip_image_config_parser *parser)
+{
+ unmap_image_config(parser, &parser->last_config);
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+}
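To unpack the encoding above: each iommu_mappings value packs a 4 KiB-aligned physical address in the upper bits (ADDR_MASK) and a size in the low 12 bits. If SIZE_MODE_BIT (bit 11) is clear, the low bits are a power-of-two exponent, so a value of 4 means BIT(4) = 16 pages (64 KiB with 4 KiB pages); if it is set, the low 11 bits are a raw page count, so 100 means 100 pages (400 KiB). The ns_iommu_mappings entries instead encode their size in megabytes via CONFIG_TO_MBSIZE. A parser only needs map/unmap callbacks; a hedged sketch follows. The exact callback signatures live in gcip-image-config.h, which is not part of this diff, so they are inferred from the call sites above, and backing the parser with a bare IOMMU domain is an assumption.

#include <linux/iommu.h>
#include <gcip/gcip-image-config.h>

/* Sketch: back the parser's map/unmap with a plain IOMMU domain. */
static int example_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
		       size_t size, unsigned int flags)
{
	return iommu_map(data, daddr, paddr, size, IOMMU_READ | IOMMU_WRITE);
}

static void example_unmap(void *data, dma_addr_t daddr, size_t size,
			  unsigned int flags)
{
	iommu_unmap(data, daddr, size);
}

static const struct gcip_image_config_ops example_ops = {
	.map = example_map,
	.unmap = example_unmap,
};

static int example_parse(struct device *dev, struct iommu_domain *domain,
			 struct gcip_image_config *config)
{
	struct gcip_image_config_parser parser;
	int ret;

	ret = gcip_image_config_parser_init(&parser, &example_ops, dev, domain);
	if (ret)
		return ret;
	return gcip_image_config_parse(&parser, config);
}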
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
new file mode 100644
index 0000000..15b2c53
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+
+static u32 gcip_kci_get_cmd_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_head(kci);
+}
+
+static u32 gcip_kci_get_cmd_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_tail(kci);
+}
+
+static void gcip_kci_inc_cmd_queue_tail(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->cmd_queue_lock);
+ kci->ops->inc_cmd_queue_tail(kci, inc);
+}
+
+static int gcip_kci_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_lock(&kci->cmd_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_unlock(&kci->cmd_queue_lock);
+}
+
+static u64 gcip_kci_get_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->seq;
+}
+
+static u32 gcip_kci_get_cmd_elem_code(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->code;
+}
+
+static void gcip_kci_set_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd, u64 seq)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ elem->seq = seq;
+}
+
+static u32 gcip_kci_get_resp_queue_size(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_size(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_head(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_tail(kci);
+}
+
+static void gcip_kci_inc_resp_queue_head(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->resp_queue_lock);
+ kci->ops->inc_resp_queue_head(kci, inc);
+}
+
+static int gcip_kci_acquire_resp_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (try)
+ return spin_trylock(&kci->resp_queue_lock);
+
+ spin_lock(&kci->resp_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_resp_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ spin_unlock(&kci->resp_queue_lock);
+}
+
+static u64 gcip_kci_get_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->seq;
+}
+
+static void gcip_kci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp, u64 seq)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->seq = seq;
+}
+
+static u16 gcip_kci_get_resp_elem_status(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->status;
+}
+
+static void gcip_kci_set_resp_elem_status(struct gcip_mailbox *mailbox, void *resp, u16 status)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->status = status;
+}
+
+static void gcip_kci_acquire_wait_list_lock(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqsave)
+ spin_lock_irqsave(&kci->wait_list_lock, *flags);
+ else
+ spin_lock(&kci->wait_list_lock);
+}
+
+static void gcip_kci_release_wait_list_lock(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqrestore)
+ spin_unlock_irqrestore(&kci->wait_list_lock, flags);
+ else
+ spin_unlock(&kci->wait_list_lock);
+}
+
+static int gcip_kci_wait_for_cmd_queue_not_full(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 tail = kci->ops->get_cmd_queue_tail(kci);
+ int ret;
+
+ ret = wait_event_timeout(kci->resp_doorbell_waitq,
+ kci->ops->get_cmd_queue_head(kci) !=
+ (tail ^ mailbox->queue_wrap_bit),
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int gcip_kci_after_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ struct gcip_kci_command_element *elem = cmd;
+
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_PUSH_CMD);
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ return 1;
+ return 0;
+}
+
+static void gcip_kci_after_fetch_resps(struct gcip_mailbox *mailbox, u32 num_resps)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 size = kci->ops->get_resp_queue_size(kci);
+
+ /*
+ * We consumed a lot of responses - ring the doorbell of the *cmd* queue to notify the
+ * firmware, which might be waiting for us to consume the response queue.
+ */
+ if (num_resps >= size / 2)
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_CONSUME_RESP);
+}
+
+/*
+ * Adds an incoming request from firmware to the circular buffer and schedules the work queue for
+ * processing.
+ */
+static int gcip_reverse_kci_add_resp(struct gcip_kci *kci,
+ const struct gcip_kci_response_element *resp)
+{
+ struct gcip_reverse_kci *rkci = &kci->rkci;
+ unsigned long head, tail, flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rkci->producer_lock, flags);
+ head = rkci->head;
+ tail = READ_ONCE(rkci->tail);
+ if (CIRC_SPACE(head, tail, rkci->buffer_size) >= 1) {
+ rkci->buffer[head] = *resp;
+ smp_store_release(&rkci->head, (head + 1) & (rkci->buffer_size - 1));
+ schedule_work(&rkci->work);
+ } else {
+ ret = -ENOSPC;
+ }
+ spin_unlock_irqrestore(&rkci->producer_lock, flags);
+
+ return ret;
+}
+
+static bool gcip_kci_before_handle_resp(struct gcip_mailbox *mailbox, const void *resp)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ const struct gcip_kci_response_element *elem = resp;
+
+ if (elem->seq & GCIP_KCI_REVERSE_FLAG) {
+ int ret = gcip_reverse_kci_add_resp(kci, elem);
+
+ if (ret)
+ dev_warn_ratelimited(kci->dev,
+ "Failed to handle reverse KCI code %u (%d)\n",
+ elem->code, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct gcip_mailbox_ops gcip_mailbox_ops = {
+ .get_cmd_queue_head = gcip_kci_get_cmd_queue_head,
+ .get_cmd_queue_tail = gcip_kci_get_cmd_queue_tail,
+ .inc_cmd_queue_tail = gcip_kci_inc_cmd_queue_tail,
+ .acquire_cmd_queue_lock = gcip_kci_acquire_cmd_queue_lock,
+ .release_cmd_queue_lock = gcip_kci_release_cmd_queue_lock,
+ .get_cmd_elem_seq = gcip_kci_get_cmd_elem_seq,
+ .set_cmd_elem_seq = gcip_kci_set_cmd_elem_seq,
+ .get_cmd_elem_code = gcip_kci_get_cmd_elem_code,
+ .get_resp_queue_size = gcip_kci_get_resp_queue_size,
+ .get_resp_queue_head = gcip_kci_get_resp_queue_head,
+ .get_resp_queue_tail = gcip_kci_get_resp_queue_tail,
+ .inc_resp_queue_head = gcip_kci_inc_resp_queue_head,
+ .acquire_resp_queue_lock = gcip_kci_acquire_resp_queue_lock,
+ .release_resp_queue_lock = gcip_kci_release_resp_queue_lock,
+ .get_resp_elem_seq = gcip_kci_get_resp_elem_seq,
+ .set_resp_elem_seq = gcip_kci_set_resp_elem_seq,
+ .get_resp_elem_status = gcip_kci_get_resp_elem_status,
+ .set_resp_elem_status = gcip_kci_set_resp_elem_status,
+ .acquire_wait_list_lock = gcip_kci_acquire_wait_list_lock,
+ .release_wait_list_lock = gcip_kci_release_wait_list_lock,
+ .wait_for_cmd_queue_not_full = gcip_kci_wait_for_cmd_queue_not_full,
+ .after_enqueue_cmd = gcip_kci_after_enqueue_cmd,
+ .after_fetch_resps = gcip_kci_after_fetch_resps,
+ .before_handle_resp = gcip_kci_before_handle_resp,
+};
+
+/*
+ * Pushes an element to the cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of the response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_send_cmd(&kci->mailbox, cmd, resp);
+ if (ret || !resp)
+ return ret;
+
+ return resp->code;
+}
+
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Don't wait for a response when sending a reply to a reverse KCI request. */
+ if (cmd->seq & GCIP_KCI_REVERSE_FLAG)
+ return gcip_kci_send_cmd_return_resp(kci, cmd, NULL);
+ else
+ return gcip_kci_send_cmd_return_resp(kci, cmd, &resp);
+}
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ *
+ * Note: this worker is scheduled in the IRQ handler. To prevent use-after-free or race-condition
+ * bugs, gcip_kci_cancel_work_queues() must be called before freeing the mailbox.
+ */
+static void gcip_kci_consume_responses_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, work);
+
+ gcip_mailbox_consume_responses_work(&kci->mailbox);
+}
+
+/*
+ * IRQ handler of KCI mailbox.
+ *
+ * Consumes one response (if any) and puts gcip_kci_consume_responses_work() into the system work
+ * queue.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Wakes up threads that are waiting for response doorbell to be rung. */
+ wake_up(&kci->resp_doorbell_waitq);
+
+ /*
+ * Quickly consumes one response, which should be enough for usual cases, to prevent the
+ * host from being too busy to execute the scheduled work.
+ */
+ gcip_mailbox_consume_one_response(&kci->mailbox, &resp);
+
+ schedule_work(&kci->work);
+}
+
+static void gcip_kci_update_usage_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, usage_work);
+
+ kci->ops->update_usage(kci);
+}
+
+void gcip_kci_update_usage_async(struct gcip_kci *kci)
+{
+ schedule_work(&kci->usage_work);
+}
+
+/* Removes one element from the circular buffer. */
+static int gcip_reverse_kci_remove_resp(struct gcip_reverse_kci *rkci,
+ struct gcip_kci_response_element *resp)
+{
+ unsigned long head, tail;
+ int ret = 0;
+
+ spin_lock(&rkci->consumer_lock);
+
+ /*
+ * Prevents the compiler from discarding and reloading its cached value, and additionally
+ * forces the CPU to order against subsequent memory references.
+ * Shamelessly stolen from:
+ * https://www.kernel.org/doc/html/latest/core-api/circular-buffers.html
+ */
+ head = smp_load_acquire(&rkci->head);
+ tail = rkci->tail;
+ if (CIRC_CNT(head, tail, rkci->buffer_size) >= 1) {
+ *resp = rkci->buffer[tail];
+ tail = (tail + 1) & (rkci->buffer_size - 1);
+ ret = 1;
+ smp_store_release(&rkci->tail, tail);
+ }
+ spin_unlock(&rkci->consumer_lock);
+ return ret;
+}
+
+/* Worker for incoming requests from firmware. */
+static void gcip_reverse_kci_work(struct work_struct *work)
+{
+ struct gcip_kci_response_element resp;
+ struct gcip_reverse_kci *rkci = container_of(work, struct gcip_reverse_kci, work);
+ struct gcip_kci *kci = container_of(rkci, struct gcip_kci, rkci);
+
+ while (gcip_reverse_kci_remove_resp(rkci, &resp))
+ kci->ops->reverse_kci_handle_response(kci, &resp);
+}
+
+/* Initializes the Reverse KCI handler. */
+static int gcip_reverse_kci_init(struct gcip_reverse_kci *rkci, struct device *dev, u32 buffer_size)
+{
+ if (rkci->buffer)
+ return 0;
+
+ rkci->buffer_size = buffer_size;
+ rkci->buffer = devm_kcalloc(dev, buffer_size, sizeof(*rkci->buffer), GFP_KERNEL);
+ if (!rkci->buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&rkci->producer_lock);
+ spin_lock_init(&rkci->consumer_lock);
+ INIT_WORK(&rkci->work, gcip_reverse_kci_work);
+
+ return 0;
+}
+
+/* Verifies and sets the KCI operators. */
+static int gcip_kci_set_ops(struct gcip_kci *kci, const struct gcip_kci_ops *ops)
+{
+ if (!ops) {
+ kci->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail) {
+ dev_err(kci->dev, "Incomplete KCI CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head) {
+ dev_err(kci->dev, "Incomplete KCI RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->trigger_doorbell) {
+ dev_err(kci->dev, "Incomplete KCI ops. Missing trigger_doorbell.\n");
+ return -EINVAL;
+ }
+
+ kci->ops = ops;
+
+ return 0;
+}
+
+/* Sets the KCI private data. */
+static inline void gcip_kci_set_data(struct gcip_kci *kci, void *data)
+{
+ kci->data = data;
+}
+
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args)
+{
+ int ret;
+ struct gcip_mailbox_args mailbox_args;
+
+ if (kci->ops)
+ return 0;
+
+ kci->dev = args->dev;
+ gcip_kci_set_data(kci, args->data);
+
+ ret = gcip_kci_set_ops(kci, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ ret = gcip_reverse_kci_init(&kci->rkci, kci->dev, args->rkci_buffer_size);
+ if (ret)
+ goto err_unset_ops;
+
+ mailbox_args.dev = args->dev;
+ mailbox_args.queue_wrap_bit = args->queue_wrap_bit;
+ mailbox_args.cmd_queue = args->cmd_queue;
+ mailbox_args.cmd_elem_size = sizeof(struct gcip_kci_command_element);
+ mailbox_args.resp_queue = args->resp_queue;
+ mailbox_args.resp_elem_size = sizeof(struct gcip_kci_response_element);
+ mailbox_args.timeout = args->timeout;
+ mailbox_args.ops = &gcip_mailbox_ops;
+ mailbox_args.data = kci;
+ mailbox_args.ignore_seq_order = false;
+
+ ret = gcip_mailbox_init(&kci->mailbox, &mailbox_args);
+ if (ret)
+ goto err_unset_ops;
+
+ mutex_init(&kci->cmd_queue_lock);
+ spin_lock_init(&kci->resp_queue_lock);
+ spin_lock_init(&kci->wait_list_lock);
+ init_waitqueue_head(&kci->resp_doorbell_waitq);
+ INIT_WORK(&kci->work, gcip_kci_consume_responses_work);
+ INIT_WORK(&kci->usage_work, gcip_kci_update_usage_work);
+
+ return 0;
+err_unset_ops:
+ gcip_kci_set_ops(kci, NULL);
+err_unset_data:
+ gcip_kci_set_data(kci, NULL);
+
+ return ret;
+}
+
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci)
+{
+ cancel_work_sync(&kci->usage_work);
+ cancel_work_sync(&kci->work);
+ cancel_work_sync(&kci->rkci.work);
+}
+
+void gcip_kci_release(struct gcip_kci *kci)
+{
+ kci->rkci.buffer = NULL;
+ gcip_kci_set_ops(kci, NULL);
+ gcip_kci_set_data(kci, NULL);
+
+ /*
+ * Non-empty @kci->wait_list means someone (gcip_kci_send_cmd) is waiting for a response.
+ *
+ * Since this function should only be called when removing a device, it should be impossible
+ * to reach here while gcip_kci_send_cmd() is still waiting (rmmod should fail). Add a simple
+ * check here so we can more easily figure out when this happens.
+ */
+ if (!list_empty(gcip_kci_get_wait_list(kci)))
+ dev_warn(kci->dev, "KCI commands still pending.\n");
+ gcip_mailbox_release(&kci->mailbox);
+}
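Putting the pieces together, here is a hedged initialization sketch. The ops table, private data, and queue memory are chip-specific and assumed here; the field names come from the gcip_kci_init() body above. Note that the reverse KCI buffer size must be a power of two, since buffer_size - 1 is used as an index mask in the circular-buffer code.

#include <linux/bits.h>
#include <gcip/gcip-kci.h>

static int example_kci_setup(struct gcip_kci *kci, struct device *dev,
			     const struct gcip_kci_ops *chip_ops, void *chip_priv,
			     void *cmd_queue, void *resp_queue)
{
	const struct gcip_kci_args args = {
		.dev = dev,
		.cmd_queue = cmd_queue,
		.resp_queue = resp_queue,
		.queue_wrap_bit = BIT(10),	/* assumption: wrap-bit mask of the circular queues */
		.rkci_buffer_size = 32,		/* must be a power of two (used as an index mask) */
		.timeout = 1000,		/* ms; matches the "Reduce mailbox timeout to 1s" change */
		.ops = chip_ops,
		.data = chip_priv,		/* chip private data for the ops callbacks */
	};

	return gcip_kci_init(kci, &args);
}

After a successful init, the chip's mailbox IRQ handler calls gcip_kci_handle_irq(); teardown is gcip_kci_cancel_work_queues() followed by gcip_kci_release().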
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
new file mode 100644
index 0000000..cbb3c80
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+#if IS_ENABLED(CONFIG_GCIP_TEST)
+#include "unittests/helper/gcip-mailbox-controller.h"
+
+#define TEST_TRIGGER_TIMEOUT_RACE(awaiter) gcip_mailbox_controller_trigger_timeout_race(awaiter)
+#else
+#define TEST_TRIGGER_TIMEOUT_RACE(...)
+#endif
+
+#define GET_CMD_QUEUE_HEAD() mailbox->ops->get_cmd_queue_head(mailbox)
+#define GET_CMD_QUEUE_TAIL() mailbox->ops->get_cmd_queue_tail(mailbox)
+#define INC_CMD_QUEUE_TAIL(inc) mailbox->ops->inc_cmd_queue_tail(mailbox, inc)
+#define ACQUIRE_CMD_QUEUE_LOCK(try) mailbox->ops->acquire_cmd_queue_lock(mailbox, try)
+#define RELEASE_CMD_QUEUE_LOCK() mailbox->ops->release_cmd_queue_lock(mailbox)
+
+#define GET_CMD_ELEM_SEQ(cmd) mailbox->ops->get_cmd_elem_seq(mailbox, cmd)
+#define SET_CMD_ELEM_SEQ(cmd, seq) mailbox->ops->set_cmd_elem_seq(mailbox, cmd, seq)
+#define GET_CMD_ELEM_CODE(cmd) mailbox->ops->get_cmd_elem_code(mailbox, cmd)
+
+#define GET_RESP_QUEUE_SIZE() mailbox->ops->get_resp_queue_size(mailbox)
+#define GET_RESP_QUEUE_HEAD() mailbox->ops->get_resp_queue_head(mailbox)
+#define INC_RESP_QUEUE_HEAD(inc) mailbox->ops->inc_resp_queue_head(mailbox, inc)
+#define GET_RESP_QUEUE_TAIL() mailbox->ops->get_resp_queue_tail(mailbox)
+#define ACQUIRE_RESP_QUEUE_LOCK(try) mailbox->ops->acquire_resp_queue_lock(mailbox, try)
+#define RELEASE_RESP_QUEUE_LOCK() mailbox->ops->release_resp_queue_lock(mailbox)
+
+#define GET_RESP_ELEM_SEQ(resp) mailbox->ops->get_resp_elem_seq(mailbox, resp)
+#define SET_RESP_ELEM_SEQ(resp, seq) mailbox->ops->set_resp_elem_seq(mailbox, resp, seq)
+#define GET_RESP_ELEM_STATUS(resp) mailbox->ops->get_resp_elem_status(mailbox, resp)
+#define SET_RESP_ELEM_STATUS(resp, status) mailbox->ops->set_resp_elem_status(mailbox, resp, status)
+
+#define ACQUIRE_WAIT_LIST_LOCK(irqsave, flags) \
+ mailbox->ops->acquire_wait_list_lock(mailbox, irqsave, flags)
+#define RELEASE_WAIT_LIST_LOCK(irqrestore, flags) \
+ mailbox->ops->release_wait_list_lock(mailbox, irqrestore, flags)
+
+struct gcip_mailbox_wait_list_elem {
+ struct list_head list;
+ void *resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+};
+
+static void gcip_mailbox_awaiter_release(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
+}
+
+static void gcip_mailbox_awaiter_dec_refs(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (refcount_dec_and_test(&awaiter->refs))
+ gcip_mailbox_awaiter_release(awaiter);
+}
+
+/*
+ * Removes the response previously pushed with gcip_mailbox_push_wait_resp().
+ *
+ * This is used when the kernel gives up waiting for the response.
+ */
+static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry (cur, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq)
+ break;
+ if (cur_seq == seq) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Drop the reference taken for the arrival handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Adds @resp to @mailbox->wait_list. If @awaiter is not NULL, the @resp is asynchronous.
+ * Otherwise, the @resp is synchronous.
+ *
+ * wait_list is a FIFO queue, with sequence number in increasing order.
+ *
+ * Returns 0 on success, or -ENOMEM if allocation failed.
+ */
+static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!entry)
+ return -ENOMEM;
+
+ /* Take a reference for the arrival handler. */
+ if (awaiter)
+ refcount_inc(&awaiter->refs);
+
+ entry->resp = resp;
+ entry->awaiter = awaiter;
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+ list_add_tail(&entry->list, &mailbox->wait_list);
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+
+ return 0;
+}
+
+/*
+ * Pushes @cmd to the command queue of the mailbox and returns. @resp should be passed if the
+ * request is synchronous and the caller wants the response. If @resp is NULL even though the
+ * request is synchronous, @cmd will still be put into the queue, but the caller cannot wait for
+ * the response and must ignore it. If the request is asynchronous, @awaiter should be passed too.
+ */
+static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ int ret = 0;
+ u32 tail;
+
+ ACQUIRE_CMD_QUEUE_LOCK(false);
+
+ SET_CMD_ELEM_SEQ(cmd, mailbox->cur_seq);
+ /*
+ * The lock ensures mailbox cmd_queue_tail cannot be changed by other processes (this
+ * method should be the only one to modify the value of tail), therefore we can remember
+ * its value here and use it in the condition of wait_event() call.
+ */
+ tail = GET_CMD_QUEUE_TAIL();
+
+ if (mailbox->ops->wait_for_cmd_queue_not_full) {
+ /* Wait until the cmd queue has a space for putting cmd. */
+ ret = mailbox->ops->wait_for_cmd_queue_not_full(mailbox);
+ if (ret)
+ goto out;
+ } else if (GET_CMD_QUEUE_HEAD() == (tail ^ mailbox->queue_wrap_bit)) {
+ /*
+ * Default logic for checking whether cmd_queue is full. If it is full,
+ * it's up to the caller to retry.
+ */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (resp) {
+ /* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
+ SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
+ ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter);
+ if (ret)
+ goto out;
+ }
+ /* Size of cmd_queue is a multiple of mailbox->cmd_elem_size. */
+ memcpy(mailbox->cmd_queue + mailbox->cmd_elem_size *
+ CIRC_QUEUE_REAL_INDEX(tail, mailbox->queue_wrap_bit),
+ cmd, mailbox->cmd_elem_size);
+ INC_CMD_QUEUE_TAIL(1);
+ if (mailbox->ops->after_enqueue_cmd) {
+ ret = mailbox->ops->after_enqueue_cmd(mailbox, cmd);
+ if (ret < 0) {
+ /*
+ * Currently, neither DSP nor EdgeTPU ever returns an error here, so do
+ * nothing. We can decide later how to roll back state such as
+ * `cmd_queue_tail` once returning an error becomes possible.
+ */
+ dev_warn(mailbox->dev,
+ "after_enqueue_cmd returned an error, but not handled: ret=%d\n",
+ ret);
+ goto out;
+ }
+ mailbox->cur_seq += ret;
+ ret = 0;
+ } else {
+ mailbox->cur_seq += 1;
+ }
+
+out:
+ RELEASE_CMD_QUEUE_LOCK();
+ if (ret)
+ dev_dbg(mailbox->dev, "%s: ret=%d", __func__, ret);
+
+ return ret;
+}
+
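+/*
+ * A worked example of the queue-full check above (a sketch; the numbers are
+ * illustrative and assume the queue holds queue_wrap_bit elements, with
+ * CIRC_QUEUE_REAL_INDEX() masking the wrap bit off): with
+ * queue_wrap_bit = 0x400, a head/tail value carries a 10-bit element index
+ * plus bit 0x400 as the wrap flag, which toggles each time the index wraps
+ * back to 0. Then:
+ *
+ *   head == tail           -> queue is empty
+ *   head == (tail ^ 0x400) -> same index, opposite wrap flags: the queue is
+ *                             full and the enqueue returns -EAGAIN
+ *
+ * Keeping the wrap flag distinguishes a full queue from an empty one without
+ * sacrificing a slot.
+ */
+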
+/*
+ * Handler of a response.
+ * Pops the wait_list until the sequence number of @resp is found, and copies @resp to the found
+ * entry.
+ *
+ * Both the entries in wait_list and the handled responses should have sequence numbers in
+ * increasing order. Comparing the #seq at the head of wait_list with @resp->seq, we have three
+ * cases:
+ * 1. #seq > @resp->seq:
+ * - Nothing to do, @resp is not needed and we're done.
+ * 2. #seq == @resp->seq:
+ * - Copy @resp, pop the head and we're done.
+ * 3. #seq < @resp->seq:
+ * - If @mailbox->ignore_seq_order is specified, this is a normal case and the entry is skipped.
+ * - Otherwise, it *should* not happen: it implies the sequence numbers of the entries in
+ * wait_list or of the responses are out-of-order, or the remote didn't respond to a command.
+ * In this case, the status of the skipped entry is set to GCIP_MAILBOX_STATUS_NO_RESPONSE
+ * and popping continues until case 1. or 2. applies. For example, if wait_list holds seqs
+ * [2, 3, 5] and @resp->seq is 5 with strict ordering, entries 2 and 3 are marked NO_RESPONSE
+ * and popped, then entry 5 receives the copy of @resp.
+ */
+static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ /* If before_handle_resp is defined and it returns false, don't handle the response. */
+ if (mailbox->ops->before_handle_resp && !mailbox->ops->before_handle_resp(mailbox, resp))
+ return;
+
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_OK);
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq) {
+ /*
+ * This response has already timed out and been removed
+ * from the wait list (or this is an invalid response).
+ * Drop it.
+ */
+ break;
+ }
+ if (cur_seq == seq) {
+ memcpy(cur->resp, resp, mailbox->resp_elem_size);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ awaiter = cur->awaiter;
+
+ /*
+ * The timeout handler will be fired, but it will be pending on
+ * acquiring the wait_list_lock.
+ */
+ TEST_TRIGGER_TIMEOUT_RACE(awaiter);
+
+ /*
+ * If canceling timeout_work succeeded, we have to drop the
+ * reference count here because the timeout handler will never be
+ * called. Otherwise, the handler has either already been canceled
+ * or is pending due to the race: if it was canceled, the count has
+ * been dropped already, and if it is pending, the timeout handler
+ * will drop the awaiter reference itself.
+ */
+ if (cancel_delayed_work(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ /*
+ * If the `handle_awaiter_arrived` callback is defined, @awaiter
+ * will be released by the implementation side. Otherwise, it
+ * should be freed here.
+ */
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ if (!mailbox->ignore_seq_order && cur_seq < seq) {
+ SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Drop the reference taken for the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Fetches elements in the response queue.
+ *
+ * Returns a pointer to the fetched response elements.
+ * @total_ptr is set to the number of elements fetched.
+ *
+ * Returns ERR_PTR(-ENOMEM) if memory allocation fails.
+ * Returns NULL if the response queue is empty or another worker is fetching responses.
+ */
+static void *gcip_mailbox_fetch_responses(struct gcip_mailbox *mailbox, u32 *total_ptr)
+{
+ u32 head;
+ u32 tail;
+ u32 count;
+ u32 i;
+ u32 j;
+ u32 total = 0;
+ const u32 wrap_bit = mailbox->queue_wrap_bit;
+ const u32 size = GET_RESP_QUEUE_SIZE();
+ const u32 elem_size = mailbox->resp_elem_size;
+ void *ret = NULL; /* Array of responses. */
+ void *prev_ptr = NULL; /* Temporary pointer to realloc ret. */
+
+ /* Someone is working on consuming - we can leave early. */
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ goto out;
+
+ head = GET_RESP_QUEUE_HEAD();
+ /* Loops until our head equals the CSR tail. */
+ while (1) {
+ tail = GET_RESP_QUEUE_TAIL();
+ /*
+ * Make sure the CSR was read and reported properly by checking that no bit higher
+ * than wrap_bit is set and that the tail doesn't exceed the resp_queue size.
+ */
+ if (unlikely(tail & ~CIRC_QUEUE_VALID_MASK(wrap_bit) ||
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit) >= size)) {
+ dev_err_ratelimited(mailbox->dev, "Invalid response queue tail: %#x\n",
+ tail);
+ break;
+ }
+
+ count = gcip_circ_queue_cnt(head, tail, size, wrap_bit);
+ if (count == 0)
+ break;
+
+ prev_ptr = ret;
+ ret = krealloc(prev_ptr, (total + count) * elem_size, GFP_KERNEL);
+ /*
+ * Out of memory: return the previously fetched responses if any, or
+ * ERR_PTR(-ENOMEM) otherwise.
+ */
+ if (!ret) {
+ if (!prev_ptr)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ ret = prev_ptr;
+ break;
+ }
+ /* Copies responses. */
+ j = CIRC_QUEUE_REAL_INDEX(head, wrap_bit);
+ for (i = 0; i < count; i++) {
+ memcpy(ret + elem_size * total, mailbox->resp_queue + elem_size * j,
+ elem_size);
+ j = (j + 1) % size;
+ total++;
+ }
+ head = gcip_circ_queue_inc(head, count, size, wrap_bit);
+ }
+ INC_RESP_QUEUE_HEAD(total);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, total);
+out:
+ *total_ptr = total;
+ return ret;
+}
+
+/* Fetches one response from the response queue. */
+static int gcip_mailbox_fetch_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ u32 head;
+ u32 tail;
+
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ return 0;
+
+ head = GET_RESP_QUEUE_HEAD();
+ tail = GET_RESP_QUEUE_TAIL();
+ /* Queue empty. */
+ if (head == tail) {
+ RELEASE_RESP_QUEUE_LOCK();
+ return 0;
+ }
+
+ memcpy(resp,
+ mailbox->resp_queue + CIRC_QUEUE_REAL_INDEX(head, mailbox->queue_wrap_bit) *
+ mailbox->resp_elem_size,
+ mailbox->resp_elem_size);
+ INC_RESP_QUEUE_HEAD(1);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, 1);
+
+ return 1;
+}
+
+/* Handles timed-out asynchronous commands. */
+static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter =
+ container_of(work, struct gcip_mailbox_resp_awaiter, timeout_work.work);
+ struct gcip_mailbox *mailbox = awaiter->mailbox;
+
+ /*
+ * This function will acquire the mailbox wait_list_lock. This means if
+ * response processing is in progress, it will complete before this
+ * response can be removed from the wait list.
+ *
+ * Once this function has the wait_list_lock, no future response
+ * processing will begin until this response has been removed.
+ */
+ gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
+
+ /*
+ * Handle the timed-out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
+ * will be released by the implementation side. Otherwise, it should be freed here.
+ */
+ if (mailbox->ops->handle_awaiter_timedout)
+ mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
+
+ /* Drop the reference taken for the timeout handler. */
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+/* Cleans up all asynchronous responses which have not been responded to yet. */
+static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ struct list_head resps_to_flush;
+
+ /* If mailbox->ops is NULL, the mailbox is already released. */
+ if (!mailbox->ops)
+ return;
+
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ ACQUIRE_WAIT_LIST_LOCK(false, NULL);
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ awaiter = cur->awaiter;
+ if (mailbox->ops->flush_awaiter)
+ mailbox->ops->flush_awaiter(mailbox, awaiter);
+ /* Drop the reference taken for the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ } else {
+ dev_warn(mailbox->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ RELEASE_WAIT_LIST_LOCK(false, 0);
+
+ /*
+ * Cancel the timeout timer of, and free, any responses that were still in
+ * the `wait_list` above.
+ */
+ list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ awaiter = cur->awaiter;
+ /* Cancel the timeout work and drop the reference taken for the timeout handler. */
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ /* Drop the reference taken for the caller. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ kfree(cur);
+ }
+}
+
+/* Verifies and sets the mailbox operators. */
+static int gcip_mailbox_set_ops(struct gcip_mailbox *mailbox, const struct gcip_mailbox_ops *ops)
+{
+ if (!ops) {
+ mailbox->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail ||
+ !ops->acquire_cmd_queue_lock || !ops->release_cmd_queue_lock ||
+ !ops->get_cmd_elem_seq || !ops->set_cmd_elem_seq || !ops->get_cmd_elem_code) {
+ dev_err(mailbox->dev, "Incomplete mailbox CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head || !ops->acquire_resp_queue_lock ||
+ !ops->release_resp_queue_lock || !ops->get_resp_elem_seq || !ops->set_resp_elem_seq ||
+ !ops->get_resp_elem_status || !ops->set_resp_elem_status) {
+ dev_err(mailbox->dev, "Incomplete mailbox RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->acquire_wait_list_lock || !ops->release_wait_list_lock) {
+ dev_err(mailbox->dev, "Incomplete mailbox wait_list ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+/* Sets the mailbox private data. */
+static inline void gcip_mailbox_set_data(struct gcip_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args)
+{
+ int ret;
+
+ mailbox->dev = args->dev;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_queue = args->cmd_queue;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_queue = args->resp_queue;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->timeout = args->timeout;
+ mailbox->cur_seq = 0;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gcip_mailbox_set_data(mailbox, args->data);
+
+ ret = gcip_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ INIT_LIST_HEAD(&mailbox->wait_list);
+ init_waitqueue_head(&mailbox->wait_list_waitq);
+
+ return 0;
+
+err_unset_data:
+ gcip_mailbox_set_data(mailbox, NULL);
+
+ return ret;
+}
+
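+/*
+ * Example bring-up (a minimal sketch; `my_ops`, the queue buffers, and the
+ * element types are hypothetical and must be provided by the IP driver; every
+ * op checked in gcip_mailbox_set_ops() has to be populated):
+ *
+ *   static const struct gcip_mailbox_ops my_ops = {
+ *       .get_cmd_queue_head = my_get_cmd_queue_head,
+ *       .get_cmd_queue_tail = my_get_cmd_queue_tail,
+ *       ... all other mandatory cmd/resp/wait_list ops ...
+ *   };
+ *
+ *   const struct gcip_mailbox_args args = {
+ *       .dev = my_dev,
+ *       .queue_wrap_bit = BIT(10),
+ *       .cmd_queue = cmd_buf,
+ *       .cmd_elem_size = sizeof(struct my_cmd),
+ *       .resp_queue = resp_buf,
+ *       .resp_elem_size = sizeof(struct my_resp),
+ *       .timeout = 1000,
+ *       .ops = &my_ops,
+ *       .data = my_priv,
+ *   };
+ *
+ *   ret = gcip_mailbox_init(&mailbox, &args);
+ */
+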
+void gcip_mailbox_release(struct gcip_mailbox *mailbox)
+{
+ gcip_mailbox_flush_awaiter(mailbox);
+ gcip_mailbox_set_ops(mailbox, NULL);
+ gcip_mailbox_set_data(mailbox, NULL);
+}
+
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox)
+{
+ void *responses;
+ u32 i;
+ u32 count = 0;
+
+ /* Fetches responses and bumps resp_queue head. */
+ responses = gcip_mailbox_fetch_responses(mailbox, &count);
+ if (IS_ERR(responses)) {
+ dev_err(mailbox->dev, "GCIP mailbox failed on fetching responses: %ld",
+ PTR_ERR(responses));
+ return;
+ }
+ if (count == 0)
+ return;
+
+ for (i = 0; i < count; i++)
+ gcip_mailbox_handle_response(mailbox, responses + mailbox->resp_elem_size * i);
+ /* Responses handled, wake up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+ kfree(responses);
+}
+
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, resp, NULL);
+ if (ret)
+ return ret;
+
+ if (!resp)
+ return 0;
+
+ ret = wait_event_timeout(mailbox->wait_list_waitq,
+ GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_WAITING_RESPONSE,
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret) {
+ dev_dbg(mailbox->dev, "event wait timeout");
+ gcip_mailbox_del_wait_resp(mailbox, resp);
+ return -ETIMEDOUT;
+ }
+ if (GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_OK) {
+ dev_err(mailbox->dev, "Mailbox cmd %u response status %u", GET_CMD_ELEM_CODE(cmd),
+ GET_RESP_ELEM_STATUS(resp));
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
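+/*
+ * Example of a synchronous call (a sketch; struct my_cmd / my_resp are
+ * hypothetical element types matching cmd_elem_size / resp_elem_size):
+ *
+ *   struct my_cmd cmd = { .code = MY_PING };
+ *   struct my_resp resp;
+ *   int ret = gcip_mailbox_send_cmd(&mailbox, &cmd, &resp);
+ *
+ * On success @resp holds the remote's response; -ETIMEDOUT means the remote
+ * never answered within mailbox->timeout, and -ENOMSG means it answered with
+ * a non-OK status.
+ */
+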
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ int ret;
+
+ awaiter = kzalloc(sizeof(*awaiter), GFP_KERNEL);
+ if (!awaiter)
+ return ERR_PTR(-ENOMEM);
+
+ awaiter->resp = resp;
+ awaiter->mailbox = mailbox;
+ awaiter->data = data;
+ awaiter->release_data = mailbox->ops->release_awaiter_data;
+ /* 2 refs: the caller and the timeout handler. */
+ refcount_set(&awaiter->refs, 2);
+
+ INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
+ schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
+ if (ret)
+ goto err_free_awaiter;
+
+ return awaiter;
+
+err_free_awaiter:
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ kfree(awaiter);
+ return ERR_PTR(ret);
+}
+
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_del_wait_resp(awaiter->mailbox, awaiter->resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+}
+
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (cancel_delayed_work_sync(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
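+/*
+ * Example of the asynchronous flow (a sketch; my_cmd, my_resp, and my_data
+ * are hypothetical):
+ *
+ *   awaiter = gcip_mailbox_put_cmd(&mailbox, &cmd, &resp, my_data);
+ *   if (IS_ERR(awaiter))
+ *       return PTR_ERR(awaiter);
+ *   ...
+ *   // Once done with the awaiter (e.g. in handle_awaiter_arrived(), or
+ *   // after gcip_mailbox_cancel_awaiter() when tearing down), drop the
+ *   // caller's reference:
+ *   gcip_mailbox_release_awaiter(awaiter);
+ */
+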
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ int ret;
+
+ /* Fetches (at most) one response. */
+ ret = gcip_mailbox_fetch_one_response(mailbox, resp);
+ if (!ret)
+ return;
+
+ gcip_mailbox_handle_response(mailbox, resp);
+
+ /* Response handled, wake up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
new file mode 100644
index 0000000..3e18051
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A simple memory allocator to help allocating reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-mem-pool.h>
+
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, phys_addr_t base_paddr,
+ size_t size, size_t granule)
+{
+ int ret;
+
+ if (!base_paddr || granule == 0)
+ return -EINVAL;
+ if (base_paddr % granule || size % granule)
+ return -EINVAL;
+ pool->gen_pool = gen_pool_create(ilog2(granule), -1);
+ if (!pool->gen_pool) {
+ dev_err(dev, "gcip memory pool failed to allocate gen_pool");
+ return -ENOMEM;
+ }
+ ret = gen_pool_add(pool->gen_pool, base_paddr, size, -1);
+ if (ret) {
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+ dev_err(dev, "gcip failed to add memory to mem pool: %d", ret);
+ return ret;
+ }
+ pool->dev = dev;
+ pool->granule = granule;
+ pool->base_paddr = base_paddr;
+ return 0;
+}
+
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool)
+{
+ if (!pool->gen_pool)
+ return;
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+}
+
+phys_addr_t gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size)
+{
+ unsigned long addr;
+ size_t aligned_size = ALIGN(size, pool->granule);
+
+ addr = gen_pool_alloc(pool->gen_pool, aligned_size);
+ if (!addr)
+ return 0;
+ dev_dbg(pool->dev, "%s @ size = %#zx paddr=%#lx", __func__, size, addr);
+ return (phys_addr_t)addr;
+}
+
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, phys_addr_t paddr, size_t size)
+{
+ unsigned long addr = paddr;
+
+ dev_dbg(pool->dev, "%s @ size = %#zx paddr=%#lx", __func__, size, addr);
+ size = ALIGN(size, pool->granule);
+ gen_pool_free(pool->gen_pool, addr, size);
+}
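+
+/*
+ * Example usage (a sketch; the base address and sizes are illustrative and
+ * must be multiples of the granule):
+ *
+ *   struct gcip_mem_pool pool;
+ *   phys_addr_t paddr;
+ *   int ret;
+ *
+ *   ret = gcip_mem_pool_init(&pool, dev, 0x80000000, SZ_1M, SZ_4K);
+ *   if (ret)
+ *       return ret;
+ *   paddr = gcip_mem_pool_alloc(&pool, SZ_8K);
+ *   if (!paddr)
+ *       ... allocation failed ...
+ *   ...
+ *   gcip_mem_pool_free(&pool, paddr, SZ_8K);
+ *   gcip_mem_pool_exit(&pool);
+ */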
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
new file mode 100644
index 0000000..f557c24
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/eventfd.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-telemetry.h>
+
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args)
+{
+ int err;
+
+ dev_dbg(tel->dev, "Sending KCI %s", tel->name);
+ err = send_kci(args);
+
+ if (err < 0) {
+ dev_err(tel->dev, "KCI %s failed - %d", tel->name, err);
+ return err;
+ }
+
+ if (err > 0) {
+ dev_err(tel->dev, "KCI %s returned %d", tel->name, err);
+ return -EBADMSG;
+ }
+
+ dev_dbg(tel->dev, "KCI %s succeeded", tel->name);
+
+ return 0;
+}
+
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd)
+{
+ struct eventfd_ctx *ctx;
+ ulong flags;
+
+ ctx = eventfd_ctx_fdget(eventfd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = ctx;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+
+ return 0;
+}
+
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+}
+
+/* Copy data out of the log buffer with wrapping. */
+static void copy_with_wrap(struct gcip_telemetry_header *header, void *dest, u32 length, u32 size,
+ void *start)
+{
+ const u32 wrap_bit = size + sizeof(*header);
+ u32 remaining = 0;
+ u32 head = header->head & (wrap_bit - 1);
+
+ if (head + length < size) {
+ memcpy(dest, start + head, length);
+ header->head += length;
+ } else {
+ remaining = size - head;
+ memcpy(dest, start + head, remaining);
+ memcpy(dest + remaining, start, length - remaining);
+ header->head = (header->head & wrap_bit) ^ wrap_bit;
+ header->head |= length - remaining;
+ }
+}
+
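+/*
+ * A worked example of the wrap arithmetic above (a sketch; the numbers are
+ * illustrative): if header->size is 4096 and sizeof(*header) is 16, the queue
+ * body holds size = 4080 bytes and wrap_bit = 0x1000. A head of 0x1010 then
+ * means "offset 16 on the wrapped-around lap": head & (wrap_bit - 1) yields
+ * the byte offset, a copy that crosses the end of the body is split in two,
+ * and the stored head toggles its 0x1000 bit on each lap.
+ */
+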
+void gcip_telemetry_fw_log(struct gcip_telemetry *log)
+{
+ struct device *dev = log->dev;
+ struct gcip_telemetry_header *header = log->header;
+ struct gcip_log_entry_header entry;
+ u8 *start;
+ const size_t queue_size = header->size - sizeof(*header);
+ const size_t max_length = queue_size - sizeof(entry);
+ char *buffer = kmalloc(max_length + 1, GFP_ATOMIC);
+
+ if (!buffer) {
+ header->head = header->tail;
+ return;
+ }
+ start = (u8 *)header + sizeof(*header);
+
+ while (header->head != header->tail) {
+ copy_with_wrap(header, &entry, sizeof(entry), queue_size, start);
+ if (entry.length == 0 || entry.length > max_length) {
+ header->head = header->tail;
+ dev_err(dev, "log queue is corrupted");
+ break;
+ }
+ copy_with_wrap(header, buffer, entry.length, queue_size, start);
+ buffer[entry.length] = 0;
+
+ if (entry.code > GCIP_FW_DMESG_LOG_LEVEL)
+ continue;
+
+ switch (entry.code) {
+ case GCIP_FW_LOG_LEVEL_VERBOSE:
+ case GCIP_FW_LOG_LEVEL_DEBUG:
+ dev_dbg(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_WARN:
+ dev_warn(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_ERROR:
+ dev_err(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_INFO:
+ default:
+ dev_info(dev, "%s", buffer);
+ break;
+ }
+ }
+ kfree(buffer);
+}
+
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace)
+{
+ struct gcip_telemetry_header *header = trace->header;
+
+ header->head = header->tail;
+}
+
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel)
+{
+ spin_lock(&tel->state_lock);
+
+ if (tel->state == GCIP_TELEMETRY_ENABLED && tel->header->head != tel->header->tail)
+ schedule_work(&tel->work);
+
+ spin_unlock(&tel->state_lock);
+}
+
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif)
+{
+ mutex_lock(&tel->mmap_lock);
+ tel->mmapped_count += dif;
+ mutex_unlock(&tel->mmap_lock);
+}
+
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args)
+{
+ int ret;
+
+ mutex_lock(&tel->mmap_lock);
+
+ if (!tel->mmapped_count) {
+ ret = mmap(args);
+
+ if (!ret)
+ tel->mmapped_count = 1;
+ } else {
+ ret = -EBUSY;
+ dev_warn(tel->dev, "%s is already mmapped %ld times", tel->name,
+ tel->mmapped_count);
+ }
+
+ mutex_unlock(&tel->mmap_lock);
+
+ return ret;
+}
+
+/* Worker for processing log/trace buffers. */
+static void gcip_telemetry_worker(struct work_struct *work)
+{
+ struct gcip_telemetry *tel = container_of(work, struct gcip_telemetry, work);
+ u32 prev_head;
+ ulong flags;
+
+ /*
+ * Loops while telemetry is enabled, there is data to be consumed, and the previous
+ * iteration made progress. If another IRQ arrives just after the last head != tail
+ * check, we should get another worker scheduled.
+ */
+ do {
+ spin_lock_irqsave(&tel->state_lock, flags);
+ if (tel->state != GCIP_TELEMETRY_ENABLED) {
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ return;
+ }
+
+ prev_head = tel->header->head;
+ if (tel->header->head != tel->header->tail) {
+ read_lock(&tel->ctx_lock);
+ if (tel->ctx)
+ eventfd_signal(tel->ctx, 1);
+ else
+ tel->fallback_fn(tel);
+ read_unlock(&tel->ctx_lock);
+ }
+
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ msleep(GCIP_TELEMETRY_LOG_RECHECK_DELAY);
+ } while (tel->header->head != tel->header->tail && tel->header->head != prev_head);
+}
+
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *))
+{
+ if (!is_power_of_2(size) || size <= sizeof(struct gcip_telemetry_header)) {
+ dev_err(dev,
+ "Size of GCIP telemetry buffer must be a power of 2 and greater than %zu.",
+ sizeof(struct gcip_telemetry_header));
+ return -EINVAL;
+ }
+
+ rwlock_init(&tel->ctx_lock);
+ tel->name = name;
+ tel->dev = dev;
+
+ tel->header = vaddr;
+ tel->header->head = 0;
+ tel->header->tail = 0;
+ tel->header->size = size;
+ tel->header->entries_dropped = 0;
+
+ tel->ctx = NULL;
+
+ spin_lock_init(&tel->state_lock);
+ INIT_WORK(&tel->work, gcip_telemetry_worker);
+ tel->fallback_fn = fallback_fn;
+ tel->state = GCIP_TELEMETRY_ENABLED;
+ mutex_init(&tel->mmap_lock);
+ tel->mmapped_count = 0;
+
+ return 0;
+}
+
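+/*
+ * Example bring-up for a log buffer (a sketch; the buffer, its size, and the
+ * surrounding driver objects are hypothetical; size must be a power of 2
+ * larger than the header):
+ *
+ *   ret = gcip_telemetry_init(dev, &log, "log", log_buf_vaddr, SZ_16K,
+ *                             gcip_telemetry_fw_log);
+ *   if (ret)
+ *       return ret;
+ *   // From the mailbox IRQ handler, once the remote bumps the tail:
+ *   gcip_telemetry_irq_handler(&log);
+ *   ...
+ *   gcip_telemetry_exit(&log);
+ */
+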
+void gcip_telemetry_exit(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&tel->state_lock, flags);
+ /* Prevents racing with the IRQ handler or worker. */
+ tel->state = GCIP_TELEMETRY_INVALID;
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ cancel_work_sync(&tel->work);
+
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+}