author      Victor Hsu <hsuvictor@google.com>  2021-10-09 11:34:32 +0800
committer   Victor Hsu <hsuvictor@google.com>  2021-12-06 10:04:04 +0800
commit      b8963e75368763f8753b912541ee6bb58c2473d3 (patch)
tree        8aa6a3898dd792d4295615f6e0e7e03a980b2f07
parent      54ea3e6dbee3ae9ea18df6f80ffd5efe2af63685 (diff)
download    cnss2-b8963e75368763f8753b912541ee6bb58c2473d3.tar.gz
wcn6740: Add CNSS platform driver modules
Bug: 202572584
Change-Id: I6e2e81606910c6fe9c179492be0b14f1819a119f
-rw-r--r--  Kbuild  22
-rw-r--r--  Makefile  30
-rw-r--r--  cnss2/Kconfig  107
-rw-r--r--  cnss2/Makefile  18
-rw-r--r--  cnss2/bus.c  564
-rw-r--r--  cnss2/bus.h  67
-rw-r--r--  cnss2/coexistence_service_v01.c  100
-rw-r--r--  cnss2/coexistence_service_v01.h  52
-rw-r--r--  cnss2/debug.c  969
-rw-r--r--  cnss2/debug.h  87
-rw-r--r--  cnss2/genl.c  213
-rw-r--r--  cnss2/genl.h  17
-rw-r--r--  cnss2/ip_multimedia_subsystem_private_service_v01.c  450
-rw-r--r--  cnss2/ip_multimedia_subsystem_private_service_v01.h  134
-rw-r--r--  cnss2/main.c  3470
-rw-r--r--  cnss2/main.h  593
-rw-r--r--  cnss2/pci.c  6292
-rw-r--r--  cnss2/pci.h  266
-rw-r--r--  cnss2/power.c  1283
-rw-r--r--  cnss2/qmi.c  3448
-rw-r--r--  cnss2/qmi.h  316
-rw-r--r--  cnss2/reg.h  333
-rw-r--r--  cnss_genl/Kconfig  9
-rw-r--r--  cnss_genl/Makefile  6
-rw-r--r--  cnss_genl/cnss_nl.c  229
-rw-r--r--  cnss_prealloc/Kconfig  9
-rw-r--r--  cnss_prealloc/Makefile  6
-rw-r--r--  cnss_prealloc/cnss_prealloc.c  294
-rw-r--r--  cnss_utils/Kconfig  20
-rw-r--r--  cnss_utils/Makefile  11
-rw-r--r--  cnss_utils/cnss_plat_ipc_qmi.c  838
-rw-r--r--  cnss_utils/cnss_plat_ipc_service_v01.c  317
-rw-r--r--  cnss_utils/cnss_plat_ipc_service_v01.h  102
-rw-r--r--  cnss_utils/cnss_utils.c  500
-rw-r--r--  cnss_utils/device_management_service_v01.c  75
-rw-r--r--  cnss_utils/device_management_service_v01.h  39
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.c  5495
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.h  1271
-rw-r--r--  inc/cnss2.h  278
-rw-r--r--  inc/cnss_nl.h  103
-rw-r--r--  inc/cnss_plat_ipc_qmi.h  30
-rw-r--r--  inc/cnss_prealloc.h  16
-rw-r--r--  inc/cnss_utils.h  38
43 files changed, 28517 insertions, 0 deletions
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..9ff1fd2
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,22 @@
+# ifeq ($(CONFIG_CNSS_OUT_OF_TREE),y)
+KBUILD_CPPFLAGS += -DCONFIG_CNSS_OUT_OF_TREE
+# endif
+
+ifeq ($(CONFIG_CNSS2_DEBUG),y)
+KBUILD_CPPFLAGS += -DCONFIG_CNSS2_DEBUG
+endif
+
+ifeq ($(CONFIG_CNSS2_QMI),y)
+KBUILD_CPPFLAGS += -DCONFIG_CNSS2_QMI
+endif
+
+# CONFIG_CNSS_PLAT_IPC_QMI_SVC should never be "y" here since it
+# can only be compiled as a module from out-of-kernel-tree source.
+ifeq ($(CONFIG_CNSS_PLAT_IPC_QMI_SVC),m)
+KBUILD_CPPFLAGS += -DCONFIG_CNSS_PLAT_IPC_QMI_SVC
+endif
+
+obj-$(CONFIG_CNSS2) += cnss2/
+obj-$(CONFIG_CNSS_GENL) += cnss_genl/
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+obj-y += cnss_utils/
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6d709f9
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,30 @@
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+M ?= $(shell pwd)
+
+# $(WLAN_PLATFORM_ROOT) has to be an absolute path
+ifeq ($(WLAN_PLATFORM_ROOT),)
+WLAN_PLATFORM_ROOT = $(shell pwd)
+
+# If it reaches here, compilation is probably without Android.mk,
+# so enable all flags (including debug flag CONFIG_CNSS2_DEBUG) by
+# default.
+KBUILD_OPTIONS := WLAN_PLATFORM_ROOT=$(WLAN_PLATFORM_ROOT)
+KBUILD_OPTIONS += CONFIG_CNSS_OUT_OF_TREE=y
+KBUILD_OPTIONS += CONFIG_CNSS2=m
+KBUILD_OPTIONS += CONFIG_CNSS2_QMI=y
+KBUILD_OPTIONS += CONFIG_CNSS2_DEBUG=y
+KBUILD_OPTIONS += CONFIG_CNSS_QMI_SVC=m
+KBUILD_OPTIONS += CONFIG_CNSS_PLAT_IPC_QMI_SVC=m
+KBUILD_OPTIONS += CONFIG_CNSS_GENL=m
+KBUILD_OPTIONS += CONFIG_WCNSS_MEM_PRE_ALLOC=m
+KBUILD_OPTIONS += CONFIG_CNSS_UTILS=m
+endif
+
+all:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+ $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/cnss2/Kconfig b/cnss2/Kconfig
new file mode 100644
index 0000000..58e514c
--- /dev/null
+++ b/cnss2/Kconfig
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CNSS2
+ tristate "CNSS2 Platform Driver for Wi-Fi Module"
+# depends on !CNSS && PCI_MSM
+ select CNSS_PLAT_IPC_QMI_SVC
+ help
+ This module adds support for the Connectivity Subsystem (CNSS) used
+ for PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+ This driver also adds support to integrate the WLAN module with the
+ subsystem restart framework.
+
+config CNSS2_DEBUG
+ bool "CNSS2 Platform Driver Debug Support"
+ depends on CNSS2
+ help
+ This option enables CNSS2 platform driver debug support, which
+ primarily includes providing additional verbose logs for certain
+ features, enabling kernel panic for certain cases to aid
+ debugging, and enabling any other debug mechanisms.
+
+config CNSS2_QMI
+ bool "CNSS2 Platform Driver QMI support"
+ select CNSS_QMI_SVC
+ depends on CNSS2
+ help
+ CNSS2 platform driver uses the QMI framework to communicate with WLAN
+ firmware. It exchanges boot handshake messages with WLAN
+ firmware, which include hardware and software capabilities and
+ configurations. It also sends WLAN on/off control messages to
+ firmware over the QMI channel.
+
+config CNSS_ASYNC
+ bool "Enable/disable CNSS platform driver asynchronous probe"
+ depends on CNSS2
+ help
+ If enabled, CNSS platform driver would do asynchronous probe.
+ Using asynchronous probe will allow CNSS platform driver to
+ probe in parallel with other device drivers and will help to
+ reduce kernel boot time.
+
+config BUS_AUTO_SUSPEND
+ bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers"
+ depends on CNSS2
+ depends on PCI
+ help
+ Runtime Power Management is supported for PCIe based WLAN drivers.
+ The feature enables the cld WLAN driver to suspend the PCIe bus when
+ the APPS processor is awake, based on driver inactivity with the firmware.
+ The feature uses the kernel runtime power management framework to
+ track bus access clients and to synchronize driver activity
+ during system PM.
+ This config flag controls the feature per target. The feature
+ requires CNSS driver support.
+
+config CNSS_QCA6290
+ bool "Enable CNSS QCA6290 chipset specific changes"
+ depends on CNSS2
+ help
+ This enables the changes from WLAN host driver that are specific to
+ CNSS QCA6290 chipset.
+ These changes are needed to support the new hardware architecture
+ for CNSS QCA6290 chipset.
+
+config CNSS_QCA6390
+ bool "Enable CNSS QCA6390 chipset specific changes"
+ depends on CNSS2
+ help
+ This enables the changes from WLAN host driver that are specific to
+ CNSS QCA6390 chipset.
+ These changes are needed to support the new hardware architecture
+ for CNSS QCA6390 chipset.
+
+config CNSS_EMULATION
+ bool "Enable specific changes for emulation hardware"
+ depends on CNSS2
+ help
+ This enables the changes from WLAN drivers that are specific to
+ emulation hardware.
+ These changes are needed for WLAN drivers to support and meet the
+ requirement of emulation hardware.
+
+config CNSS_QCA6490
+ bool "Enable CNSS QCA6490 chipset specific changes"
+ depends on CNSS2
+ help
+ This enables the changes from WLAN host driver that are specific to
+ CNSS QCA6490 chipset.
+ These changes are needed to support the new hardware architecture
+ for CNSS QCA6490 chipset.
+
+config CNSS_WCN7850
+ bool "Enable CNSS WCN7850 chipset specific changes"
+ depends on CNSS2
+ help
+ This enables the changes from WLAN host driver that are specific to
+ CNSS WCN7850 chipset.
+ These changes are needed to support the new hardware architecture
+ for CNSS WCN7850 chipset.
+
+config CNSS_REQ_FW_DIRECT
+ bool "Enable request_firmware_direct for firmware or configuration file"
+ depends on CNSS2
+ help
+ This enables calling request_firmware_direct for the firmware or
+ configuration file to avoid the 60s timeout when searching for the
+ file in user space fails.
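For reference, a minimal sketch of how CONFIG_CNSS_REQ_FW_DIRECT is typically consumed on the driver side; cnss_request_fw() is a hypothetical wrapper name used only for illustration, and the real driver may wire this differently.

#include <linux/firmware.h>

/* Hypothetical wrapper; illustrates the compile-time selection only. */
static int cnss_request_fw(const struct firmware **fw, const char *name,
			   struct device *dev)
{
#ifdef CONFIG_CNSS_REQ_FW_DIRECT
	/* Fail fast if the file is missing; skip the user-space fallback. */
	return request_firmware_direct(fw, name, dev);
#else
	return request_firmware(fw, name, dev);
#endif
}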
diff --git a/cnss2/Makefile b/cnss2/Makefile
new file mode 100644
index 0000000..2e37ac0
--- /dev/null
+++ b/cnss2/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CNSS2) += cnss2.o
+
+ifeq ($(CONFIG_CNSS_OUT_OF_TREE),y)
+ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc
+ccflags-y += -I$(WLAN_PLATFORM_ROOT)/cnss_utils
+ccflags-y += -DCONFIG_WCN_GOOGLE
+else
+ccflags-y += -I$(srctree)/drivers/net/wireless/cnss_utils/
+endif
+cnss2-y := main.o
+cnss2-y += bus.o
+cnss2-y += debug.o
+cnss2-y += pci.o
+cnss2-y += power.o
+cnss2-y += genl.o
+cnss2-$(CONFIG_CNSS2_QMI) += qmi.o coexistence_service_v01.o ip_multimedia_subsystem_private_service_v01.o
diff --git a/cnss2/bus.c b/cnss2/bus.c
new file mode 100644
index 0000000..9d53bd9
--- /dev/null
+++ b/cnss2/bus.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */
+
+#include "bus.h"
+#include "debug.h"
+#include "pci.h"
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+ if (!dev)
+ return CNSS_BUS_NONE;
+
+ if (!dev->bus)
+ return CNSS_BUS_NONE;
+
+ if (memcmp(dev->bus->name, "pci", 3) == 0)
+ return CNSS_BUS_PCI;
+ else
+ return CNSS_BUS_NONE;
+}
+
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id)
+{
+ switch (device_id) {
+ case QCA6174_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ return CNSS_BUS_PCI;
+ default:
+ cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
+ return CNSS_BUS_NONE;
+ }
+}
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+ if (!dev)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_get_pci_priv(to_pci_dev(dev));
+ default:
+ return NULL;
+ }
+}
+
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+ void *bus_priv;
+
+ if (!dev)
+ return cnss_get_plat_priv(NULL);
+
+ bus_priv = cnss_bus_dev_to_bus_priv(dev);
+ if (!bus_priv)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_priv_to_plat_priv(bus_priv);
+ default:
+ return NULL;
+ }
+}
+
+int cnss_bus_init(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_init(plat_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ cnss_pci_deinit(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+ char *prefix_name, char *name)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_add_fw_prefix_name(plat_priv->bus_priv,
+ prefix_name, name);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_load_m3(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_alloc_qdss_mem(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ cnss_pci_free_qdss_mem(plat_priv->bus_priv);
+ return;
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_get_wake_msi(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_force_fw_assert_hdlr(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_qmi_send_get(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_qmi_send_put(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t)
+{
+ struct cnss_plat_data *plat_priv =
+ from_timer(plat_priv, t, fw_boot_timer);
+
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_fw_boot_timeout_hdlr(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv, bool in_panic)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_collect_dump_info(plat_priv->bus_priv,
+ in_panic);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+void cnss_bus_device_crashed(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_device_crashed(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_call_driver_probe(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_call_driver_remove(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_dev_powerup(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_dev_shutdown(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_dev_crash_shutdown(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_dev_ramdump(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_register_driver_hdlr(plat_priv->bus_priv, data);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_unregister_driver_hdlr(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+ int modem_current_status)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_call_driver_modem_status(plat_priv->bus_priv,
+ modem_current_status);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_status status)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_update_status(plat_priv->bus_priv, status);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_update_uevent(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_status status, void *data)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_call_driver_uevent(plat_priv->bus_priv,
+ status, data);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pcie_is_device_down(plat_priv->bus_priv);
+ default:
+ cnss_pr_dbg("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return 0;
+ }
+}
+
+int cnss_bus_check_link_status(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_check_link_status(plat_priv->bus_priv);
+ default:
+ cnss_pr_dbg("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return 0;
+ }
+}
+
+int cnss_bus_recover_link_down(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_recover_link_down(plat_priv->bus_priv);
+ default:
+ cnss_pr_dbg("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
+ u32 *val, bool raw_access)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_debug_reg_read(plat_priv->bus_priv, offset,
+ val, raw_access);
+ default:
+ cnss_pr_dbg("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return 0;
+ }
+}
+
+int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
+ u32 val, bool raw_access)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_debug_reg_write(plat_priv->bus_priv, offset,
+ val, raw_access);
+ default:
+ cnss_pr_dbg("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return 0;
+ }
+}
+
+int cnss_bus_get_iova(struct cnss_plat_data *plat_priv, u64 *addr, u64 *size)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_get_iova(plat_priv->bus_priv, addr, size);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_get_iova_ipa(struct cnss_plat_data *plat_priv, u64 *addr,
+ u64 *size)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_get_iova_ipa(plat_priv->bus_priv, addr, size);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
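bus.c is a thin dispatch layer: every cnss_bus_* entry point validates plat_priv and fans out on plat_priv->bus_type. A minimal caller-side sketch, assuming a hypothetical helper name (cnss_powerup_sketch); the real call sites live in main.c.

#include <linux/errno.h>

#include "bus.h"

/* Hypothetical caller: shows the intended flow through the dispatch layer. */
static int cnss_powerup_sketch(struct device *dev)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);

	if (!plat_priv)
		return -ENODEV;

	/* For CNSS_BUS_PCI this resolves to cnss_pci_dev_powerup(). */
	return cnss_bus_dev_powerup(plat_priv);
}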
diff --git a/cnss2/bus.h b/cnss2/bus.h
new file mode 100644
index 0000000..fd21424
--- /dev/null
+++ b/cnss2/bus.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_BUS_H
+#define _CNSS_BUS_H
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID 0x168C
+#define QCA6174_DEVICE_ID 0x003E
+#define QCA6174_REV_ID_OFFSET 0x08
+#define QCA6174_REV3_VERSION 0x5020000
+#define QCA6174_REV3_2_VERSION 0x5030000
+#define QCA6290_VENDOR_ID 0x17CB
+#define QCA6290_DEVICE_ID 0x1100
+#define QCA6390_VENDOR_ID 0x17CB
+#define QCA6390_DEVICE_ID 0x1101
+#define QCA6490_VENDOR_ID 0x17CB
+#define QCA6490_DEVICE_ID 0x1103
+#define WCN7850_VENDOR_ID 0x17CB
+#define WCN7850_DEVICE_ID 0x1107
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_bus_init(struct cnss_plat_data *plat_priv);
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+ char *prefix_name, char *name);
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv);
+void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv,
+ bool in_panic);
+void cnss_bus_device_crashed(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv);
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data);
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+ int modem_current_status);
+int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_status status);
+int cnss_bus_update_uevent(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_status status, void *data);
+int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv);
+int cnss_bus_check_link_status(struct cnss_plat_data *plat_priv);
+int cnss_bus_recover_link_down(struct cnss_plat_data *plat_priv);
+int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
+ u32 *val, bool raw_access);
+int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
+ u32 val, bool raw_access);
+int cnss_bus_get_iova(struct cnss_plat_data *plat_priv, u64 *addr, u64 *size);
+int cnss_bus_get_iova_ipa(struct cnss_plat_data *plat_priv, u64 *addr,
+ u64 *size);
+#endif /* _CNSS_BUS_H */
diff --git a/cnss2/coexistence_service_v01.c b/cnss2/coexistence_service_v01.c
new file mode 100644
index 0000000..8fe1454
--- /dev/null
+++ b/cnss2/coexistence_service_v01.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "coexistence_service_v01.h"
+
+struct qmi_elem_info coex_antenna_switch_to_wlan_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_wlan_req_msg_v01,
+ antenna),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info coex_antenna_switch_to_wlan_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_wlan_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_wlan_resp_msg_v01,
+ grant_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_wlan_resp_msg_v01,
+ grant),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info coex_antenna_switch_to_mdm_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_mdm_req_msg_v01,
+ antenna),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info coex_antenna_switch_to_mdm_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ coex_antenna_switch_to_mdm_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/cnss2/coexistence_service_v01.h b/cnss2/coexistence_service_v01.h
new file mode 100644
index 0000000..03ee7e9
--- /dev/null
+++ b/cnss2/coexistence_service_v01.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef COEXISTENCE_SERVICE_V01_H
+#define COEXISTENCE_SERVICE_V01_H
+
+#define COEX_SERVICE_ID_V01 0x22
+#define COEX_SERVICE_VERS_V01 0x01
+
+#define COEX_SERVICE_MAX_MSG_LEN 8204
+
+#define QMI_COEX_SWITCH_ANTENNA_TO_WLAN_RESP_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_MDM_RESP_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01 0x0042
+
+#define COEX_ANTENNA_BAND_2GHZ_CHAIN0_V01 ((u64)0x0000000000000001ULL)
+#define COEX_ANTENNA_BAND_2GHZ_CHAIN1_V01 ((u64)0x0000000000000002ULL)
+#define COEX_ANTENNA_BAND_5GHZ_CHAIN0_V01 ((u64)0x0000000000000004ULL)
+#define COEX_ANTENNA_BAND_5GHZ_CHAIN1_V01 ((u64)0x0000000000000008ULL)
+
+struct coex_antenna_switch_to_wlan_req_msg_v01 {
+ u64 antenna;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info coex_antenna_switch_to_wlan_req_msg_v01_ei[];
+
+struct coex_antenna_switch_to_wlan_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 grant_valid;
+ u64 grant;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_WLAN_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info coex_antenna_switch_to_wlan_resp_msg_v01_ei[];
+
+struct coex_antenna_switch_to_mdm_req_msg_v01 {
+ u64 antenna;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info coex_antenna_switch_to_mdm_req_msg_v01_ei[];
+
+struct coex_antenna_switch_to_mdm_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_MDM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info coex_antenna_switch_to_mdm_resp_msg_v01_ei[];
+
+#endif
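These qmi_elem_info tables describe the TLV encoding that the in-kernel QMI helpers (<linux/soc/qcom/qmi.h>) use to marshal the request and response structs. A hedged sketch of how qmi.c might send the switch-to-WLAN request with them; the qmi_handle (coex_hdl) and the 5-second timeout are assumptions for the example.

#include <linux/jiffies.h>
#include <linux/soc/qcom/qmi.h>

#include "coexistence_service_v01.h"

/* Illustrative only: standard qmi_txn_init/qmi_send_request/qmi_txn_wait flow. */
static int coex_switch_antenna_to_wlan(struct qmi_handle *coex_hdl, u64 antenna)
{
	struct coex_antenna_switch_to_wlan_req_msg_v01 req = { .antenna = antenna };
	struct coex_antenna_switch_to_wlan_resp_msg_v01 resp = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(coex_hdl, &txn,
			   coex_antenna_switch_to_wlan_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(coex_hdl, NULL, &txn,
			       QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
			       COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
			       coex_antenna_switch_to_wlan_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, msecs_to_jiffies(5000));
	if (ret < 0)
		return ret;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EIO;
}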
diff --git a/cnss2/debug.c b/cnss2/debug.c
new file mode 100644
index 0000000..06e1a9a
--- /dev/null
+++ b/cnss2/debug.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "main.h"
+#include "bus.h"
+#include "debug.h"
+#include "pci.h"
+
+#define MMIO_REG_ACCESS_MEM_TYPE 0xFF
+#define MMIO_REG_RAW_ACCESS_MEM_TYPE 0xFE
+
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+void *cnss_ipc_log_context;
+void *cnss_ipc_log_long_context;
+#endif
+
+static int cnss_pin_connect_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *cnss_priv = s->private;
+
+ seq_puts(s, "Pin connect results\n");
+ seq_printf(s, "FW power pin result: %04x\n",
+ cnss_priv->pin_result.fw_pwr_pin_result);
+ seq_printf(s, "FW PHY IO pin result: %04x\n",
+ cnss_priv->pin_result.fw_phy_io_pin_result);
+ seq_printf(s, "FW RF pin result: %04x\n",
+ cnss_priv->pin_result.fw_rf_pin_result);
+ seq_printf(s, "Host pin result: %04x\n",
+ cnss_priv->pin_result.host_pin_result);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int cnss_pin_connect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_pin_connect_show, inode->i_private);
+}
+
+static const struct file_operations cnss_pin_connect_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_pin_connect_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_stats_show_state(struct seq_file *s,
+ struct cnss_plat_data *plat_priv)
+{
+ enum cnss_driver_state i;
+ int skip = 0;
+ unsigned long state;
+
+ seq_printf(s, "\nState: 0x%lx(", plat_priv->driver_state);
+ for (i = 0, state = plat_priv->driver_state; state != 0;
+ state >>= 1, i++) {
+ if (!(state & 0x1))
+ continue;
+
+ if (skip++)
+ seq_puts(s, " | ");
+
+ switch (i) {
+ case CNSS_QMI_WLFW_CONNECTED:
+ seq_puts(s, "QMI_WLFW_CONNECTED");
+ continue;
+ case CNSS_FW_MEM_READY:
+ seq_puts(s, "FW_MEM_READY");
+ continue;
+ case CNSS_FW_READY:
+ seq_puts(s, "FW_READY");
+ continue;
+ case CNSS_IN_COLD_BOOT_CAL:
+ seq_puts(s, "IN_COLD_BOOT_CAL");
+ continue;
+ case CNSS_DRIVER_LOADING:
+ seq_puts(s, "DRIVER_LOADING");
+ continue;
+ case CNSS_DRIVER_UNLOADING:
+ seq_puts(s, "DRIVER_UNLOADING");
+ continue;
+ case CNSS_DRIVER_IDLE_RESTART:
+ seq_puts(s, "IDLE_RESTART");
+ continue;
+ case CNSS_DRIVER_IDLE_SHUTDOWN:
+ seq_puts(s, "IDLE_SHUTDOWN");
+ continue;
+ case CNSS_DRIVER_PROBED:
+ seq_puts(s, "DRIVER_PROBED");
+ continue;
+ case CNSS_DRIVER_RECOVERY:
+ seq_puts(s, "DRIVER_RECOVERY");
+ continue;
+ case CNSS_FW_BOOT_RECOVERY:
+ seq_puts(s, "FW_BOOT_RECOVERY");
+ continue;
+ case CNSS_DEV_ERR_NOTIFY:
+ seq_puts(s, "DEV_ERR");
+ continue;
+ case CNSS_DRIVER_DEBUG:
+ seq_puts(s, "DRIVER_DEBUG");
+ continue;
+ case CNSS_COEX_CONNECTED:
+ seq_puts(s, "COEX_CONNECTED");
+ continue;
+ case CNSS_IMS_CONNECTED:
+ seq_puts(s, "IMS_CONNECTED");
+ continue;
+ case CNSS_IN_SUSPEND_RESUME:
+ seq_puts(s, "IN_SUSPEND_RESUME");
+ continue;
+ case CNSS_IN_REBOOT:
+ seq_puts(s, "IN_REBOOT");
+ continue;
+ case CNSS_COLD_BOOT_CAL_DONE:
+ seq_puts(s, "COLD_BOOT_CAL_DONE");
+ continue;
+ case CNSS_IN_PANIC:
+ seq_puts(s, "IN_PANIC");
+ continue;
+ case CNSS_QMI_DEL_SERVER:
+ seq_puts(s, "DEL_SERVER_IN_PROGRESS");
+ continue;
+ case CNSS_QMI_DMS_CONNECTED:
+ seq_puts(s, "DMS_CONNECTED");
+ continue;
+ case CNSS_DAEMON_CONNECTED:
+ seq_puts(s, "DAEMON_CONNECTED");
+ continue;
+ }
+
+ seq_printf(s, "UNKNOWN-%d", i);
+ }
+ seq_puts(s, ")\n");
+
+ return 0;
+}
+
+static int cnss_stats_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+
+ cnss_stats_show_state(s, plat_priv);
+
+ return 0;
+}
+
+static int cnss_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_stats_show, inode->i_private);
+}
+
+static const struct file_operations cnss_stats_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_stats_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static ssize_t cnss_dev_boot_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ struct cnss_pci_data *pci_priv;
+ char buf[64];
+ char *cmd;
+ unsigned int len = 0;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ cmd = buf;
+ cnss_pr_dbg("Received dev_boot debug command: %s\n", cmd);
+
+ if (sysfs_streq(cmd, "on")) {
+ ret = cnss_power_on_device(plat_priv);
+ } else if (sysfs_streq(cmd, "off")) {
+ cnss_power_off_device(plat_priv);
+ } else if (sysfs_streq(cmd, "enumerate")) {
+ ret = cnss_pci_init(plat_priv);
+ } else if (sysfs_streq(cmd, "powerup")) {
+ set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ CNSS_EVENT_SYNC, NULL);
+ } else if (sysfs_streq(cmd, "shutdown")) {
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ 0, NULL);
+ clear_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+ } else {
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (sysfs_streq(cmd, "download")) {
+ set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+ ret = cnss_pci_start_mhi(pci_priv);
+ } else if (sysfs_streq(cmd, "linkup")) {
+ ret = cnss_resume_pci_link(pci_priv);
+ } else if (sysfs_streq(cmd, "linkdown")) {
+ ret = cnss_suspend_pci_link(pci_priv);
+ } else if (sysfs_streq(cmd, "assert")) {
+ cnss_pr_info("FW Assert triggered for debug\n");
+ ret = cnss_force_fw_assert(&pci_priv->pci_dev->dev);
+ } else if (sysfs_streq(cmd, "set_cbc_done")) {
+ cnss_pr_dbg("Force set cold boot cal done status\n");
+ set_bit(CNSS_COLD_BOOT_CAL_DONE,
+ &plat_priv->driver_state);
+ } else {
+ cnss_pr_err("Device boot debugfs command is invalid\n");
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static int cnss_dev_boot_debug_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/dev_boot\n");
+ seq_puts(s, "<action> can be one of below:\n");
+ seq_puts(s, "on: turn on device power, assert WLAN_EN\n");
+ seq_puts(s, "off: de-assert WLAN_EN, turn off device power\n");
+ seq_puts(s, "enumerate: de-assert PERST, enumerate PCIe\n");
+ seq_puts(s, "download: download FW and do QMI handshake with FW\n");
+ seq_puts(s, "linkup: bring up PCIe link\n");
+ seq_puts(s, "linkdown: bring down PCIe link\n");
+ seq_puts(s, "powerup: full power on sequence to boot device, download FW and do QMI handshake with FW\n");
+ seq_puts(s, "shutdown: full power off sequence to shutdown device\n");
+ seq_puts(s, "assert: trigger firmware assert\n");
+ seq_puts(s, "set_cbc_done: Set cold boot calibration done status\n");
+
+ return 0;
+}
+
+static int cnss_dev_boot_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_dev_boot_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_dev_boot_debug_fops = {
+ .read = seq_read,
+ .write = cnss_dev_boot_debug_write,
+ .release = single_release,
+ .open = cnss_dev_boot_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_reg_read_debug_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+
+ mutex_lock(&plat_priv->dev_lock);
+ if (!plat_priv->diag_reg_read_buf) {
+ seq_puts(s, "\nUsage: echo <mem_type> <offset> <data_len> > <debugfs_path>/cnss/reg_read\n");
+ seq_puts(s, "Use mem_type = 0xff for register read by IO access, data_len will be ignored\n");
+ seq_puts(s, "Use mem_type = 0xfe for register read by raw IO access which skips sanity checks, data_len will be ignored\n");
+ seq_puts(s, "Use other mem_type for register read by QMI\n");
+ mutex_unlock(&plat_priv->dev_lock);
+ return 0;
+ }
+
+ seq_printf(s, "\nRegister read, address: 0x%x memory type: 0x%x length: 0x%x\n\n",
+ plat_priv->diag_reg_read_addr,
+ plat_priv->diag_reg_read_mem_type,
+ plat_priv->diag_reg_read_len);
+
+ seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4,
+ plat_priv->diag_reg_read_buf,
+ plat_priv->diag_reg_read_len, false);
+
+ plat_priv->diag_reg_read_len = 0;
+ kfree(plat_priv->diag_reg_read_buf);
+ plat_priv->diag_reg_read_buf = NULL;
+ mutex_unlock(&plat_priv->dev_lock);
+
+ return 0;
+}
+
+static ssize_t cnss_reg_read_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_offset, mem_type;
+ u32 data_len = 0, reg_val = 0;
+ u8 *reg_buf = NULL;
+ const char *delim = " ";
+ int ret = 0;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &data_len))
+ return -EINVAL;
+
+ if (mem_type == MMIO_REG_ACCESS_MEM_TYPE ||
+ mem_type == MMIO_REG_RAW_ACCESS_MEM_TYPE) {
+ ret = cnss_bus_debug_reg_read(plat_priv, reg_offset, &reg_val,
+ mem_type ==
+ MMIO_REG_RAW_ACCESS_MEM_TYPE);
+ if (ret)
+ return ret;
+ cnss_pr_dbg("Read 0x%x from register offset 0x%x\n", reg_val,
+ reg_offset);
+ return count;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Firmware is not ready yet\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&plat_priv->dev_lock);
+ kfree(plat_priv->diag_reg_read_buf);
+ plat_priv->diag_reg_read_buf = NULL;
+
+ reg_buf = kzalloc(data_len, GFP_KERNEL);
+ if (!reg_buf) {
+ mutex_unlock(&plat_priv->dev_lock);
+ return -ENOMEM;
+ }
+
+ ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, reg_offset,
+ mem_type, data_len,
+ reg_buf);
+ if (ret) {
+ kfree(reg_buf);
+ mutex_unlock(&plat_priv->dev_lock);
+ return ret;
+ }
+
+ plat_priv->diag_reg_read_addr = reg_offset;
+ plat_priv->diag_reg_read_mem_type = mem_type;
+ plat_priv->diag_reg_read_len = data_len;
+ plat_priv->diag_reg_read_buf = reg_buf;
+ mutex_unlock(&plat_priv->dev_lock);
+
+ return count;
+}
+
+static int cnss_reg_read_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_reg_read_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_reg_read_debug_fops = {
+ .read = seq_read,
+ .write = cnss_reg_read_debug_write,
+ .open = cnss_reg_read_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_reg_write_debug_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs_path>/cnss/reg_write\n");
+ seq_puts(s, "Use mem_type = 0xff for register write by IO access\n");
+ seq_puts(s, "Use mem_type = 0xfe for register write by raw IO access which skips sanity checks\n");
+ seq_puts(s, "Use other mem_type for register write by QMI\n");
+
+ return 0;
+}
+
+static ssize_t cnss_reg_write_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_offset, mem_type, reg_val;
+ const char *delim = " ";
+ int ret = 0;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_val))
+ return -EINVAL;
+
+ if (mem_type == MMIO_REG_ACCESS_MEM_TYPE ||
+ mem_type == MMIO_REG_RAW_ACCESS_MEM_TYPE) {
+ ret = cnss_bus_debug_reg_write(plat_priv, reg_offset, reg_val,
+ mem_type ==
+ MMIO_REG_RAW_ACCESS_MEM_TYPE);
+ if (ret)
+ return ret;
+ cnss_pr_dbg("Wrote 0x%x to register offset 0x%x\n", reg_val,
+ reg_offset);
+ return count;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Firmware is not ready yet\n");
+ return -EINVAL;
+ }
+
+ ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, reg_offset, mem_type,
+ sizeof(u32),
+ (u8 *)&reg_val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int cnss_reg_write_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_reg_write_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_reg_write_debug_fops = {
+ .read = seq_read,
+ .write = cnss_reg_write_debug_write,
+ .open = cnss_reg_write_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static ssize_t cnss_runtime_pm_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ struct cnss_pci_data *pci_priv;
+ char buf[64];
+ char *cmd;
+ unsigned int len = 0;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ cmd = buf;
+
+ if (sysfs_streq(cmd, "usage_count")) {
+ cnss_pci_pm_runtime_show_usage_count(pci_priv);
+ } else if (sysfs_streq(cmd, "request_resume")) {
+ ret = cnss_pci_pm_request_resume(pci_priv);
+ } else if (sysfs_streq(cmd, "resume")) {
+ ret = cnss_pci_pm_runtime_resume(pci_priv);
+ } else if (sysfs_streq(cmd, "get")) {
+ ret = cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_CNSS);
+ } else if (sysfs_streq(cmd, "get_noresume")) {
+ cnss_pci_pm_runtime_get_noresume(pci_priv, RTPM_ID_CNSS);
+ } else if (sysfs_streq(cmd, "put_autosuspend")) {
+ ret = cnss_pci_pm_runtime_put_autosuspend(pci_priv,
+ RTPM_ID_CNSS);
+ } else if (sysfs_streq(cmd, "put_noidle")) {
+ cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_CNSS);
+ } else if (sysfs_streq(cmd, "mark_last_busy")) {
+ cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+ } else if (sysfs_streq(cmd, "resume_bus")) {
+ cnss_pci_resume_bus(pci_priv);
+ } else if (sysfs_streq(cmd, "suspend_bus")) {
+ cnss_pci_suspend_bus(pci_priv);
+ } else {
+ cnss_pr_err("Runtime PM debugfs command is invalid\n");
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static int cnss_runtime_pm_debug_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+ struct cnss_pci_data *pci_priv;
+ int i;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/runtime_pm\n");
+ seq_puts(s, "<action> can be one of below:\n");
+ seq_puts(s, "usage_count: get runtime PM usage count\n");
+ seq_puts(s, "request_resume: do async runtime PM resume\n");
+ seq_puts(s, "resume: do sync runtime PM resume\n");
+ seq_puts(s, "get: do runtime PM get\n");
+ seq_puts(s, "get_noresume: do runtime PM get noresume\n");
+ seq_puts(s, "put_noidle: do runtime PM put noidle\n");
+ seq_puts(s, "put_autosuspend: do runtime PM put autosuspend\n");
+ seq_puts(s, "mark_last_busy: do runtime PM mark last busy\n");
+ seq_puts(s, "resume_bus: do bus resume only\n");
+ seq_puts(s, "suspend_bus: do bus suspend only\n");
+
+ seq_puts(s, "\nStats:\n");
+ seq_printf(s, "%s: %u\n", "get count",
+ atomic_read(&pci_priv->pm_stats.runtime_get));
+ seq_printf(s, "%s: %u\n", "put count",
+ atomic_read(&pci_priv->pm_stats.runtime_put));
+ seq_printf(s, "%-10s%-10s%-10s%-15s%-15s\n",
+ "id:", "get", "put", "get time(us)", "put time(us)");
+ for (i = 0; i < RTPM_ID_MAX; i++) {
+ seq_printf(s, "%d%-9s", i, ":");
+ seq_printf(s, "%-10d",
+ atomic_read(&pci_priv->pm_stats.runtime_get_id[i]));
+ seq_printf(s, "%-10d",
+ atomic_read(&pci_priv->pm_stats.runtime_put_id[i]));
+ seq_printf(s, "%-15llu",
+ pci_priv->pm_stats.runtime_get_timestamp_id[i]);
+ seq_printf(s, "%-15llu\n",
+ pci_priv->pm_stats.runtime_put_timestamp_id[i]);
+ }
+
+ return 0;
+}
+
+static int cnss_runtime_pm_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_runtime_pm_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_runtime_pm_debug_fops = {
+ .read = seq_read,
+ .write = cnss_runtime_pm_debug_write,
+ .open = cnss_runtime_pm_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static ssize_t cnss_control_params_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ char *cmd;
+ u32 val;
+ unsigned int len = 0;
+ const char *delim = " ";
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+ if (!sptr)
+ return -EINVAL;
+ cmd = token;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val))
+ return -EINVAL;
+
+ if (strcmp(cmd, "quirks") == 0)
+ plat_priv->ctrl_params.quirks = val;
+ else if (strcmp(cmd, "mhi_timeout") == 0)
+ plat_priv->ctrl_params.mhi_timeout = val;
+ else if (strcmp(cmd, "mhi_m2_timeout") == 0)
+ plat_priv->ctrl_params.mhi_m2_timeout = val;
+ else if (strcmp(cmd, "qmi_timeout") == 0)
+ plat_priv->ctrl_params.qmi_timeout = val;
+ else if (strcmp(cmd, "bdf_type") == 0)
+ plat_priv->ctrl_params.bdf_type = val;
+ else if (strcmp(cmd, "time_sync_period") == 0)
+ plat_priv->ctrl_params.time_sync_period = val;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static int cnss_show_quirks_state(struct seq_file *s,
+ struct cnss_plat_data *plat_priv)
+{
+ enum cnss_debug_quirks i;
+ int skip = 0;
+ unsigned long state;
+
+ seq_printf(s, "quirks: 0x%lx (", plat_priv->ctrl_params.quirks);
+ for (i = 0, state = plat_priv->ctrl_params.quirks;
+ state != 0; state >>= 1, i++) {
+ if (!(state & 0x1))
+ continue;
+ if (skip++)
+ seq_puts(s, " | ");
+
+ switch (i) {
+ case LINK_DOWN_SELF_RECOVERY:
+ seq_puts(s, "LINK_DOWN_SELF_RECOVERY");
+ continue;
+ case SKIP_DEVICE_BOOT:
+ seq_puts(s, "SKIP_DEVICE_BOOT");
+ continue;
+ case USE_CORE_ONLY_FW:
+ seq_puts(s, "USE_CORE_ONLY_FW");
+ continue;
+ case SKIP_RECOVERY:
+ seq_puts(s, "SKIP_RECOVERY");
+ continue;
+ case QMI_BYPASS:
+ seq_puts(s, "QMI_BYPASS");
+ continue;
+ case ENABLE_WALTEST:
+ seq_puts(s, "WALTEST");
+ continue;
+ case ENABLE_PCI_LINK_DOWN_PANIC:
+ seq_puts(s, "PCI_LINK_DOWN_PANIC");
+ continue;
+ case FBC_BYPASS:
+ seq_puts(s, "FBC_BYPASS");
+ continue;
+ case ENABLE_DAEMON_SUPPORT:
+ seq_puts(s, "DAEMON_SUPPORT");
+ continue;
+ case DISABLE_DRV:
+ seq_puts(s, "DISABLE_DRV");
+ continue;
+ case DISABLE_IO_COHERENCY:
+ seq_puts(s, "DISABLE_IO_COHERENCY");
+ continue;
+ case IGNORE_PCI_LINK_FAILURE:
+ seq_puts(s, "IGNORE_PCI_LINK_FAILURE");
+ continue;
+ case DISABLE_TIME_SYNC:
+ seq_puts(s, "DISABLE_TIME_SYNC");
+ continue;
+ }
+
+ seq_printf(s, "UNKNOWN-%d", i);
+ }
+ seq_puts(s, ")\n");
+ return 0;
+}
+
+static int cnss_control_params_debug_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *cnss_priv = s->private;
+
+ seq_puts(s, "\nUsage: echo <params_name> <value> > <debugfs_path>/cnss/control_params\n");
+ seq_puts(s, "<params_name> can be one of below:\n");
+ seq_puts(s, "quirks: Debug quirks for driver\n");
+ seq_puts(s, "mhi_timeout: Timeout for MHI operation in milliseconds\n");
+ seq_puts(s, "qmi_timeout: Timeout for QMI message in milliseconds\n");
+ seq_puts(s, "bdf_type: Type of board data file to be downloaded\n");
+ seq_puts(s, "time_sync_period: Time period to do time sync with device in milliseconds\n");
+
+ seq_puts(s, "\nCurrent value:\n");
+ cnss_show_quirks_state(s, cnss_priv);
+ seq_printf(s, "mhi_timeout: %u\n", cnss_priv->ctrl_params.mhi_timeout);
+ seq_printf(s, "mhi_m2_timeout: %u\n",
+ cnss_priv->ctrl_params.mhi_m2_timeout);
+ seq_printf(s, "qmi_timeout: %u\n", cnss_priv->ctrl_params.qmi_timeout);
+ seq_printf(s, "bdf_type: %u\n", cnss_priv->ctrl_params.bdf_type);
+ seq_printf(s, "time_sync_period: %u\n",
+ cnss_priv->ctrl_params.time_sync_period);
+
+ return 0;
+}
+
+static int cnss_control_params_debug_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, cnss_control_params_debug_show,
+ inode->i_private);
+}
+
+static const struct file_operations cnss_control_params_debug_fops = {
+ .read = seq_read,
+ .write = cnss_control_params_debug_write,
+ .open = cnss_control_params_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static ssize_t cnss_dynamic_feature_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ int ret = 0;
+ u64 val;
+
+ ret = kstrtou64_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ plat_priv->dynamic_feature = val;
+ ret = cnss_wlfw_dynamic_feature_mask_send_sync(plat_priv);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static int cnss_dynamic_feature_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *cnss_priv = s->private;
+
+ seq_printf(s, "dynamic_feature: 0x%llx\n", cnss_priv->dynamic_feature);
+
+ return 0;
+}
+
+static int cnss_dynamic_feature_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, cnss_dynamic_feature_show,
+ inode->i_private);
+}
+
+static const struct file_operations cnss_dynamic_feature_fops = {
+ .read = seq_read,
+ .write = cnss_dynamic_feature_write,
+ .open = cnss_dynamic_feature_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_CNSS2_DEBUG
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ struct dentry *root_dentry = plat_priv->root_dentry;
+
+ debugfs_create_file("dev_boot", 0600, root_dentry, plat_priv,
+ &cnss_dev_boot_debug_fops);
+ debugfs_create_file("reg_read", 0600, root_dentry, plat_priv,
+ &cnss_reg_read_debug_fops);
+ debugfs_create_file("reg_write", 0600, root_dentry, plat_priv,
+ &cnss_reg_write_debug_fops);
+ debugfs_create_file("runtime_pm", 0600, root_dentry, plat_priv,
+ &cnss_runtime_pm_debug_fops);
+ debugfs_create_file("control_params", 0600, root_dentry, plat_priv,
+ &cnss_control_params_debug_fops);
+ debugfs_create_file("dynamic_feature", 0600, root_dentry, plat_priv,
+ &cnss_dynamic_feature_fops);
+
+ return 0;
+}
+#else
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+#endif
+
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+ root_dentry = debugfs_create_dir("cnss", 0);
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ cnss_pr_err("Unable to create debugfs %d\n", ret);
+ goto out;
+ }
+
+ plat_priv->root_dentry = root_dentry;
+
+ debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
+ &cnss_pin_connect_fops);
+ debugfs_create_file("stats", 0644, root_dentry, plat_priv,
+ &cnss_stats_fops);
+
+ cnss_create_debug_only_node(plat_priv);
+
+out:
+ return ret;
+}
+
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+ debugfs_remove_recursive(plat_priv->root_dentry);
+}
+#else
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+ plat_priv->root_dentry = NULL;
+ return 0;
+}
+
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+void cnss_debug_ipc_log_print(void *log_ctx, char *process, const char *fn,
+ const char *log_level, char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+
+ if (log_level)
+ printk("%scnss: %pV", log_level, &vaf);
+
+ ipc_log_string(log_ctx, "[%s] %s: %pV", process, fn, &vaf);
+
+ va_end(va_args);
+}
+
+static int cnss_ipc_logging_init(void)
+{
+ cnss_ipc_log_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+ "cnss", 0);
+ if (!cnss_ipc_log_context) {
+ cnss_pr_err("Unable to create IPC log context\n");
+ return -EINVAL;
+ }
+
+ cnss_ipc_log_long_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+ "cnss-long", 0);
+ if (!cnss_ipc_log_long_context) {
+ cnss_pr_err("Unable to create IPC long log context\n");
+ ipc_log_context_destroy(cnss_ipc_log_context);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cnss_ipc_logging_deinit(void)
+{
+ if (cnss_ipc_log_long_context) {
+ ipc_log_context_destroy(cnss_ipc_log_long_context);
+ cnss_ipc_log_long_context = NULL;
+ }
+
+ if (cnss_ipc_log_context) {
+ ipc_log_context_destroy(cnss_ipc_log_context);
+ cnss_ipc_log_context = NULL;
+ }
+}
+#else
+static int cnss_ipc_logging_init(void) { return 0; }
+static void cnss_ipc_logging_deinit(void) {}
+void cnss_debug_ipc_log_print(void *log_ctx, char *process, const char *fn,
+ const char *log_level, char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+
+ if (log_level)
+ printk("%scnss: %pV", log_level, &vaf);
+
+ va_end(va_args);
+}
+#endif
+
+int cnss_debug_init(void)
+{
+ return cnss_ipc_logging_init();
+}
+
+void cnss_debug_deinit(void)
+{
+ cnss_ipc_logging_deinit();
+}
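The dev_boot, reg_read, reg_write, runtime_pm and control_params nodes created above are driven from user space by writing the command strings described in their help text. A small user-space sketch equivalent to "echo powerup > <debugfs_path>/cnss/dev_boot"; the /sys/kernel/debug mount point is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumes debugfs is mounted at /sys/kernel/debug. */
	int fd = open("/sys/kernel/debug/cnss/dev_boot", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Same effect as: echo powerup > /sys/kernel/debug/cnss/dev_boot */
	if (write(fd, "powerup", strlen("powerup")) < 0)
		perror("write");
	close(fd);
	return 0;
}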
diff --git a/cnss2/debug.h b/cnss2/debug.h
new file mode 100644
index 0000000..7796d57
--- /dev/null
+++ b/cnss2/debug.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_DEBUG_H
+#define _CNSS_DEBUG_H
+
+#include <linux/printk.h>
+
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+#include <linux/ipc_logging.h>
+#include <asm/current.h>
+
+extern void *cnss_ipc_log_context;
+extern void *cnss_ipc_log_long_context;
+
+#ifdef CONFIG_CNSS2_DEBUG
+#define CNSS_IPC_LOG_PAGES 100
+#else
+#define CNSS_IPC_LOG_PAGES 50
+#endif
+#define cnss_debug_log_print(_x...) \
+ cnss_debug_ipc_log_print(cnss_ipc_log_context, _x)
+
+#define cnss_debug_log_long_print(_x...) \
+ cnss_debug_ipc_log_print(cnss_ipc_log_long_context, _x)
+#else
+#define cnss_debug_log_print(_x...) \
+ cnss_debug_ipc_log_print((void *)NULL, _x)
+#define cnss_debug_log_long_print(_x...) \
+ cnss_debug_ipc_log_print((void *)NULL, _x)
+#endif
+
+#define proc_name (in_irq() ? "irq" : \
+ (in_softirq() ? "soft_irq" : current->comm))
+
+#define cnss_pr_err(_fmt, ...) \
+ cnss_debug_log_print(proc_name, __func__, \
+ KERN_ERR, _fmt, ##__VA_ARGS__)
+
+#define cnss_pr_warn(_fmt, ...) \
+ cnss_debug_log_print(proc_name, __func__, \
+ KERN_WARNING, _fmt, ##__VA_ARGS__)
+
+#define cnss_pr_info(_fmt, ...) \
+ cnss_debug_log_print(proc_name, __func__, \
+ KERN_INFO, _fmt, ##__VA_ARGS__)
+
+#define cnss_pr_dbg(_fmt, ...) \
+ cnss_debug_log_print(proc_name, __func__, \
+ KERN_DEBUG, _fmt, ##__VA_ARGS__)
+
+#define cnss_pr_vdbg(_fmt, ...) \
+ cnss_debug_log_long_print(proc_name, __func__, \
+ KERN_DEBUG, _fmt, ##__VA_ARGS__)
+
+#define cnss_pr_buf(_fmt, ...) \
+ cnss_debug_log_long_print(proc_name, __func__, \
+ NULL, _fmt, ##__VA_ARGS__)
+
+#ifdef CONFIG_CNSS2_DEBUG
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ BUG(); \
+ } \
+ } while (0)
+#else
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+#endif
+
+#define cnss_fatal_err(_fmt, ...) \
+ cnss_pr_err("fatal: " _fmt, ##__VA_ARGS__)
+
+int cnss_debug_init(void);
+void cnss_debug_deinit(void);
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv);
+void cnss_debug_ipc_log_print(void *log_ctx, char *process, const char *fn,
+ const char *log_level, char *fmt, ...);
+#endif /* _CNSS_DEBUG_H */
diff --git a/cnss2/genl.c b/cnss2/genl.c
new file mode 100644
index 0000000..2819a1b
--- /dev/null
+++ b/cnss2/genl.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "cnss_genl: " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_GENL_FAMILY_NAME "cnss-genl"
+#define CNSS_GENL_MCAST_GROUP_NAME "cnss-genl-grp"
+#define CNSS_GENL_VERSION 1
+#define CNSS_GENL_DATA_LEN_MAX (15 * 1024)
+#define CNSS_GENL_STR_LEN_MAX 16
+
+enum {
+ CNSS_GENL_ATTR_MSG_UNSPEC,
+ CNSS_GENL_ATTR_MSG_TYPE,
+ CNSS_GENL_ATTR_MSG_FILE_NAME,
+ CNSS_GENL_ATTR_MSG_TOTAL_SIZE,
+ CNSS_GENL_ATTR_MSG_SEG_ID,
+ CNSS_GENL_ATTR_MSG_END,
+ CNSS_GENL_ATTR_MSG_DATA_LEN,
+ CNSS_GENL_ATTR_MSG_DATA,
+ __CNSS_GENL_ATTR_MAX,
+};
+
+#define CNSS_GENL_ATTR_MAX (__CNSS_GENL_ATTR_MAX - 1)
+
+enum {
+ CNSS_GENL_CMD_UNSPEC,
+ CNSS_GENL_CMD_MSG,
+ __CNSS_GENL_CMD_MAX,
+};
+
+#define CNSS_GENL_CMD_MAX (__CNSS_GENL_CMD_MAX - 1)
+
+static struct nla_policy cnss_genl_msg_policy[CNSS_GENL_ATTR_MAX + 1] = {
+ [CNSS_GENL_ATTR_MSG_TYPE] = { .type = NLA_U8 },
+ [CNSS_GENL_ATTR_MSG_FILE_NAME] = { .type = NLA_NUL_STRING,
+ .len = CNSS_GENL_STR_LEN_MAX },
+ [CNSS_GENL_ATTR_MSG_TOTAL_SIZE] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_SEG_ID] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_END] = { .type = NLA_U8 },
+ [CNSS_GENL_ATTR_MSG_DATA_LEN] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_DATA] = { .type = NLA_BINARY,
+ .len = CNSS_GENL_DATA_LEN_MAX },
+};
+
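+/* The receive handler is a no-op; this family is only used to multicast data
+ * (such as QDSS trace segments) from the driver to userspace.
+ */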
+static int cnss_genl_process_msg(struct sk_buff *skb, struct genl_info *info)
+{
+ return 0;
+}
+
+static struct genl_ops cnss_genl_ops[] = {
+ {
+ .cmd = CNSS_GENL_CMD_MSG,
+ .doit = cnss_genl_process_msg,
+ },
+};
+
+static struct genl_multicast_group cnss_genl_mcast_grp[] = {
+ {
+ .name = CNSS_GENL_MCAST_GROUP_NAME,
+ },
+};
+
+static struct genl_family cnss_genl_family = {
+ .id = 0,
+ .hdrsize = 0,
+ .name = CNSS_GENL_FAMILY_NAME,
+ .version = CNSS_GENL_VERSION,
+ .maxattr = CNSS_GENL_ATTR_MAX,
+ .policy = cnss_genl_msg_policy,
+ .module = THIS_MODULE,
+ .ops = cnss_genl_ops,
+ .n_ops = ARRAY_SIZE(cnss_genl_ops),
+ .mcgrps = cnss_genl_mcast_grp,
+ .n_mcgrps = ARRAY_SIZE(cnss_genl_mcast_grp),
+};
+
+static int cnss_genl_send_data(u8 type, char *file_name, u32 total_size,
+ u32 seg_id, u8 end, u32 data_len, u8 *msg_buff)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+ char filename[CNSS_GENL_STR_LEN_MAX + 1];
+
+ cnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
+ type, file_name, total_size, seg_id, end, data_len);
+
+ if (!file_name)
+ strlcpy(filename, "default", sizeof(filename));
+ else
+ strlcpy(filename, file_name, sizeof(filename));
+
+ skb = genlmsg_new(NLMSG_HDRLEN +
+ nla_total_size(sizeof(type)) +
+ nla_total_size(strlen(filename) + 1) +
+ nla_total_size(sizeof(total_size)) +
+ nla_total_size(sizeof(seg_id)) +
+ nla_total_size(sizeof(end)) +
+ nla_total_size(sizeof(data_len)) +
+ nla_total_size(data_len), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg_header = genlmsg_put(skb, 0, 0,
+ &cnss_genl_family, 0,
+ CNSS_GENL_CMD_MSG);
+ if (!msg_header) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_TYPE, type);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_string(skb, CNSS_GENL_ATTR_MSG_FILE_NAME, filename);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_TOTAL_SIZE, total_size);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_SEG_ID, seg_id);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_END, end);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_DATA_LEN, data_len);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put(skb, CNSS_GENL_ATTR_MSG_DATA, data_len, msg_buff);
+ if (ret < 0)
+ goto fail;
+
+ genlmsg_end(skb, msg_header);
+ ret = genlmsg_multicast(&cnss_genl_family, skb, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+		cnss_pr_err("Failed to send genl msg: %d\n", ret);
+
+ return ret;
+fail:
+	cnss_pr_err("Failed to generate genl msg: %d\n", ret);
+ if (skb)
+ nlmsg_free(skb);
+ return ret;
+}
+
+int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
+{
+ int ret = 0;
+ u8 *msg_buff = buff;
+ u32 remaining = total_size;
+ u32 seg_id = 0;
+ u32 data_len = 0;
+ u8 end = 0;
+ u8 retry;
+
+ cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
+
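+	/* Send the buffer in segments of at most CNSS_GENL_DATA_LEN_MAX bytes.
+	 * Each segment gets one retry after a 100 ms delay, and the last
+	 * segment is flagged with end = 1.
+	 */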
+ while (remaining) {
+ if (remaining > CNSS_GENL_DATA_LEN_MAX) {
+ data_len = CNSS_GENL_DATA_LEN_MAX;
+ } else {
+ data_len = remaining;
+ end = 1;
+ }
+
+ for (retry = 0; retry < 2; retry++) {
+ ret = cnss_genl_send_data(type, file_name, total_size,
+ seg_id, end, data_len,
+ msg_buff);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+
+ if (ret < 0) {
+			cnss_pr_err("Failed to send genl data, ret %d\n", ret);
+ return ret;
+ }
+
+ remaining -= data_len;
+ msg_buff += data_len;
+ seg_id++;
+ }
+
+ return ret;
+}
+
+int cnss_genl_init(void)
+{
+ int ret = 0;
+
+ ret = genl_register_family(&cnss_genl_family);
+ if (ret != 0)
+		cnss_pr_err("genl_register_family failed: %d\n", ret);
+
+ return ret;
+}
+
+void cnss_genl_exit(void)
+{
+ genl_unregister_family(&cnss_genl_family);
+}
diff --git a/cnss2/genl.h b/cnss2/genl.h
new file mode 100644
index 0000000..d38ba5d
--- /dev/null
+++ b/cnss2/genl.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __CNSS_GENL_H__
+#define __CNSS_GENL_H__
+
+enum cnss_genl_msg_type {
+ CNSS_GENL_MSG_TYPE_UNSPEC,
+ CNSS_GENL_MSG_TYPE_QDSS,
+};
+
+int cnss_genl_init(void);
+void cnss_genl_exit(void);
+int cnss_genl_send_msg(void *buff, u8 type,
+ char *file_name, u32 total_size);
+
+#endif
diff --git a/cnss2/ip_multimedia_subsystem_private_service_v01.c b/cnss2/ip_multimedia_subsystem_private_service_v01.c
new file mode 100644
index 0000000..2caf949
--- /dev/null
+++ b/cnss2/ip_multimedia_subsystem_private_service_v01.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */
+
+#include "ip_multimedia_subsystem_private_service_v01.h"
+
+static struct qmi_elem_info ims_private_service_header_value_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = IMS_PRIVATE_SERVICE_HEADER_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset =
+ offsetof(struct ims_private_service_header_value_v01, header),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = IMS_PRIVATE_SERVICE_HEADER_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset =
+ offsetof(struct ims_private_service_header_value_v01, value),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info
+ims_private_service_subscribe_for_indications_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ ims_private_service_subscribe_for_indications_req_msg_v01,
+ mt_invite_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ ims_private_service_subscribe_for_indications_req_msg_v01,
+ mt_invite),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ ims_private_service_subscribe_for_indications_req_msg_v01,
+ wfc_call_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ ims_private_service_subscribe_for_indications_req_msg_v01,
+ wfc_call_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info
+ims_private_service_subscribe_for_indications_rsp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ ims_private_service_subscribe_for_indications_rsp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info ims_private_service_mt_invite_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum ims_subscription_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ subscription_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ iccid_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = IMS_PRIVATE_SERVICE_MAX_ICCID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ iccid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ header_value_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ header_value_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = IMS_PRIVATE_SERVICE_MAX_MT_INVITE_HEADERS_V01,
+ .elem_size =
+ sizeof(struct ims_private_service_header_value_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct ims_private_service_mt_invite_ind_msg_v01,
+ header_value_list),
+ .ei_array = ims_private_service_header_value_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info ims_private_service_wfc_call_status_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ wfc_call_active),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ all_wfc_calls_held_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ all_wfc_calls_held),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ is_wfc_emergency_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ is_wfc_emergency),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ twt_ims_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ twt_ims_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ twt_ims_int_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ twt_ims_int),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ media_quality_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wfc_media_quality_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset =
+ offsetof(struct ims_private_service_wfc_call_status_ind_msg_v01,
+ media_quality),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info
+ims_private_service_wfc_call_twt_config_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_int_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_int),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_upo_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_upo),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_sp_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_sp),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_dl_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_dl),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_config_changed_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_req_msg_v01,
+ twt_sta_config_changed),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info
+ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset =
+ offsetof(struct
+ ims_private_service_wfc_call_twt_config_rsp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
diff --git a/cnss2/ip_multimedia_subsystem_private_service_v01.h b/cnss2/ip_multimedia_subsystem_private_service_v01.h
new file mode 100644
index 0000000..c8cdb59
--- /dev/null
+++ b/cnss2/ip_multimedia_subsystem_private_service_v01.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef IP_MULTIMEDIA_SUBSYSTEM_PRIVATE_SERVICE_V01_H
+#define IP_MULTIMEDIA_SUBSYSTEM_PRIVATE_SERVICE_V01_H
+
+#include <linux/soc/qcom/qmi.h>
+
+#define IMSPRIVATE_SERVICE_ID_V01 0x4D
+#define IMSPRIVATE_SERVICE_VERS_V01 0x01
+
+#define QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_RSP_V01 0x003E
+#define QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01 0x0040
+#define QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01 0x003E
+#define QMI_IMS_PRIVATE_SERVICE_MT_INVITE_IND_V01 0x003F
+#define QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_RSP_V01 0x0041
+#define QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01 0x0041
+
+#define IMS_PRIVATE_SERVICE_MAX_MT_INVITE_HEADERS_V01 15
+#define IMS_PRIVATE_SERVICE_HEADER_STR_LEN_V01 1024
+#define IMS_PRIVATE_SERVICE_MAX_ICCID_LEN_V01 21
+
+enum ims_common_resp_enum_v01 {
+ IMS_COMMON_RESP_ENUM_MIN_VAL_V01 = INT_MIN,
+ IMS_COMMON_MSG_NO_ERR_V01 = 0,
+ IMS_COMMON_MSG_IMS_NOT_READY_V01 = 1,
+ IMS_COMMON_MSG_FILE_NOT_AVAILABLE_V01 = 2,
+ IMS_COMMON_MSG_READ_FAILED_V01 = 3,
+ IMS_COMMON_MSG_WRITE_FAILED_V01 = 4,
+ IMS_COMMON_MSG_OTHER_INTERNAL_ERR_V01 = 5,
+ IMS_COMMON_RESP_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum ims_subscription_type_enum_v01 {
+ IMS_SUBSCRIPTION_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ IMS_SUBSCRIPTION_TYPE_NONE_V01 = -1,
+ IMS_SUBSCRIPTION_TYPE_PRIMARY_V01 = 0,
+ IMS_SUBSCRIPTION_TYPE_SECONDARY_V01 = 1,
+ IMS_SUBSCRIPTION_TYPE_TERTIARY_V01 = 2,
+ IMS_SUBSCRIPTION_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wfc_media_quality_v01 {
+ WFC_MEDIA_QUALITY_MIN_VAL_V01 = INT_MIN,
+ WFC_MEDIA_QUAL_NOT_AVAILABLE_V01 = 0,
+ WFC_MEDIA_QUAL_BAD_V01 = 1,
+ WFC_MEDIA_QUAL_GOOD_V01 = 2,
+ WFC_MEDIA_QUAL_EXCELLENT_V01 = 3,
+ WFC_MEDIA_QUALITY_MAX_VAL_V01 = INT_MAX,
+};
+
+struct ims_private_service_header_value_v01 {
+ char header[IMS_PRIVATE_SERVICE_HEADER_STR_LEN_V01 + 1];
+ char value[IMS_PRIVATE_SERVICE_HEADER_STR_LEN_V01 + 1];
+};
+
+struct ims_private_service_subscribe_for_indications_req_msg_v01 {
+ u8 mt_invite_valid;
+ u8 mt_invite;
+ u8 wfc_call_status_valid;
+ u8 wfc_call_status;
+};
+
+#define IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN 8
+extern struct qmi_elem_info
+ims_private_service_subscribe_for_indications_req_msg_v01_ei[];
+
+struct ims_private_service_subscribe_for_indications_rsp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_RSP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info
+ims_private_service_subscribe_for_indications_rsp_msg_v01_ei[];
+
+struct ims_private_service_mt_invite_ind_msg_v01 {
+ enum ims_subscription_type_enum_v01 subscription_type;
+ u8 iccid_valid;
+ char iccid[IMS_PRIVATE_SERVICE_MAX_ICCID_LEN_V01 + 1];
+ u8 header_value_list_valid;
+ u32 header_value_list_len;
+ struct ims_private_service_header_value_v01
+ header_value_list[IMS_PRIVATE_SERVICE_MAX_MT_INVITE_HEADERS_V01];
+};
+
+#define IMS_PRIVATE_SERVICE_MT_INVITE_IND_MSG_V01_MAX_MSG_LEN 30815
+extern struct qmi_elem_info ims_private_service_mt_invite_ind_msg_v01_ei[];
+
+struct ims_private_service_wfc_call_status_ind_msg_v01 {
+ u8 wfc_call_active;
+ u8 all_wfc_calls_held_valid;
+ u8 all_wfc_calls_held;
+ u8 is_wfc_emergency_valid;
+ u8 is_wfc_emergency;
+ u8 twt_ims_start_valid;
+ u64 twt_ims_start;
+ u8 twt_ims_int_valid;
+ u16 twt_ims_int;
+ u8 media_quality_valid;
+ enum wfc_media_quality_v01 media_quality;
+};
+
+#define IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_MSG_V01_MAX_MSG_LEN 35
+extern struct qmi_elem_info
+ims_private_service_wfc_call_status_ind_msg_v01_ei[];
+
+struct ims_private_service_wfc_call_twt_config_req_msg_v01 {
+ u8 twt_sta_start_valid;
+ u64 twt_sta_start;
+ u8 twt_sta_int_valid;
+ u16 twt_sta_int;
+ u8 twt_sta_upo_valid;
+ u16 twt_sta_upo;
+ u8 twt_sta_sp_valid;
+ u16 twt_sta_sp;
+ u8 twt_sta_dl_valid;
+ u16 twt_sta_dl;
+ u8 twt_sta_config_changed_valid;
+ u8 twt_sta_config_changed;
+};
+
+#define IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN 35
+extern struct qmi_elem_info
+ims_private_service_wfc_call_twt_config_req_msg_v01_ei[];
+
+struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_RSP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info
+ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei[];
+
+#endif
diff --git a/cnss2/main.c b/cnss2/main.c
new file mode 100644
index 0000000..018fb81
--- /dev/null
+++ b/cnss2/main.c
@@ -0,0 +1,3470 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/reboot.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+#include <linux/timer.h>
+#include <linux/cnss_plat_ipc_qmi.h>
+#if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
+#include <soc/qcom/minidump.h>
+#endif
+
+#include "main.h"
+#include "bus.h"
+#include "debug.h"
+#include "genl.h"
+
+#define CNSS_DUMP_FORMAT_VER 0x11
+#define CNSS_DUMP_FORMAT_VER_V2 0x22
+#define CNSS_DUMP_MAGIC_VER_V2 0x42445953
+#define CNSS_DUMP_NAME "CNSS_WLAN"
+#define CNSS_DUMP_DESC_SIZE 0x1000
+#define CNSS_DUMP_SEG_VER 0x1
+#define RECOVERY_DELAY_MS 100
+#define FILE_SYSTEM_READY 1
+#define FW_READY_TIMEOUT 20000
+#define FW_ASSERT_TIMEOUT 5000
+#define CNSS_EVENT_PENDING 2989
+#define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS 50
+
+#define CNSS_QUIRKS_DEFAULT 0
+#ifdef CONFIG_CNSS_EMULATION
+#define CNSS_MHI_TIMEOUT_DEFAULT 90000
+#define CNSS_MHI_M2_TIMEOUT_DEFAULT 2000
+#define CNSS_QMI_TIMEOUT_DEFAULT 90000
+#else
+#define CNSS_MHI_TIMEOUT_DEFAULT 0
+#define CNSS_MHI_M2_TIMEOUT_DEFAULT 25
+#define CNSS_QMI_TIMEOUT_DEFAULT 10000
+#endif
+#define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF
+#define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000
+#define CNSS_DMS_QMI_CONNECTION_WAIT_MS 50
+#define CNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
+#define CNSS_DAEMON_CONNECT_TIMEOUT_MS 30000
+#define CNSS_CAL_DB_FILE_NAME "wlfw_cal_db.bin"
+
+enum cnss_cal_db_op {
+ CNSS_CAL_DB_UPLOAD,
+ CNSS_CAL_DB_DOWNLOAD,
+ CNSS_CAL_DB_INVALID_OP,
+};
+
+static struct cnss_plat_data *plat_env;
+
+static DECLARE_RWSEM(cnss_pm_sem);
+
+static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
+ "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
+ "utfbd30.bin", "epping30.bin", "evicted30.bin"
+};
+
+static struct cnss_fw_files FW_FILES_DEFAULT = {
+ "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
+ "utfbd.bin", "epping.bin", "evicted.bin"
+};
+
+struct cnss_driver_event {
+ struct list_head list;
+ enum cnss_driver_event_type type;
+ bool sync;
+ struct completion complete;
+ int ret;
+ void *data;
+};
+
+static void cnss_set_plat_priv(struct platform_device *plat_dev,
+ struct cnss_plat_data *plat_priv)
+{
+ plat_env = plat_priv;
+}
+
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
+{
+ return plat_env;
+}
+
+/**
+ * cnss_get_mem_seg_count - Get the segment count for a memory type
+ * @type: memory type
+ * @seg: pointer used to return the segment count
+ *
+ * Return: 0 on success, negative value on failure
+ */
+int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg)
+{
+ struct cnss_plat_data *plat_priv;
+
+ plat_priv = cnss_get_plat_priv(NULL);
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (type) {
+ case CNSS_REMOTE_MEM_TYPE_FW:
+ *seg = plat_priv->fw_mem_seg_len;
+ break;
+ case CNSS_REMOTE_MEM_TYPE_QDSS:
+ *seg = plat_priv->qdss_mem_seg_len;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_mem_seg_count);
+
+/**
+ * cnss_get_mem_segment_info - Get segment info for a given memory type
+ * @type: memory type
+ * @segment: array used to return the segment info
+ * @segment_count: number of entries in @segment
+ *
+ * Return: 0 on success, negative value on failure
+ */
+int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
+ struct cnss_mem_segment segment[],
+ u32 segment_count)
+{
+ struct cnss_plat_data *plat_priv;
+ u32 i;
+
+ plat_priv = cnss_get_plat_priv(NULL);
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (type) {
+ case CNSS_REMOTE_MEM_TYPE_FW:
+ if (segment_count > plat_priv->fw_mem_seg_len)
+ segment_count = plat_priv->fw_mem_seg_len;
+ for (i = 0; i < segment_count; i++) {
+ segment[i].size = plat_priv->fw_mem[i].size;
+ segment[i].va = plat_priv->fw_mem[i].va;
+ segment[i].pa = plat_priv->fw_mem[i].pa;
+ }
+ break;
+ case CNSS_REMOTE_MEM_TYPE_QDSS:
+ if (segment_count > plat_priv->qdss_mem_seg_len)
+ segment_count = plat_priv->qdss_mem_seg_len;
+ for (i = 0; i < segment_count; i++) {
+ segment[i].size = plat_priv->qdss_mem[i].size;
+ segment[i].va = plat_priv->qdss_mem[i].va;
+ segment[i].pa = plat_priv->qdss_mem[i].pa;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_mem_segment_info);
+
+int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
+ enum cnss_feature_v01 feature)
+{
+ if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
+ return -EINVAL;
+
+ plat_priv->feature_list |= 1 << feature;
+ return 0;
+}
+
+int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
+ u64 *feature_list)
+{
+ if (unlikely(!plat_priv))
+ return -EINVAL;
+
+ *feature_list = plat_priv->feature_list;
+ return 0;
+}
+
+static int cnss_pm_notify(struct notifier_block *b,
+ unsigned long event, void *p)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ down_write(&cnss_pm_sem);
+ break;
+ case PM_POST_SUSPEND:
+ up_write(&cnss_pm_sem);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cnss_pm_notifier = {
+ .notifier_call = cnss_pm_notify,
+};
+
+void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
+{
+ if (atomic_inc_return(&plat_priv->pm_count) != 1)
+ return;
+
+ cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_stay_awake(&plat_priv->plat_dev->dev);
+}
+
+void cnss_pm_relax(struct cnss_plat_data *plat_priv)
+{
+ int r = atomic_dec_return(&plat_priv->pm_count);
+
+ WARN_ON(r < 0);
+
+ if (r != 0)
+ return;
+
+ cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_relax(&plat_priv->plat_dev->dev);
+}
+
+void cnss_lock_pm_sem(struct device *dev)
+{
+ down_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_lock_pm_sem);
+
+void cnss_release_pm_sem(struct device *dev)
+{
+ up_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_release_pm_sem);
+
+int cnss_get_fw_files_for_target(struct device *dev,
+ struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version)
+{
+ if (!pfw_files)
+ return -ENODEV;
+
+ switch (target_version) {
+ case QCA6174_REV3_VERSION:
+ case QCA6174_REV3_2_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+ break;
+ default:
+ memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+		cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X\n",
+			    target_type, target_version);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_fw_files_for_target);
+
+int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!cap)
+ return -EINVAL;
+
+ *cap = plat_priv->cap;
+ cnss_pr_dbg("Platform cap_flag is 0x%x\n", cap->cap_flag);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_platform_cap);
+
+void cnss_request_pm_qos(struct device *dev, u32 qos_val)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return;
+
+ cpu_latency_qos_add_request(&plat_priv->qos_request, qos_val);
+}
+EXPORT_SYMBOL(cnss_request_pm_qos);
+
+void cnss_remove_pm_qos(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return;
+
+ cpu_latency_qos_remove_request(&plat_priv->qos_request);
+}
+EXPORT_SYMBOL(cnss_remove_pm_qos);
+
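+/* Send the WLAN host configuration and driver mode to firmware over QMI.
+ * Skipped entirely for QCA6174 and when the QMI_BYPASS quirk is set; for
+ * WALTEST/CCPM modes only the mode message is sent.
+ */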
+int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
+ return 0;
+
+ if (!config || !host_version) {
+ cnss_pr_err("Invalid config or host_version pointer\n");
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
+ mode, config, host_version);
+
+ if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
+ goto skip_cfg;
+
+ ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, config, host_version);
+ if (ret)
+ goto out;
+
+skip_cfg:
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_enable);
+
+int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
+ return 0;
+
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+ cnss_bus_free_qdss_mem(plat_priv);
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_disable);
+
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *output)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
+ data_len, output);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *input)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
+ data_len, input);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+
+int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
+}
+EXPORT_SYMBOL(cnss_set_fw_log_mode);
+
+int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return -EINVAL;
+
+ if (plat_priv->device_id != QCA6490_DEVICE_ID ||
+ !plat_priv->fw_pcie_gen_switch)
+ return -EOPNOTSUPP;
+
+ if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 ||
+ pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01)
+ return -EINVAL;
+
+ cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed);
+ plat_priv->pcie_gen_speed = pcie_gen_speed;
+ return 0;
+}
+EXPORT_SYMBOL(cnss_set_pcie_gen_speed);
+
+static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+ ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ if (plat_priv->hds_enabled)
+ cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_HDS);
+ cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
+
+ ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
+ plat_priv->ctrl_params.bdf_type);
+ if (ret)
+ goto out;
+
+ ret = cnss_bus_load_m3(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ if (cnss_wlfw_qdss_dnld_send_sync(plat_priv))
+		cnss_pr_info("Failed to download qdss configuration file\n");
+
+ return 0;
+out:
+ return ret;
+}
+
+static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv->antenna) {
+ ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
+ if (ret)
+ goto out;
+ }
+
+ if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
+ ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
+ if (ret)
+ goto out;
+ }
+
+ ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+ if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
+ coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
+}
+
+static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
+{
+ u32 i;
+ int ret = 0;
+ struct cnss_plat_ipc_user_config *cfg;
+
+ ret = cnss_qmi_get_dms_mac(plat_priv);
+ if (ret == 0 && plat_priv->dms.mac_valid)
+ goto qmi_send;
+
+	/* The DTSI property use-nv-mac forces use of the DMS MAC address for
+	 * WLAN, so assert if the MAC cannot be fetched from DMS even after
+	 * retries.
+	 */
+ if (plat_priv->use_nv_mac) {
+		/* Check whether the daemon reports that the platform supports
+		 * DMS MAC provisioning
+		 */
+ cfg = cnss_plat_ipc_qmi_user_config();
+ if (cfg) {
+ if (!cfg->dms_mac_addr_supported) {
+ cnss_pr_err("DMS MAC address not supported\n");
+ CNSS_ASSERT(0);
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i < CNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
+ if (plat_priv->dms.mac_valid)
+ break;
+
+ ret = cnss_qmi_get_dms_mac(plat_priv);
+ if (ret == 0)
+ break;
+ msleep(CNSS_DMS_QMI_CONNECTION_WAIT_MS);
+ }
+ if (!plat_priv->dms.mac_valid) {
+ cnss_pr_err("Unable to get MAC from DMS after retries\n");
+ CNSS_ASSERT(0);
+ return -EINVAL;
+ }
+ }
+qmi_send:
+ if (plat_priv->dms.mac_valid)
+ ret =
+ cnss_wlfw_wlan_mac_req_send_sync(plat_priv, plat_priv->dms.mac,
+ ARRAY_SIZE(plat_priv->dms.mac));
+
+ return ret;
+}
+
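+/* Copy the calibration database file to or from the CAL memory region shared
+ * with firmware, using the cnss-daemon file IPC. If the daemon is not yet
+ * connected, wait for it up to the CNSS_TIMEOUT_DAEMON_CONNECTION timeout.
+ */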
+static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
+ enum cnss_cal_db_op op, u32 *size)
+{
+ int ret = 0;
+ u32 timeout = cnss_get_timeout(plat_priv,
+ CNSS_TIMEOUT_DAEMON_CONNECTION);
+
+ if (op >= CNSS_CAL_DB_INVALID_OP)
+ return -EINVAL;
+
+ if (!plat_priv->cbc_file_download) {
+ cnss_pr_info("CAL DB file not required as per BDF\n");
+ return 0;
+ }
+ if (*size == 0) {
+ cnss_pr_err("Invalid cal file size\n");
+ return -EINVAL;
+ }
+ if (!test_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state)) {
+ cnss_pr_info("Waiting for CNSS Daemon connection\n");
+ ret = wait_for_completion_timeout(&plat_priv->daemon_connected,
+ msecs_to_jiffies(timeout));
+		if (!ret) {
+			cnss_pr_err("Daemon not yet connected\n");
+			CNSS_ASSERT(0);
+			return -ETIMEDOUT;
+		}
+ }
+ if (!plat_priv->cal_mem->va) {
+ cnss_pr_err("CAL DB Memory not setup for FW\n");
+ return -EINVAL;
+ }
+
+ /* Copy CAL DB file contents to/from CAL_TYPE_DDR mem allocated to FW */
+ if (op == CNSS_CAL_DB_DOWNLOAD) {
+ cnss_pr_dbg("Initiating Calibration file download to mem\n");
+ ret = cnss_plat_ipc_qmi_file_download(CNSS_CAL_DB_FILE_NAME,
+ plat_priv->cal_mem->va,
+ size);
+ } else {
+ cnss_pr_dbg("Initiating Calibration mem upload to file\n");
+ ret = cnss_plat_ipc_qmi_file_upload(CNSS_CAL_DB_FILE_NAME,
+ plat_priv->cal_mem->va,
+ *size);
+ }
+
+ if (ret)
+ cnss_pr_err("Cal DB file %s %s failure\n",
+ CNSS_CAL_DB_FILE_NAME,
+ op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload");
+ else
+ cnss_pr_dbg("Cal DB file %s %s size %d done\n",
+ CNSS_CAL_DB_FILE_NAME,
+ op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload",
+ *size);
+
+ return ret;
+}
+
+static int cnss_cal_mem_upload_to_file(struct cnss_plat_data *plat_priv)
+{
+ if (plat_priv->cal_file_size > plat_priv->cal_mem->size) {
+ cnss_pr_err("Cal file size is larger than Cal DB Mem size\n");
+ return -EINVAL;
+ }
+ return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_UPLOAD,
+ &plat_priv->cal_file_size);
+}
+
+static int cnss_cal_file_download_to_mem(struct cnss_plat_data *plat_priv,
+ u32 *cal_file_size)
+{
+	/* For download, pass the total size of the allocated CAL DB memory.
+	 * After the cal file is downloaded to memory, its actual size is
+	 * returned through the same pointer.
+	 */
+ *cal_file_size = plat_priv->cal_mem->size;
+ return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_DOWNLOAD,
+ cal_file_size);
+}
+
+static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ u32 cal_file_size = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Processing FW Init Done..\n");
+ del_timer(&plat_priv->fw_boot_timer);
+ set_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+
+ cnss_wlfw_send_pcie_gen_speed_sync(plat_priv);
+
+ if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ }
+
+ if (test_bit(ENABLE_WALTEST, &plat_priv->ctrl_params.quirks)) {
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ CNSS_WALTEST);
+ } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+ cnss_request_antenna_sharing(plat_priv);
+ cnss_cal_file_download_to_mem(plat_priv, &cal_file_size);
+ cnss_wlfw_cal_report_req_send_sync(plat_priv, cal_file_size);
+ plat_priv->cal_time = jiffies;
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ CNSS_CALIBRATION);
+ } else {
+ ret = cnss_setup_dms_mac(plat_priv);
+ ret = cnss_bus_call_driver_probe(plat_priv);
+ }
+
+ if (ret && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ goto out;
+ else if (ret)
+ goto shutdown;
+
+ cnss_vreg_unvote_type(plat_priv, CNSS_VREG_PRIM);
+
+ return 0;
+
+shutdown:
+ cnss_bus_dev_shutdown(plat_priv);
+
+ clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+out:
+ return ret;
+}
+
+static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
+{
+ switch (type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ return "SERVER_ARRIVE";
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ return "SERVER_EXIT";
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ return "REQUEST_MEM";
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ return "FW_MEM_READY";
+ case CNSS_DRIVER_EVENT_FW_READY:
+ return "FW_READY";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ return "COLD_BOOT_CAL_START";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ return "COLD_BOOT_CAL_DONE";
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ return "REGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ return "UNREGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ return "RECOVERY";
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ return "FORCE_FW_ASSERT";
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ return "POWER_UP";
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ return "POWER_DOWN";
+ case CNSS_DRIVER_EVENT_IDLE_RESTART:
+ return "IDLE_RESTART";
+ case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
+ return "IDLE_SHUTDOWN";
+ case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
+ return "IMS_WFC_CALL_IND";
+ case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
+		return "WLFW_TWT_CFG_IND";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+ return "QDSS_TRACE_REQ_MEM";
+ case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
+ return "FW_MEM_FILE_SAVE";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+ return "QDSS_TRACE_FREE";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
+ return "QDSS_TRACE_REQ_DATA";
+ case CNSS_DRIVER_EVENT_MAX:
+ return "EVENT_MAX";
+ }
+
+ return "UNKNOWN";
+};
+
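+/**
+ * cnss_driver_event_post - Post a driver event to the platform event queue
+ * @plat_priv: platform driver context
+ * @type: event type
+ * @flags: CNSS_EVENT_* flags; sync posts wait for the handler to complete
+ * @data: opaque payload passed to the event handler
+ *
+ * Return: 0 for async posts, the handler result for sync posts, or a negative
+ * value on failure (-EINTR if an interruptible wait was interrupted).
+ */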
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ u32 flags, void *data)
+{
+ struct cnss_driver_event *event;
+ unsigned long irq_flags;
+ int gfp = GFP_KERNEL;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
+ cnss_driver_event_to_str(type), type,
+ flags ? "-sync" : "", plat_priv->driver_state, flags);
+
+ if (type >= CNSS_DRIVER_EVENT_MAX) {
+		cnss_pr_err("Invalid Event type: %d, can't post\n", type);
+ return -EINVAL;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ event = kzalloc(sizeof(*event), gfp);
+ if (!event)
+ return -ENOMEM;
+
+ cnss_pm_stay_awake(plat_priv);
+
+ event->type = type;
+ event->data = data;
+ init_completion(&event->complete);
+ event->ret = CNSS_EVENT_PENDING;
+ event->sync = !!(flags & CNSS_EVENT_SYNC);
+
+ spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
+ list_add_tail(&event->list, &plat_priv->event_list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+
+ queue_work(plat_priv->event_wq, &plat_priv->event_work);
+
+ if (!(flags & CNSS_EVENT_SYNC))
+ goto out;
+
+ if (flags & CNSS_EVENT_UNKILLABLE)
+ wait_for_completion(&event->complete);
+ else if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
+ ret = wait_for_completion_killable(&event->complete);
+ else
+ ret = wait_for_completion_interruptible(&event->complete);
+
+ cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+ cnss_driver_event_to_str(type), type,
+ plat_priv->driver_state, ret, event->ret);
+ spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
+ if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
+ event->sync = false;
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+
+ ret = event->ret;
+ kfree(event);
+
+out:
+ cnss_pm_relax(plat_priv);
+ return ret;
+}
+
+/**
+ * cnss_get_timeout - Get timeout for corresponding type.
+ * @plat_priv: Pointer to platform driver context.
+ * @timeout_type: Timeout type.
+ *
+ * Return: Timeout in milliseconds.
+ */
+unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
+ enum cnss_timeout_type timeout_type)
+{
+ unsigned int qmi_timeout = cnss_get_qmi_timeout(plat_priv);
+
+ switch (timeout_type) {
+ case CNSS_TIMEOUT_QMI:
+ return qmi_timeout;
+ case CNSS_TIMEOUT_POWER_UP:
+ return (qmi_timeout << 2);
+ case CNSS_TIMEOUT_IDLE_RESTART:
+		/* In the idle restart power up sequence, fw_boot_timer handles
+		 * FW initialization failure. It uses WLAN_MISSION_MODE_TIMEOUT,
+		 * so set up 3x that time to account for FW dump collection and
+		 * FW re-initialization on retry.
+		 */
+ return (qmi_timeout + WLAN_MISSION_MODE_TIMEOUT * 3);
+ case CNSS_TIMEOUT_CALIBRATION:
+ return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT);
+ case CNSS_TIMEOUT_WLAN_WATCHDOG:
+ return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS);
+ case CNSS_TIMEOUT_RDDM:
+ return CNSS_RDDM_TIMEOUT_MS;
+ case CNSS_TIMEOUT_RECOVERY:
+ return RECOVERY_TIMEOUT;
+ case CNSS_TIMEOUT_DAEMON_CONNECTION:
+ return qmi_timeout + CNSS_DAEMON_CONNECT_TIMEOUT_MS;
+ default:
+ return qmi_timeout;
+ }
+}
+
+unsigned int cnss_get_boot_timeout(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return 0;
+ }
+
+ return cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
+}
+EXPORT_SYMBOL(cnss_get_boot_timeout);
+
+int cnss_power_up(struct device *dev)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ cnss_pr_dbg("Powering up device\n");
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ CNSS_EVENT_SYNC, NULL);
+ if (ret)
+ goto out;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto out;
+
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_POWER_UP);
+
+ reinit_completion(&plat_priv->power_up_complete);
+ ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret) {
+ cnss_pr_err("Timeout (%ums) waiting for power up to complete\n",
+ timeout);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_power_up);
+
+int cnss_power_down(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ cnss_pr_dbg("Powering down device\n");
+
+ return cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ CNSS_EVENT_SYNC, NULL);
+}
+EXPORT_SYMBOL(cnss_power_down);
+
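+/* Idle restart: post a synchronous IDLE_RESTART event and wait for
+ * power_up_complete. On timeout the device is powered down and CNSS_ASSERT
+ * fires, since FW recovery retries have already been attempted by then.
+ */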
+int cnss_idle_restart(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
+ cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
+ return -EBUSY;
+ }
+
+ cnss_pr_dbg("Doing idle restart\n");
+
+ reinit_completion(&plat_priv->power_up_complete);
+
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_IDLE_RESTART,
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+ if (ret)
+ goto out;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ ret = cnss_bus_call_driver_probe(plat_priv);
+ goto out;
+ }
+
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_IDLE_RESTART);
+ ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+ msecs_to_jiffies(timeout));
+ if (plat_priv->power_up_error) {
+ ret = plat_priv->power_up_error;
+ clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
+ cnss_pr_dbg("Power up error:%d, exiting\n",
+ plat_priv->power_up_error);
+ goto out;
+ }
+
+ if (!ret) {
+		/* This timeout is hit only after FW recovery retries have been
+		 * attempted, so the device can safely be powered off.
+		 */
+ cnss_fatal_err("Timeout (%ums) waiting for idle restart to complete\n",
+ timeout);
+ ret = -ETIMEDOUT;
+ cnss_power_down(dev);
+ CNSS_ASSERT(0);
+ goto out;
+ }
+
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
+ del_timer(&plat_priv->fw_boot_timer);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_unlock(&plat_priv->driver_ops_lock);
+ return 0;
+
+out:
+ mutex_unlock(&plat_priv->driver_ops_lock);
+ return ret;
+}
+EXPORT_SYMBOL(cnss_idle_restart);
+
+int cnss_idle_shutdown(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
+ int ret;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (test_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state)) {
+ cnss_pr_dbg("System suspend or resume in progress, ignore idle shutdown\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_dbg("Doing idle shutdown\n");
+
+ if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ goto skip_wait;
+
+ reinit_completion(&plat_priv->recovery_complete);
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
+ ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret) {
+ cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
+ timeout);
+ CNSS_ASSERT(0);
+ }
+
+skip_wait:
+ return cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_idle_shutdown);
+
+static int cnss_get_resources(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_get_vreg_type(plat_priv, CNSS_VREG_PRIM);
+ if (ret) {
+ cnss_pr_err("Failed to get vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_get_clk(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get clocks, err = %d\n", ret);
+ goto put_vreg;
+ }
+
+ ret = cnss_get_pinctrl(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto put_clk;
+ }
+
+ return 0;
+
+put_clk:
+ cnss_put_clk(plat_priv);
+put_vreg:
+ cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
+out:
+ return ret;
+}
+
+static void cnss_put_resources(struct cnss_plat_data *plat_priv)
+{
+ cnss_put_clk(plat_priv);
+ cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
+}
+
+#if IS_ENABLED(CONFIG_ESOC) && IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+static int cnss_modem_notifier_nb(struct notifier_block *nb,
+ unsigned long code,
+ void *ss_handle)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, modem_nb);
+ struct cnss_esoc_info *esoc_info;
+
+ cnss_pr_dbg("Modem notifier: event %lu\n", code);
+
+ if (!plat_priv)
+ return NOTIFY_DONE;
+
+ esoc_info = &plat_priv->esoc_info;
+
+ if (code == SUBSYS_AFTER_POWERUP)
+ esoc_info->modem_current_status = 1;
+ else if (code == SUBSYS_BEFORE_SHUTDOWN)
+ esoc_info->modem_current_status = 0;
+ else
+ return NOTIFY_DONE;
+
+ if (!cnss_bus_call_driver_modem_status(plat_priv,
+ esoc_info->modem_current_status))
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
+static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+ struct esoc_desc *esoc_desc;
+ const char *client_desc;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ esoc_info->notify_modem_status =
+ of_property_read_bool(dev->of_node,
+ "qcom,notify-modem-status");
+
+ if (!esoc_info->notify_modem_status)
+ goto out;
+
+ ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
+ &client_desc);
+ if (ret) {
+ cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
+ } else {
+ esoc_desc = devm_register_esoc_client(dev, client_desc);
+ if (IS_ERR_OR_NULL(esoc_desc)) {
+ ret = PTR_RET(esoc_desc);
+ cnss_pr_err("Failed to register esoc_desc, err = %d\n",
+ ret);
+ goto out;
+ }
+ esoc_info->esoc_desc = esoc_desc;
+ }
+
+ plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
+ esoc_info->modem_current_status = 0;
+ esoc_info->modem_notify_handler =
+ subsys_notif_register_notifier(esoc_info->esoc_desc ?
+ esoc_info->esoc_desc->name :
+ "modem", &plat_priv->modem_nb);
+ if (IS_ERR(esoc_info->modem_notify_handler)) {
+ ret = PTR_ERR(esoc_info->modem_notify_handler);
+ cnss_pr_err("Failed to register esoc notifier, err = %d\n",
+ ret);
+ goto unreg_esoc;
+ }
+
+ return 0;
+unreg_esoc:
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+out:
+ return ret;
+}
+
+static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ if (esoc_info->notify_modem_status)
+ subsys_notif_unregister_notifier
+ (esoc_info->modem_notify_handler,
+ &plat_priv->modem_nb);
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+}
+#else
+static inline int cnss_register_esoc(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline void cnss_unregister_esoc(struct cnss_plat_data *plat_priv) {}
+#endif
+
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv;
+ int ret = 0;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("Powerup is ignored\n");
+ return 0;
+ }
+
+ ret = cnss_bus_dev_powerup(plat_priv);
+ if (ret)
+ __pm_relax(plat_priv->recovery_ws);
+ return ret;
+}
+
+static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
+ bool force_stop)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("shutdown is ignored\n");
+ return 0;
+ }
+
+ return cnss_bus_dev_shutdown(plat_priv);
+}
+
+void cnss_device_crashed(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_subsys_info *subsys_info;
+
+ if (!plat_priv)
+ return;
+
+ subsys_info = &plat_priv->subsys_info;
+ if (subsys_info->subsys_device) {
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ subsys_set_crash_status(subsys_info->subsys_device, true);
+ subsystem_restart_dev(subsys_info->subsys_device);
+ }
+}
+EXPORT_SYMBOL(cnss_device_crashed);
+
+static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return;
+ }
+
+ cnss_bus_dev_crash_shutdown(plat_priv);
+}
+
+static int cnss_subsys_ramdump(int enable,
+ const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!enable)
+ return 0;
+
+ return cnss_bus_dev_ramdump(plat_priv);
+}
+
+static void cnss_recovery_work_handler(struct work_struct *work)
+{
+}
+#else
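+/* Without MSM subsystem restart support, recovery is driven from this work
+ * item: shut down the bus, collect the ramdump, and power the device back up
+ * after RECOVERY_DELAY_MS. If recovery is not enabled, panic instead.
+ */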
+static void cnss_recovery_work_handler(struct work_struct *work)
+{
+ int ret;
+
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, recovery_work);
+
+ if (!plat_priv->recovery_enabled)
+		panic("subsys-restart: Resetting the SoC - wlan crashed\n");
+
+ cnss_bus_dev_shutdown(plat_priv);
+ cnss_bus_dev_ramdump(plat_priv);
+ msleep(RECOVERY_DELAY_MS);
+
+ ret = cnss_bus_dev_powerup(plat_priv);
+ if (ret)
+ __pm_relax(plat_priv->recovery_ws);
+
+ return;
+}
+
+void cnss_device_crashed(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return;
+
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ schedule_work(&plat_priv->recovery_work);
+}
+EXPORT_SYMBOL(cnss_device_crashed);
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_ramdump_info *ramdump_info;
+
+ if (!plat_priv)
+ return NULL;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ *size = ramdump_info->ramdump_size;
+
+ return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
+static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
+{
+ switch (reason) {
+ case CNSS_REASON_DEFAULT:
+ return "DEFAULT";
+ case CNSS_REASON_LINK_DOWN:
+ return "LINK_DOWN";
+ case CNSS_REASON_RDDM:
+ return "RDDM";
+ case CNSS_REASON_TIMEOUT:
+ return "TIMEOUT";
+ }
+
+ return "UNKNOWN";
+};
+
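+/* Central recovery path: QCA6174 always self-recovers via shutdown/powerup and
+ * the SKIP_RECOVERY quirk bypasses recovery entirely. Link-down events may
+ * self-recover or be recovered in place; otherwise dump info is collected (for
+ * RDDM) and the bus crash handler is invoked.
+ */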
+static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
+ enum cnss_recovery_reason reason)
+{
+ plat_priv->recovery_count++;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto self_recovery;
+
+ if (test_bit(SKIP_RECOVERY, &plat_priv->ctrl_params.quirks)) {
+ cnss_pr_dbg("Skip device recovery\n");
+ return 0;
+ }
+
+	/* The FW recovery sequence has multiple steps and firmware load
+	 * requires Linux PM to stay awake, so hold the cnss wake source until
+	 * WLAN mission mode is enabled. CNSS_TIMEOUT_RECOVERY should cover the
+	 * total time taken by this process.
+	 */
+ pm_wakeup_ws_event(plat_priv->recovery_ws,
+ cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY),
+ true);
+
+ switch (reason) {
+ case CNSS_REASON_LINK_DOWN:
+ if (!cnss_bus_check_link_status(plat_priv)) {
+ cnss_pr_dbg("Skip link down recovery as link is already up\n");
+ return 0;
+ }
+ if (test_bit(LINK_DOWN_SELF_RECOVERY,
+ &plat_priv->ctrl_params.quirks))
+ goto self_recovery;
+ if (!cnss_bus_recover_link_down(plat_priv)) {
+ /* clear recovery bit here to avoid skipping
+ * the recovery work for RDDM later
+ */
+ clear_bit(CNSS_DRIVER_RECOVERY,
+ &plat_priv->driver_state);
+ return 0;
+ }
+ break;
+ case CNSS_REASON_RDDM:
+ cnss_bus_collect_dump_info(plat_priv, false);
+ break;
+ case CNSS_REASON_DEFAULT:
+ case CNSS_REASON_TIMEOUT:
+ break;
+ default:
+ cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(reason), reason);
+ break;
+ }
+ cnss_bus_device_crashed(plat_priv);
+
+ return 0;
+
+self_recovery:
+ cnss_pr_dbg("Going for self recovery\n");
+ cnss_bus_dev_shutdown(plat_priv);
+
+ if (test_bit(LINK_DOWN_SELF_RECOVERY, &plat_priv->ctrl_params.quirks))
+ clear_bit(LINK_DOWN_SELF_RECOVERY,
+ &plat_priv->ctrl_params.quirks);
+
+ cnss_bus_dev_powerup(plat_priv);
+
+ return 0;
+}
+
+static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_recovery_data *recovery_data = data;
+ int ret = 0;
+
+ cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(recovery_data->reason),
+ recovery_data->reason);
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_err("Improper driver state, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_err("Reboot is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_err("Recovery is already in progress\n");
+ CNSS_ASSERT(0);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
+ cnss_pr_err("Driver unload or idle shutdown is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_RESTART,
+ &plat_priv->driver_state)) {
+ cnss_pr_err("Driver load or idle restart is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ set_bit(CNSS_FW_BOOT_RECOVERY,
+ &plat_priv->driver_state);
+ }
+ break;
+ }
+
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ ret = cnss_do_recovery(plat_priv, recovery_data->reason);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ cnss_schedule_recovery(dev, reason);
+ return 0;
+}
+EXPORT_SYMBOL(cnss_self_recovery);
+
+void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_recovery_data *data;
+	gfp_t gfp = GFP_KERNEL;
+
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Driver unload or idle shutdown is in progress, ignore schedule recovery\n");
+ return;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ data = kzalloc(sizeof(*data), gfp);
+ if (!data)
+ return;
+
+ data->reason = reason;
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ 0, data);
+}
+EXPORT_SYMBOL(cnss_schedule_recovery);
+
+int cnss_force_fw_assert(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ cnss_pr_info("Forced FW assert is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cnss_bus_is_device_down(plat_priv)) {
+ cnss_pr_info("Device is already in bad state, ignore force assert\n");
+ return 0;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
+ return 0;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ 0, NULL);
+ else
+ cnss_bus_force_fw_assert_hdlr(plat_priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_force_fw_assert);
+
+int cnss_force_collect_rddm(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ cnss_pr_info("Force collect rddm is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cnss_bus_is_device_down(plat_priv)) {
+ cnss_pr_info("Device is already in bad state, wait to collect rddm\n");
+ goto wait_rddm;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_info("Recovery is already in progress, wait to collect rddm\n");
+ goto wait_rddm;
+ }
+
+ if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
+ cnss_pr_info("Loading/Unloading/idle restart/shutdown is in progress, ignore forced collect rddm\n");
+ return 0;
+ }
+
+ ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
+ if (ret)
+ return ret;
+
+wait_rddm:
+ reinit_completion(&plat_priv->rddm_complete);
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RDDM);
+ ret = wait_for_completion_timeout(&plat_priv->rddm_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret) {
+ cnss_pr_err("Timeout (%ums) waiting for RDDM to complete\n",
+ timeout);
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_force_collect_rddm);
+
+int cnss_qmi_send_get(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return 0;
+
+ return cnss_bus_qmi_send_get(plat_priv);
+}
+EXPORT_SYMBOL(cnss_qmi_send_get);
+
+int cnss_qmi_send_put(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return 0;
+
+ return cnss_bus_qmi_send_put(plat_priv);
+}
+EXPORT_SYMBOL(cnss_qmi_send_put);
+
+int cnss_qmi_send(struct device *dev, int type, void *cmd,
+ int cmd_len, void *cb_ctx,
+ int (*cb)(void *ctx, void *event, int event_len))
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return -EINVAL;
+
+ plat_priv->get_info_cb = cb;
+ plat_priv->get_info_cb_ctx = cb_ctx;
+
+ ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
+ if (ret) {
+ plat_priv->get_info_cb = NULL;
+ plat_priv->get_info_cb_ctx = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_qmi_send);
+
+static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Calibration complete. Ignore calibration req\n");
+ goto out;
+ } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Calibration in progress. Ignore new calibration req\n");
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("WLAN in mission mode before cold boot calibration\n");
+ CNSS_ASSERT(0);
+ return -EINVAL;
+ }
+
+ set_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
+ reinit_completion(&plat_priv->cal_complete);
+ ret = cnss_bus_dev_powerup(plat_priv);
+ if (ret) {
+ complete(&plat_priv->cal_complete);
+ clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
+		/* Mark cold boot calibration as done in the driver state to
+		 * record the attempt and note the error, since calibration
+		 * cannot be retried at boot.
+		 */
+ plat_priv->cal_done = CNSS_CAL_FAILURE;
+ set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
+ }
+
+out:
+ return ret;
+}
+
+static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_cal_info *cal_info = data;
+
+ if (!test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
+ goto out;
+
+ switch (cal_info->cal_status) {
+ case CNSS_CAL_DONE:
+ cnss_pr_dbg("Calibration completed successfully\n");
+ plat_priv->cal_done = true;
+ break;
+ case CNSS_CAL_TIMEOUT:
+ case CNSS_CAL_FAILURE:
+ cnss_pr_dbg("Calibration failed. Status: %d, force shutdown\n",
+ cal_info->cal_status);
+ break;
+ default:
+ cnss_pr_err("Unknown calibration status: %u\n",
+ cal_info->cal_status);
+ break;
+ }
+
+ cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+ cnss_bus_free_qdss_mem(plat_priv);
+ cnss_release_antenna_sharing(plat_priv);
+ cnss_bus_dev_shutdown(plat_priv);
+ msleep(COLD_BOOT_CAL_SHUTDOWN_DELAY_MS);
+ complete(&plat_priv->cal_complete);
+ clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
+ set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
+
+ if (cal_info->cal_status == CNSS_CAL_DONE) {
+ cnss_cal_mem_upload_to_file(plat_priv);
+		if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work)) {
+ cnss_pr_dbg("Schedule WLAN driver load\n");
+ schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
+ 0);
+ }
+ }
+out:
+ kfree(data);
+ return 0;
+}
+
+static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ ret = cnss_bus_dev_powerup(plat_priv);
+ if (ret)
+ clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
+
+ return ret;
+}
+
+static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
+{
+ cnss_bus_dev_shutdown(plat_priv);
+
+ return 0;
+}
+
+static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_bus_alloc_qdss_mem(plat_priv);
+ if (ret < 0)
+ return ret;
+
+ return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
+}
+
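+/* Translate a firmware-reported physical address range into the matching
+ * kernel virtual address by walking the given segment table. A match is
+ * either the exact start of a segment or a range fully contained within one
+ * segment.
+ */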
+static void *cnss_get_fw_mem_pa_to_va(struct cnss_fw_mem *fw_mem,
+ u32 mem_seg_len, u64 pa, u32 size)
+{
+ int i = 0;
+ u64 offset = 0;
+ void *va = NULL;
+ u64 local_pa;
+ u32 local_size;
+
+ for (i = 0; i < mem_seg_len; i++) {
+ local_pa = (u64)fw_mem[i].pa;
+ local_size = (u32)fw_mem[i].size;
+ if (pa == local_pa && size <= local_size) {
+ va = fw_mem[i].va;
+ break;
+ }
+ if (pa > local_pa &&
+ pa < local_pa + local_size &&
+ pa + size <= local_pa + local_size) {
+ offset = pa - local_pa;
+ va = fw_mem[i].va + offset;
+ break;
+ }
+ }
+ return va;
+}
+
+static int cnss_fw_mem_file_save_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
+ struct cnss_fw_mem *fw_mem_seg;
+	int ret = 0;
+ void *va = NULL;
+ u32 i, fw_mem_seg_len;
+
+ switch (event_data->mem_type) {
+ case QMI_WLFW_MEM_TYPE_DDR_V01:
+ if (!plat_priv->fw_mem_seg_len)
+ goto invalid_mem_save;
+
+ fw_mem_seg = plat_priv->fw_mem;
+ fw_mem_seg_len = plat_priv->fw_mem_seg_len;
+ break;
+ case QMI_WLFW_MEM_QDSS_V01:
+ if (!plat_priv->qdss_mem_seg_len)
+ goto invalid_mem_save;
+
+ fw_mem_seg = plat_priv->qdss_mem;
+ fw_mem_seg_len = plat_priv->qdss_mem_seg_len;
+ break;
+ default:
+ goto invalid_mem_save;
+ }
+
+ for (i = 0; i < event_data->mem_seg_len; i++) {
+ va = cnss_get_fw_mem_pa_to_va(fw_mem_seg, fw_mem_seg_len,
+ event_data->mem_seg[i].addr,
+ event_data->mem_seg[i].size);
+ if (!va) {
+ cnss_pr_err("Fail to find matching va of pa %pa for mem type: %d\n",
+ &event_data->mem_seg[i].addr,
+ event_data->mem_type);
+ ret = -EINVAL;
+ break;
+ }
+ ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
+ event_data->file_name,
+ event_data->mem_seg[i].size);
+ if (ret < 0) {
+ cnss_pr_err("Fail to save fw mem data: %d\n",
+ ret);
+ break;
+ }
+ }
+ kfree(data);
+ return ret;
+
+invalid_mem_save:
+ cnss_pr_err("FW Mem type %d not allocated. Invalid save request\n",
+ event_data->mem_type);
+ kfree(data);
+ return -EINVAL;
+}
+
+static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
+{
+ cnss_bus_free_qdss_mem(plat_priv);
+
+ return 0;
+}
+
+static int cnss_qdss_trace_req_data_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ int ret = 0;
+ struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ ret = cnss_wlfw_qdss_data_send_sync(plat_priv, event_data->file_name,
+ event_data->total_size);
+
+ kfree(data);
+ return ret;
+}
+
+static void cnss_driver_event_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, event_work);
+ struct cnss_driver_event *event;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ cnss_pm_stay_awake(plat_priv);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+
+ while (!list_empty(&plat_priv->event_list)) {
+ event = list_first_entry(&plat_priv->event_list,
+ struct cnss_driver_event, list);
+ list_del(&event->list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
+ cnss_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type,
+ plat_priv->driver_state);
+
+ switch (event->type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ ret = cnss_wlfw_server_arrive(plat_priv, event->data);
+ break;
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ ret = cnss_wlfw_server_exit(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ ret = cnss_bus_alloc_fw_mem(plat_priv);
+ if (ret)
+ break;
+ ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ ret = cnss_fw_mem_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_READY:
+ ret = cnss_fw_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ ret = cnss_cold_boot_cal_done_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ ret = cnss_bus_register_driver_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ ret = cnss_bus_unregister_driver_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ ret = cnss_driver_recovery_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_IDLE_RESTART:
+ set_bit(CNSS_DRIVER_IDLE_RESTART,
+ &plat_priv->driver_state);
+ /* fall through */
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ ret = cnss_power_up_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
+ set_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
+ &plat_priv->driver_state);
+ /* fall through */
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ ret = cnss_power_down_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
+ ret = cnss_process_wfc_call_ind_event(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
+ ret = cnss_process_twt_cfg_ind_event(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+ ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
+ ret = cnss_fw_mem_file_save_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+ ret = cnss_qdss_trace_free_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
+ ret = cnss_qdss_trace_req_data_hdlr(plat_priv,
+ event->data);
+ break;
+ default:
+ cnss_pr_err("Invalid driver event type: %d",
+ event->type);
+ kfree(event);
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ continue;
+ }
+
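+		/* Re-take the event lock before touching event->sync and the
+		 * list. Sync events are completed under the lock and are not
+		 * freed here; only async events are freed by this worker.
+		 */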
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ if (event->sync) {
+ event->ret = ret;
+ complete(&event->complete);
+ continue;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ kfree(event);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pm_relax(plat_priv);
+}
+
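+/* Resolve the physical address backing a DMA allocation by fetching its
+ * scatter-gather table; only the first entry's page is used.
+ */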
+int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
+ phys_addr_t *pa, unsigned long attrs)
+{
+ struct sg_table sgt;
+ int ret;
+
+ ret = dma_get_sgtable_attrs(dev, &sgt, va, dma, size, attrs);
+ if (ret) {
+ cnss_pr_err("Failed to get sgtable for va: 0x%pK, dma: %pa, size: 0x%zx, attrs: 0x%x\n",
+ va, &dma, size, attrs);
+ return -EINVAL;
+ }
+
+ *pa = page_to_phys(sg_page(sgt.sgl));
+ sg_free_table(&sgt);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+int cnss_register_subsys(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+
+ subsys_info->subsys_desc.name = "wlan";
+ subsys_info->subsys_desc.owner = THIS_MODULE;
+ subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
+ subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
+ subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
+ subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
+ subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
+
+ subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
+ if (IS_ERR(subsys_info->subsys_device)) {
+ ret = PTR_ERR(subsys_info->subsys_device);
+ cnss_pr_err("Failed to register subsys, err = %d\n", ret);
+ goto out;
+ }
+
+ subsys_info->subsys_handle =
+ subsystem_get(subsys_info->subsys_desc.name);
+ if (!subsys_info->subsys_handle) {
+ cnss_pr_err("Failed to get subsys_handle!\n");
+ ret = -EINVAL;
+ goto unregister_subsys;
+ } else if (IS_ERR(subsys_info->subsys_handle)) {
+ ret = PTR_ERR(subsys_info->subsys_handle);
+ cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
+ goto unregister_subsys;
+ }
+
+ return 0;
+
+unregister_subsys:
+ subsys_unregister(subsys_info->subsys_device);
+out:
+ return ret;
+}
+
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+ subsystem_put(subsys_info->subsys_handle);
+ subsys_unregister(subsys_info->subsys_device);
+}
+
+static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ return create_ramdump_device(subsys_info->subsys_desc.name,
+ subsys_info->subsys_desc.dev);
+}
+
+static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
+ void *ramdump_dev)
+{
+ destroy_ramdump_device(ramdump_dev);
+}
+
+int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
+ struct ramdump_segment segment;
+
+ memset(&segment, 0, sizeof(segment));
+ segment.v_address = (void __iomem *)ramdump_info->ramdump_va;
+ segment.size = ramdump_info->ramdump_size;
+
+ return qcom_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+}
+
+int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+ struct cnss_dump_data *dump_data = &info_v2->dump_data;
+ struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+ struct ramdump_segment *ramdump_segs, *s;
+ struct cnss_dump_meta_info meta_info = {0};
+ int i, ret = 0;
+
+ ramdump_segs = kcalloc(dump_data->nentries + 1,
+ sizeof(*ramdump_segs),
+ GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
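+	/* Segment 0 is reserved for the meta info header, which is filled in
+	 * after the loop; dump segments are copied starting from index 1.
+	 */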
+ s = ramdump_segs + 1;
+ for (i = 0; i < dump_data->nentries; i++) {
+ if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
+ cnss_pr_err("Unsupported dump type: %d",
+ dump_seg->type);
+ continue;
+ }
+
+ if (meta_info.entry[dump_seg->type].entry_start == 0) {
+ meta_info.entry[dump_seg->type].type = dump_seg->type;
+ meta_info.entry[dump_seg->type].entry_start = i + 1;
+ }
+ meta_info.entry[dump_seg->type].entry_num++;
+
+ s->address = dump_seg->address;
+ s->v_address = (void __iomem *)dump_seg->v_address;
+ s->size = dump_seg->size;
+ s++;
+ dump_seg++;
+ }
+
+ meta_info.magic = CNSS_RAMDUMP_MAGIC;
+ meta_info.version = CNSS_RAMDUMP_VERSION;
+ meta_info.chipset = plat_priv->device_id;
+ meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
+
+ ramdump_segs->v_address = (void __iomem *)(&meta_info);
+ ramdump_segs->size = sizeof(meta_info);
+
+ ret = qcom_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+ dump_data->nentries + 1);
+ kfree(ramdump_segs);
+
+ return ret;
+}
+#else
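+/* Without the MSM subsystem-restart framework, hook the kernel panic
+ * notifier instead so that the bus-level crash shutdown still runs when the
+ * system panics.
+ */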
+static int cnss_panic_handler(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, panic_nb);
+
+ cnss_bus_dev_crash_shutdown(plat_priv);
+
+ return NOTIFY_DONE;
+}
+
+int cnss_register_subsys(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ plat_priv->panic_nb.notifier_call = cnss_panic_handler;
+ ret = atomic_notifier_chain_register(&panic_notifier_list,
+ &plat_priv->panic_nb);
+ if (ret) {
+ cnss_pr_err("Failed to register panic handler\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ ret = atomic_notifier_chain_unregister(&panic_notifier_list,
+ &plat_priv->panic_nb);
+ if (ret)
+ cnss_pr_err("Failed to unregister panic handler\n");
+}
+
+#if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
+static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
+{
+ return &plat_priv->plat_dev->dev;
+}
+
+static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
+ void *ramdump_dev)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
+int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
+ struct qcom_dump_segment segment;
+ struct list_head head;
+
+ INIT_LIST_HEAD(&head);
+ memset(&segment, 0, sizeof(segment));
+ segment.va = ramdump_info->ramdump_va;
+ segment.size = ramdump_info->ramdump_size;
+ list_add(&segment.node, &head);
+
+ return qcom_dump(&head, ramdump_info->ramdump_dev);
+}
+
+int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+ struct cnss_dump_data *dump_data = &info_v2->dump_data;
+ struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+ struct qcom_dump_segment *seg;
+ struct cnss_dump_meta_info meta_info = {0};
+ struct list_head head;
+ int i, ret = 0;
+
+ INIT_LIST_HEAD(&head);
+ for (i = 0; i < dump_data->nentries; i++) {
+ if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
+ cnss_pr_err("Unsupported dump type: %d",
+ dump_seg->type);
+ continue;
+ }
+
+ seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ continue;
+
+ if (meta_info.entry[dump_seg->type].entry_start == 0) {
+ meta_info.entry[dump_seg->type].type = dump_seg->type;
+ meta_info.entry[dump_seg->type].entry_start = i + 1;
+ }
+ meta_info.entry[dump_seg->type].entry_num++;
+ seg->da = dump_seg->address;
+ seg->va = dump_seg->v_address;
+ seg->size = dump_seg->size;
+ list_add_tail(&seg->node, &head);
+ dump_seg++;
+ }
+
+ seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ goto do_elf_dump;
+
+ meta_info.magic = CNSS_RAMDUMP_MAGIC;
+ meta_info.version = CNSS_RAMDUMP_VERSION;
+ meta_info.chipset = plat_priv->device_id;
+ meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
+ seg->va = &meta_info;
+ seg->size = sizeof(meta_info);
+ list_add(&seg->node, &head);
+
+do_elf_dump:
+ ret = qcom_elf_dump(&head, info_v2->ramdump_dev);
+
+ while (!list_empty(&head)) {
+ seg = list_first_entry(&head, struct qcom_dump_segment, node);
+ list_del(&seg->node);
+ kfree(seg);
+ }
+
+ return ret;
+}
+#else
+int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+#endif /* CONFIG_QCOM_RAMDUMP */
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
+static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info *ramdump_info;
+ struct msm_dump_entry dump_entry;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
+ ramdump_info->dump_data.len = ramdump_info->ramdump_size;
+ ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
+ ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+ strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
+ sizeof(ramdump_info->dump_data.name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
+
+ return msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+ &dump_entry);
+}
+
+static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_ramdump_info *ramdump_info;
+ u32 ramdump_size = 0;
+
+ dev = &plat_priv->plat_dev->dev;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0) {
+ ramdump_info->ramdump_va =
+ dma_alloc_coherent(dev, ramdump_size,
+ &ramdump_info->ramdump_pa,
+ GFP_KERNEL);
+
+ if (ramdump_info->ramdump_va)
+ ramdump_info->ramdump_size = ramdump_size;
+ }
+
+ cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
+ ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
+
+ if (ramdump_info->ramdump_size == 0) {
+ cnss_pr_info("Ramdump will not be collected");
+ goto out;
+ }
+
+ ret = cnss_init_dump_entry(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ goto free_ramdump;
+ }
+
+ ramdump_info->ramdump_dev = cnss_create_ramdump_device(plat_priv);
+ if (!ramdump_info->ramdump_dev) {
+ cnss_pr_err("Failed to create ramdump device!");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+free_ramdump:
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
+out:
+ return ret;
+}
+
+static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_ramdump_info *ramdump_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (ramdump_info->ramdump_dev)
+ cnss_destroy_ramdump_device(plat_priv,
+ ramdump_info->ramdump_dev);
+
+ if (ramdump_info->ramdump_va)
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va,
+ ramdump_info->ramdump_pa);
+}
+
+/**
+ * cnss_ignore_dump_data_reg_fail - Ignore Ramdump table register failure
+ * @ret: Error returned by msm_dump_data_register_nominidump
+ *
+ * For Lahaina GKI boot, the mem dump feature is not supported, so the
+ * failure is ignored.
+ *
+ * Return: The given error code if the mem dump feature is enabled, 0 otherwise
+ */
+static int cnss_ignore_dump_data_reg_fail(int ret)
+{
+ return ret;
+}
+
+static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_ramdump_info_v2 *info_v2;
+ struct cnss_dump_data *dump_data;
+ struct msm_dump_entry dump_entry;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ u32 ramdump_size = 0;
+
+ info_v2 = &plat_priv->ramdump_info_v2;
+ dump_data = &info_v2->dump_data;
+
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0)
+ info_v2->ramdump_size = ramdump_size;
+
+ cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
+
+ info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
+ if (!info_v2->dump_data_vaddr)
+ return -ENOMEM;
+
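+	/* Describe the v2 dump buffer to the MSM dump table; the segment
+	 * array behind dump_data_vaddr is walked later by
+	 * cnss_do_elf_ramdump().
+	 */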
+ dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
+ dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
+ dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
+ dump_data->seg_version = CNSS_DUMP_SEG_VER;
+ strlcpy(dump_data->name, CNSS_DUMP_NAME,
+ sizeof(dump_data->name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(dump_data);
+
+ ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+ &dump_entry);
+ if (ret) {
+ ret = cnss_ignore_dump_data_reg_fail(ret);
+ cnss_pr_err("Failed to setup dump table, %s (%d)\n",
+ ret ? "Error" : "Ignoring", ret);
+ goto free_ramdump;
+ }
+
+ info_v2->ramdump_dev = cnss_create_ramdump_device(plat_priv);
+ if (!info_v2->ramdump_dev) {
+ cnss_pr_err("Failed to create ramdump device!\n");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+
+free_ramdump:
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ return ret;
+}
+
+static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2;
+
+ info_v2 = &plat_priv->ramdump_info_v2;
+
+ if (info_v2->ramdump_dev)
+ cnss_destroy_ramdump_device(plat_priv, info_v2->ramdump_dev);
+
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ info_v2->dump_data_valid = false;
+}
+
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_register_ramdump_v1(plat_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ ret = cnss_register_ramdump_v2(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_unregister_ramdump_v1(plat_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ cnss_unregister_ramdump_v2(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ break;
+ }
+}
+#else
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv) {}
+#endif /* CONFIG_QCOM_MEMORY_DUMP_V2 */
+
+#if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
+int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size)
+{
+ struct md_region md_entry;
+ int ret;
+
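+	/* Name the minidump region after the dump type and segment index,
+	 * e.g. "FBC_0" for the first FW image segment.
+	 */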
+ switch (type) {
+ case CNSS_FW_IMAGE:
+ snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
+ seg_no);
+ break;
+ case CNSS_FW_RDDM:
+ snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
+ seg_no);
+ break;
+ case CNSS_FW_REMOTE_HEAP:
+ snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
+ seg_no);
+ break;
+ default:
+ cnss_pr_err("Unknown dump type ID: %d\n", type);
+ return -EINVAL;
+ }
+
+ md_entry.phys_addr = pa;
+ md_entry.virt_addr = (uintptr_t)va;
+ md_entry.size = size;
+ md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+
+ cnss_pr_dbg("Mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
+ md_entry.name, va, &pa, size);
+
+ ret = msm_minidump_add_region(&md_entry);
+ if (ret < 0)
+ cnss_pr_err("Failed to add mini dump region, err = %d\n", ret);
+
+ return ret;
+}
+
+int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size)
+{
+ struct md_region md_entry;
+ int ret;
+
+ switch (type) {
+ case CNSS_FW_IMAGE:
+ snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
+ seg_no);
+ break;
+ case CNSS_FW_RDDM:
+ snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
+ seg_no);
+ break;
+ case CNSS_FW_REMOTE_HEAP:
+ snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
+ seg_no);
+ break;
+ default:
+ cnss_pr_err("Unknown dump type ID: %d\n", type);
+ return -EINVAL;
+ }
+
+ md_entry.phys_addr = pa;
+ md_entry.virt_addr = (uintptr_t)va;
+ md_entry.size = size;
+ md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+
+ cnss_pr_dbg("Remove mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
+ md_entry.name, va, &pa, size);
+
+ ret = msm_minidump_remove_region(&md_entry);
+ if (ret)
+ cnss_pr_err("Failed to remove mini dump region, err = %d\n",
+ ret);
+
+ return ret;
+}
+#else
+int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size)
+{
+ return 0;
+}
+
+int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size)
+{
+ return 0;
+}
+#endif /* CONFIG_QCOM_MINIDUMP */
+
+int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
+ const struct firmware **fw_entry,
+ const char *filename)
+{
+ if (IS_ENABLED(CONFIG_CNSS_REQ_FW_DIRECT))
+ return request_firmware_direct(fw_entry, filename,
+ &plat_priv->plat_dev->dev);
+ else
+ return firmware_request_nowarn(fw_entry, filename,
+ &plat_priv->plat_dev->dev);
+}
+
+#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
+/**
+ * cnss_register_bus_scale() - Set up interconnect voting data
+ * @plat_priv: Platform data structure
+ *
+ * For each interconnect path configured in the device tree, set up voting
+ * data for the list of bandwidth requirements.
+ *
+ * Return: 0 for success, -EINVAL if not configured
+ */
+static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ int ret = -EINVAL;
+ u32 idx, i, j, cfg_arr_size, *cfg_arr = NULL;
+ struct cnss_bus_bw_info *bus_bw_info, *tmp;
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ INIT_LIST_HEAD(&plat_priv->icc.list_head);
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,icc-path-count",
+ &plat_priv->icc.path_count);
+ if (ret) {
+ cnss_pr_err("Platform Bus Interconnect path not configured\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
+ "qcom,bus-bw-cfg-count",
+ &plat_priv->icc.bus_bw_cfg_count);
+ if (ret) {
+ cnss_pr_err("Failed to get Bus BW Config table size\n");
+ goto cleanup;
+ }
+ cfg_arr_size = plat_priv->icc.path_count *
+ plat_priv->icc.bus_bw_cfg_count * CNSS_ICC_VOTE_MAX;
+ cfg_arr = kcalloc(cfg_arr_size, sizeof(*cfg_arr), GFP_KERNEL);
+ if (!cfg_arr) {
+ cnss_pr_err("Failed to alloc cfg table mem\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = of_property_read_u32_array(plat_priv->plat_dev->dev.of_node,
+ "qcom,bus-bw-cfg", cfg_arr,
+ cfg_arr_size);
+ if (ret) {
+ cnss_pr_err("Invalid Bus BW Config Table\n");
+ goto cleanup;
+ }
+
+ cnss_pr_dbg("ICC Path_Count: %d BW_CFG_Count: %d\n",
+ plat_priv->icc.path_count, plat_priv->icc.bus_bw_cfg_count);
+
+ for (idx = 0; idx < plat_priv->icc.path_count; idx++) {
+ bus_bw_info = devm_kzalloc(dev, sizeof(*bus_bw_info),
+ GFP_KERNEL);
+ if (!bus_bw_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = of_property_read_string_index(dev->of_node,
+ "interconnect-names", idx,
+ &bus_bw_info->icc_name);
+ if (ret)
+ goto out;
+
+ bus_bw_info->icc_path =
+ of_icc_get(&plat_priv->plat_dev->dev,
+ bus_bw_info->icc_name);
+
+ if (IS_ERR(bus_bw_info->icc_path)) {
+ ret = PTR_ERR(bus_bw_info->icc_path);
+ if (ret != -EPROBE_DEFER) {
+ cnss_pr_err("Failed to get Interconnect path for %s. Err: %d\n",
+ bus_bw_info->icc_name, ret);
+ goto out;
+ }
+ }
+
+ bus_bw_info->cfg_table =
+ devm_kcalloc(dev, plat_priv->icc.bus_bw_cfg_count,
+ sizeof(*bus_bw_info->cfg_table),
+ GFP_KERNEL);
+ if (!bus_bw_info->cfg_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cnss_pr_dbg("ICC Vote CFG for path: %s\n",
+ bus_bw_info->icc_name);
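+		/* cfg_arr is the flattened "qcom,bus-bw-cfg" table: each path
+		 * has bus_bw_cfg_count (avg_bw, peak_bw) pairs, i.e.
+		 * CNSS_ICC_VOTE_MAX entries per bandwidth level.
+		 */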
+ for (i = 0, j = (idx * plat_priv->icc.bus_bw_cfg_count *
+ CNSS_ICC_VOTE_MAX);
+ i < plat_priv->icc.bus_bw_cfg_count;
+ i++, j += 2) {
+ bus_bw_info->cfg_table[i].avg_bw = cfg_arr[j];
+ bus_bw_info->cfg_table[i].peak_bw = cfg_arr[j + 1];
+ cnss_pr_dbg("ICC Vote BW: %d avg: %d peak: %d\n",
+ i, bus_bw_info->cfg_table[i].avg_bw,
+ bus_bw_info->cfg_table[i].peak_bw);
+ }
+ list_add_tail(&bus_bw_info->list,
+ &plat_priv->icc.list_head);
+ }
+ kfree(cfg_arr);
+ return 0;
+out:
+ list_for_each_entry_safe(bus_bw_info, tmp,
+ &plat_priv->icc.list_head, list) {
+ list_del(&bus_bw_info->list);
+ }
+cleanup:
+ kfree(cfg_arr);
+ memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
+ return ret;
+}
+
+static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_bus_bw_info *bus_bw_info, *tmp;
+
+ list_for_each_entry_safe(bus_bw_info, tmp,
+ &plat_priv->icc.list_head, list) {
+ list_del(&bus_bw_info->list);
+ if (bus_bw_info->icc_path)
+ icc_put(bus_bw_info->icc_path);
+ }
+ memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
+}
+#else
+static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) {}
+#endif /* CONFIG_INTERCONNECT_QCOM */
+
+void cnss_daemon_connection_update_cb(void *cb_ctx, bool status)
+{
+ struct cnss_plat_data *plat_priv = cb_ctx;
+
+ if (!plat_priv) {
+ cnss_pr_err("%s: Invalid context\n", __func__);
+ return;
+ }
+ if (status) {
+ cnss_pr_info("CNSS Daemon connected\n");
+ set_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
+ complete(&plat_priv->daemon_connected);
+ } else {
+ cnss_pr_info("CNSS Daemon disconnected\n");
+ reinit_completion(&plat_priv->daemon_connected);
+ clear_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
+ }
+}
+
+static ssize_t enable_hds_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ unsigned int enable_hds = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (sscanf(buf, "%du", &enable_hds) != 1) {
+ cnss_pr_err("Invalid enable_hds sysfs command\n");
+ return -EINVAL;
+ }
+
+ if (enable_hds)
+ plat_priv->hds_enabled = true;
+ else
+ plat_priv->hds_enabled = false;
+
+ cnss_pr_dbg("%s HDS file download, count is %zu\n",
+ plat_priv->hds_enabled ? "Enable" : "Disable", count);
+
+ return count;
+}
+
+static ssize_t recovery_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ unsigned int recovery = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (sscanf(buf, "%du", &recovery) != 1) {
+ cnss_pr_err("Invalid recovery sysfs command\n");
+ return -EINVAL;
+ }
+
+ if (recovery)
+ plat_priv->recovery_enabled = true;
+ else
+ plat_priv->recovery_enabled = false;
+
+ cnss_pr_dbg("%s WLAN recovery, count is %zu\n",
+ plat_priv->recovery_enabled ? "Enable" : "Disable", count);
+
+ return count;
+}
+
+static ssize_t shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ if (plat_priv) {
+ set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+ complete_all(&plat_priv->power_up_complete);
+ complete_all(&plat_priv->cal_complete);
+ }
+
+ cnss_pr_dbg("Received shutdown notification\n");
+
+ return count;
+}
+
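+/* Userspace writes fs_ready once the firmware filesystem is mounted; if cold
+ * boot calibration is enabled, this posts the COLD_BOOT_CAL_START event.
+ */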
+static ssize_t fs_ready_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int fs_ready = 0;
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%du", &fs_ready) != 1)
+ return -EINVAL;
+
+ cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
+ fs_ready, count);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return count;
+ }
+
+ if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
+ cnss_pr_dbg("QMI is bypassed\n");
+ return count;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ cnss_pr_err("Not supported for device ID 0x%lx\n",
+ plat_priv->device_id);
+ return count;
+ }
+
+ if (fs_ready == FILE_SYSTEM_READY && plat_priv->cbc_enabled) {
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ 0, NULL);
+ }
+
+ return count;
+}
+
+static ssize_t qdss_trace_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ wlfw_qdss_trace_start(plat_priv);
+ cnss_pr_dbg("Received QDSS start command\n");
+ return count;
+}
+
+static ssize_t qdss_trace_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ u32 option = 0;
+
+ if (sscanf(buf, "%du", &option) != 1)
+ return -EINVAL;
+
+ wlfw_qdss_trace_stop(plat_priv, option);
+ cnss_pr_dbg("Received QDSS stop command\n");
+ return count;
+}
+
+static ssize_t qdss_conf_download_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ cnss_wlfw_qdss_dnld_send_sync(plat_priv);
+ cnss_pr_dbg("Received QDSS download config command\n");
+ return count;
+}
+
+static ssize_t hw_trace_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+ int tmp = 0;
+
+ if (sscanf(buf, "%du", &tmp) != 1)
+ return -EINVAL;
+
+ plat_priv->hw_trc_override = tmp;
+ cnss_pr_dbg("Received QDSS hw_trc_override indication\n");
+ return count;
+}
+
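+/* Write-only sysfs attributes, exposed through cnss_attr_group on the
+ * platform device and reachable via the /sys/kernel/cnss link created in
+ * cnss_create_sysfs_link().
+ */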
+static DEVICE_ATTR_WO(fs_ready);
+static DEVICE_ATTR_WO(shutdown);
+static DEVICE_ATTR_WO(recovery);
+static DEVICE_ATTR_WO(enable_hds);
+static DEVICE_ATTR_WO(qdss_trace_start);
+static DEVICE_ATTR_WO(qdss_trace_stop);
+static DEVICE_ATTR_WO(qdss_conf_download);
+static DEVICE_ATTR_WO(hw_trace_override);
+
+static struct attribute *cnss_attrs[] = {
+ &dev_attr_fs_ready.attr,
+ &dev_attr_shutdown.attr,
+ &dev_attr_recovery.attr,
+ &dev_attr_enable_hds.attr,
+ &dev_attr_qdss_trace_start.attr,
+ &dev_attr_qdss_trace_stop.attr,
+ &dev_attr_qdss_conf_download.attr,
+ &dev_attr_hw_trace_override.attr,
+ NULL,
+};
+
+static struct attribute_group cnss_attr_group = {
+ .attrs = cnss_attrs,
+};
+
+static int cnss_create_sysfs_link(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+ int ret;
+
+ ret = sysfs_create_link(kernel_kobj, &dev->kobj, "cnss");
+ if (ret) {
+ cnss_pr_err("Failed to create cnss link, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ /* This is only for backward compatibility. */
+ ret = sysfs_create_link(kernel_kobj, &dev->kobj, "shutdown_wlan");
+ if (ret) {
+ cnss_pr_err("Failed to create shutdown_wlan link, err = %d\n",
+ ret);
+ goto rm_cnss_link;
+ }
+
+ return 0;
+
+rm_cnss_link:
+ sysfs_remove_link(kernel_kobj, "cnss");
+out:
+ return ret;
+}
+
+static void cnss_remove_sysfs_link(struct cnss_plat_data *plat_priv)
+{
+ sysfs_remove_link(kernel_kobj, "shutdown_wlan");
+ sysfs_remove_link(kernel_kobj, "cnss");
+}
+
+static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = devm_device_add_group(&plat_priv->plat_dev->dev,
+ &cnss_attr_group);
+ if (ret) {
+ cnss_pr_err("Failed to create cnss device group, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ cnss_create_sysfs_link(plat_priv);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
+{
+ cnss_remove_sysfs_link(plat_priv);
+ devm_device_remove_group(&plat_priv->plat_dev->dev, &cnss_attr_group);
+}
+
+static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
+{
+ spin_lock_init(&plat_priv->event_lock);
+ plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
+ WQ_UNBOUND, 1);
+ if (!plat_priv->event_wq) {
+ cnss_pr_err("Failed to create event workqueue!\n");
+ return -EFAULT;
+ }
+
+ INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
+ INIT_LIST_HEAD(&plat_priv->event_list);
+
+ return 0;
+}
+
+static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
+{
+ destroy_workqueue(plat_priv->event_wq);
+}
+
+static int cnss_reboot_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, reboot_nb);
+
+ set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+ complete_all(&plat_priv->power_up_complete);
+ complete_all(&plat_priv->cal_complete);
+ cnss_pr_dbg("Reboot is in progress with action %d\n", action);
+
+ return NOTIFY_DONE;
+}
+
+static int cnss_misc_init(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ timer_setup(&plat_priv->fw_boot_timer,
+ cnss_bus_fw_boot_timeout_hdlr, 0);
+
+ ret = register_pm_notifier(&cnss_pm_notifier);
+ if (ret)
+ cnss_pr_err("Failed to register PM notifier, err = %d\n", ret);
+
+ plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
+ ret = register_reboot_notifier(&plat_priv->reboot_nb);
+ if (ret)
+ cnss_pr_err("Failed to register reboot notifier, err = %d\n",
+ ret);
+
+ ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
+ if (ret)
+ cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+ ret);
+
+ INIT_WORK(&plat_priv->recovery_work, cnss_recovery_work_handler);
+ init_completion(&plat_priv->power_up_complete);
+ init_completion(&plat_priv->cal_complete);
+ init_completion(&plat_priv->rddm_complete);
+ init_completion(&plat_priv->recovery_complete);
+ init_completion(&plat_priv->daemon_connected);
+ mutex_init(&plat_priv->dev_lock);
+ mutex_init(&plat_priv->driver_ops_lock);
+ plat_priv->recovery_ws =
+ wakeup_source_register(&plat_priv->plat_dev->dev,
+ "CNSS_FW_RECOVERY");
+ if (!plat_priv->recovery_ws)
+ cnss_pr_err("Failed to setup FW recovery wake source\n");
+
+ ret = cnss_plat_ipc_register(cnss_daemon_connection_update_cb,
+ plat_priv);
+ if (ret)
+ cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
+ ret);
+
+ return 0;
+}
+
+static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
+{
+ complete_all(&plat_priv->recovery_complete);
+ complete_all(&plat_priv->rddm_complete);
+ complete_all(&plat_priv->cal_complete);
+ complete_all(&plat_priv->power_up_complete);
+ complete_all(&plat_priv->daemon_connected);
+ device_init_wakeup(&plat_priv->plat_dev->dev, false);
+ unregister_reboot_notifier(&plat_priv->reboot_nb);
+ unregister_pm_notifier(&cnss_pm_notifier);
+ del_timer(&plat_priv->fw_boot_timer);
+ wakeup_source_unregister(plat_priv->recovery_ws);
+ cnss_plat_ipc_unregister(plat_priv);
+}
+
+static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
+{
+ plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
+
+ plat_priv->cbc_enabled = !IS_ENABLED(CONFIG_CNSS_EMULATION) &&
+ of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+ "qcom,wlan-cbc-enabled");
+
+ plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
+ plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
+ plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
+ plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
+ plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
+	/* ADSP power collapse (PC) is enabled by default, so initialize
+	 * adsp_pc_enabled to true.
+	 */
+ plat_priv->adsp_pc_enabled = true;
+}
+
+static void cnss_get_pm_domain_info(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ plat_priv->use_pm_domain =
+ of_property_read_bool(dev->of_node, "use-pm-domain");
+
+ cnss_pr_dbg("use-pm-domain is %d\n", plat_priv->use_pm_domain);
+}
+
+static void cnss_get_wlaon_pwr_ctrl_info(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ plat_priv->set_wlaon_pwr_ctrl =
+ of_property_read_bool(dev->of_node, "qcom,set-wlaon-pwr-ctrl");
+
+ cnss_pr_dbg("set_wlaon_pwr_ctrl is %d\n",
+ plat_priv->set_wlaon_pwr_ctrl);
+}
+
+static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
+{
+ return (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+ "qcom,converged-dt") ||
+ of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+ "qcom,same-dt-multi-dev"));
+}
+
+static const struct platform_device_id cnss_platform_id_table[] = {
+ { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
+ { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
+ { .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
+ { .name = "qca6490", .driver_data = QCA6490_DEVICE_ID, },
+ { .name = "wcn7850", .driver_data = WCN7850_DEVICE_ID, },
+ { },
+};
+
+static const struct of_device_id cnss_of_match_table[] = {
+ {
+ .compatible = "qcom,cnss",
+ .data = (void *)&cnss_platform_id_table[0]},
+ {
+ .compatible = "qcom,cnss-qca6290",
+ .data = (void *)&cnss_platform_id_table[1]},
+ {
+ .compatible = "qcom,cnss-qca6390",
+ .data = (void *)&cnss_platform_id_table[2]},
+ {
+ .compatible = "qcom,cnss-qca6490",
+ .data = (void *)&cnss_platform_id_table[3]},
+ {
+ .compatible = "qcom,cnss-wcn7850",
+ .data = (void *)&cnss_platform_id_table[4]},
+ { },
+};
+MODULE_DEVICE_TABLE(of, cnss_of_match_table);
+
+static inline bool
+cnss_use_nv_mac(struct cnss_plat_data *plat_priv)
+{
+ return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+ "use-nv-mac");
+}
+
+static int cnss_probe(struct platform_device *plat_dev)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+ const struct of_device_id *of_id;
+ const struct platform_device_id *device_id;
+ int retry = 0;
+
+ cnss_pr_err("%s: Victor Enter\n", __func__);
+
+ if (cnss_get_plat_priv(plat_dev)) {
+ cnss_pr_err("Driver is already initialized!\n");
+ ret = -EEXIST;
+ goto out;
+ }
+
+ of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
+ if (!of_id || !of_id->data) {
+ cnss_pr_err("Failed to find of match device!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ device_id = of_id->data;
+
+ cnss_pr_err("%s: Victor device_id = %d\n", __func__, device_id);
+
+ plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
+ GFP_KERNEL);
+ if (!plat_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ plat_priv->plat_dev = plat_dev;
+ plat_priv->device_id = device_id->driver_data;
+ plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
+ plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
+ plat_priv->use_fw_path_with_prefix =
+ cnss_use_fw_path_with_prefix(plat_priv);
+ cnss_set_plat_priv(plat_dev, plat_priv);
+ platform_set_drvdata(plat_dev, plat_priv);
+ INIT_LIST_HEAD(&plat_priv->vreg_list);
+ INIT_LIST_HEAD(&plat_priv->clk_list);
+
+ cnss_get_pm_domain_info(plat_priv);
+ cnss_get_wlaon_pwr_ctrl_info(plat_priv);
+ cnss_get_tcs_info(plat_priv);
+ cnss_get_cpr_info(plat_priv);
+ cnss_aop_mbox_init(plat_priv);
+ cnss_init_control_params(plat_priv);
+
+ ret = cnss_get_resources(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_register_esoc(plat_priv);
+ if (ret)
+ goto free_res;
+
+ ret = cnss_register_bus_scale(plat_priv);
+ if (ret)
+ goto unreg_esoc;
+
+ ret = cnss_create_sysfs(plat_priv);
+ if (ret)
+ goto unreg_bus_scale;
+
+ ret = cnss_event_work_init(plat_priv);
+ if (ret)
+ goto remove_sysfs;
+
+ ret = cnss_qmi_init(plat_priv);
+ if (ret)
+ goto deinit_event_work;
+
+ ret = cnss_dms_init(plat_priv);
+ if (ret)
+ goto deinit_qmi;
+
+ ret = cnss_debugfs_create(plat_priv);
+ if (ret)
+ goto deinit_dms;
+
+ ret = cnss_misc_init(plat_priv);
+ if (ret)
+ goto destroy_debugfs;
+
+	/* Make sure all platform-related init is done before device power on
+	 * and bus init. Bus init is retried with an increasing delay when it
+	 * fails for any reason other than probe deferral.
+	 */
+ if (!test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks)) {
+retry:
+ ret = cnss_power_on_device(plat_priv);
+ if (ret)
+ goto deinit_misc;
+
+ ret = cnss_bus_init(plat_priv);
+ if (ret) {
+ if ((ret != -EPROBE_DEFER) &&
+ retry++ < POWER_ON_RETRY_MAX_TIMES) {
+ cnss_power_off_device(plat_priv);
+ cnss_pr_dbg("Retry cnss_bus_init #%d\n", retry);
+ msleep(POWER_ON_RETRY_DELAY_MS * retry);
+ goto retry;
+ }
+ goto power_off;
+ }
+ }
+
+ cnss_register_coex_service(plat_priv);
+ cnss_register_ims_service(plat_priv);
+
+ ret = cnss_genl_init();
+ if (ret < 0)
+ cnss_pr_err("CNSS genl init failed %d\n", ret);
+
+ cnss_pr_info("Platform driver probed successfully.\n");
+
+ return 0;
+
+power_off:
+ if (!test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks))
+ cnss_power_off_device(plat_priv);
+deinit_misc:
+ cnss_misc_deinit(plat_priv);
+destroy_debugfs:
+ cnss_debugfs_destroy(plat_priv);
+deinit_dms:
+ cnss_dms_deinit(plat_priv);
+deinit_qmi:
+ cnss_qmi_deinit(plat_priv);
+deinit_event_work:
+ cnss_event_work_deinit(plat_priv);
+remove_sysfs:
+ cnss_remove_sysfs(plat_priv);
+unreg_bus_scale:
+ cnss_unregister_bus_scale(plat_priv);
+unreg_esoc:
+ cnss_unregister_esoc(plat_priv);
+free_res:
+ cnss_put_resources(plat_priv);
+reset_ctx:
+ platform_set_drvdata(plat_dev, NULL);
+ cnss_set_plat_priv(plat_dev, NULL);
+out:
+ return ret;
+}
+
+static int cnss_remove(struct platform_device *plat_dev)
+{
+ struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+
+ cnss_genl_exit();
+ cnss_unregister_ims_service(plat_priv);
+ cnss_unregister_coex_service(plat_priv);
+ cnss_bus_deinit(plat_priv);
+ cnss_misc_deinit(plat_priv);
+ cnss_debugfs_destroy(plat_priv);
+ cnss_dms_deinit(plat_priv);
+ cnss_qmi_deinit(plat_priv);
+ cnss_event_work_deinit(plat_priv);
+ cnss_remove_sysfs(plat_priv);
+ cnss_unregister_bus_scale(plat_priv);
+ cnss_unregister_esoc(plat_priv);
+ cnss_put_resources(plat_priv);
+ platform_set_drvdata(plat_dev, NULL);
+ plat_env = NULL;
+
+ return 0;
+}
+
+static void cnss_shutdown(struct platform_device *plat_dev)
+{
+ struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+
+ cnss_pr_dbg("cnss shutdown\n");
+ set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ cnss_bus_dev_shutdown(plat_priv);
+}
+
+static struct platform_driver cnss_platform_driver = {
+ .probe = cnss_probe,
+ .remove = cnss_remove,
+ .shutdown = cnss_shutdown,
+ .driver = {
+ .name = "cnss2",
+ .of_match_table = cnss_of_match_table,
+#ifdef CONFIG_CNSS_ASYNC
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
+ },
+};
+
+/**
+ * cnss_is_valid_dt_node_found - Check whether a valid device tree node exists
+ *
+ * A valid device tree node is one whose "compatible" property matches the
+ * device match table and whose "status" property is not disabled.
+ *
+ * Return: true if a valid device tree node is found, false otherwise
+ */
+static bool cnss_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_matching_node(dn, cnss_of_match_table) {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+ if (dn)
+ return true;
+
+ return false;
+}
+
+static int __init cnss_initialize(void)
+{
+ int ret = 0;
+
+ if (!cnss_is_valid_dt_node_found())
+ return -ENODEV;
+
+ cnss_pr_err("%s: Victor Enter\n", __func__);
+
+ cnss_debug_init();
+ ret = platform_driver_register(&cnss_platform_driver);
+ if (ret)
+ cnss_debug_deinit();
+
+ cnss_pr_err("%s: Victor Exit\n", __func__);
+
+ return ret;
+}
+
+static void __exit cnss_exit(void)
+{
+ platform_driver_unregister(&cnss_platform_driver);
+ cnss_debug_deinit();
+}
+
+module_init(cnss_initialize);
+module_exit(cnss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS2 Platform Driver");
diff --git a/cnss2/main.h b/cnss2/main.h
new file mode 100644
index 0000000..92cdab1
--- /dev/null
+++ b/cnss2/main.h
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_MAIN_H
+#define _CNSS_MAIN_H
+
+#if IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64)
+#include <asm/arch_timer.h>
+#endif
+#if IS_ENABLED(CONFIG_ESOC)
+#include <linux/esoc_client.h>
+#endif
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
+#include <linux/interconnect.h>
+#endif
+#include <linux/mailbox_client.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <net/cnss2.h>
+#if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
+#include <soc/qcom/memory_dump.h>
+#endif
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) || \
+ IS_ENABLED(CONFIG_QCOM_RAMDUMP)
+#include <soc/qcom/qcom_ramdump.h>
+#endif
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#endif
+
+#include "qmi.h"
+
+#define MAX_NO_OF_MAC_ADDR 4
+#define QMI_WLFW_MAX_TIMESTAMP_LEN 32
+#define QMI_WLFW_MAX_NUM_MEM_SEG 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN 128
+#define CNSS_RDDM_TIMEOUT_MS 20000
+#define RECOVERY_TIMEOUT 60000
+#define WLAN_WD_TIMEOUT_MS 60000
+#define WLAN_COLD_BOOT_CAL_TIMEOUT 60000
+#define WLAN_MISSION_MODE_TIMEOUT 30000
+#define TIME_CLOCK_FREQ_HZ 19200000
+#define CNSS_RAMDUMP_MAGIC 0x574C414E
+#define CNSS_RAMDUMP_VERSION 0
+#define MAX_FIRMWARE_NAME_LEN 40
+#define FW_V2_NUMBER 2
+#define POWER_ON_RETRY_MAX_TIMES 3
+#define POWER_ON_RETRY_DELAY_MS 200
+
+#define CNSS_EVENT_SYNC BIT(0)
+#define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
+#define CNSS_EVENT_UNKILLABLE BIT(2)
+#define CNSS_EVENT_SYNC_UNINTERRUPTIBLE (CNSS_EVENT_SYNC | \
+ CNSS_EVENT_UNINTERRUPTIBLE)
+#define CNSS_EVENT_SYNC_UNKILLABLE (CNSS_EVENT_SYNC | CNSS_EVENT_UNKILLABLE)
+
+enum cnss_dev_bus_type {
+ CNSS_BUS_NONE = -1,
+ CNSS_BUS_PCI,
+};
+
+struct cnss_vreg_cfg {
+ const char *name;
+ u32 min_uv;
+ u32 max_uv;
+ u32 load_ua;
+ u32 delay_us;
+ u32 need_unvote;
+};
+
+struct cnss_vreg_info {
+ struct list_head list;
+ struct regulator *reg;
+ struct cnss_vreg_cfg cfg;
+ u32 enabled;
+};
+
+enum cnss_vreg_type {
+ CNSS_VREG_PRIM,
+};
+
+struct cnss_clk_cfg {
+ const char *name;
+ u32 freq;
+ u32 required;
+};
+
+struct cnss_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ struct cnss_clk_cfg cfg;
+ u32 enabled;
+};
+
+struct cnss_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *bootstrap_active;
+ struct pinctrl_state *wlan_en_active;
+ struct pinctrl_state *wlan_en_sleep;
+ int bt_en_gpio;
+ int xo_clk_gpio; /* qca6490 only */
+};
+
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+struct cnss_subsys_info {
+ struct subsys_device *subsys_device;
+ struct subsys_desc subsys_desc;
+ void *subsys_handle;
+};
+#endif
+
+struct cnss_ramdump_info {
+ void *ramdump_dev;
+ unsigned long ramdump_size;
+ void *ramdump_va;
+ phys_addr_t ramdump_pa;
+#if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
+ struct msm_dump_data dump_data;
+#endif
+};
+
+struct cnss_dump_seg {
+ unsigned long address;
+ void *v_address;
+ unsigned long size;
+ u32 type;
+};
+
+struct cnss_dump_data {
+ u32 version;
+ u32 magic;
+ char name[32];
+ phys_addr_t paddr;
+ int nentries;
+ u32 seg_version;
+};
+
+struct cnss_ramdump_info_v2 {
+ void *ramdump_dev;
+ unsigned long ramdump_size;
+ void *dump_data_vaddr;
+ u8 dump_data_valid;
+ struct cnss_dump_data dump_data;
+};
+
+#if IS_ENABLED(CONFIG_ESOC)
+struct cnss_esoc_info {
+ struct esoc_desc *esoc_desc;
+ u8 notify_modem_status;
+ void *modem_notify_handler;
+ int modem_current_status;
+};
+#endif
+
+#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
+/**
+ * struct cnss_bus_bw_cfg - Interconnect vote data
+ * @avg_bw: Vote for average bandwidth
+ * @peak_bw: Vote for peak bandwidth
+ */
+struct cnss_bus_bw_cfg {
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/* Number of bw votes (avg, peak) entries that ICC requires */
+#define CNSS_ICC_VOTE_MAX 2
+
+/**
+ * struct cnss_bus_bw_info - Bus bandwidth config for interconnect path
+ * @list: Kernel linked list
+ * @icc_name: Name of interconnect path as defined in Device tree
+ * @icc_path: Interconnect path data structure
+ * @cfg_table: Interconnect vote data for average and peak bandwidth
+ */
+struct cnss_bus_bw_info {
+ struct list_head list;
+ const char *icc_name;
+ struct icc_path *icc_path;
+ struct cnss_bus_bw_cfg *cfg_table;
+};
+#endif
+
+/**
+ * struct cnss_interconnect_cfg - CNSS platform interconnect config
+ * @list_head: List of interconnect path bandwidth configs
+ * @path_count: Count of interconnect paths configured in the device tree
+ * @current_bw_vote: Bandwidth vote currently provided by the WLAN driver
+ * @bus_bw_cfg_count: Number of bandwidth configs available for voting, i.e.
+ *                    the array size of struct cnss_bus_bw_info.cfg_table
+ */
+struct cnss_interconnect_cfg {
+ struct list_head list_head;
+ u32 path_count;
+ int current_bw_vote;
+ u32 bus_bw_cfg_count;
+};
+
+struct cnss_fw_mem {
+ size_t size;
+ void *va;
+ phys_addr_t pa;
+ u8 valid;
+ u32 type;
+ unsigned long attrs;
+};
+
+struct wlfw_rf_chip_info {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info {
+ u32 board_id;
+};
+
+struct wlfw_soc_info {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN + 1];
+};
+
+enum cnss_mem_type {
+ CNSS_MEM_TYPE_MSA,
+ CNSS_MEM_TYPE_DDR,
+ CNSS_MEM_BDF,
+ CNSS_MEM_M3,
+ CNSS_MEM_CAL_V01,
+ CNSS_MEM_DPD_V01,
+};
+
+enum cnss_fw_dump_type {
+ CNSS_FW_IMAGE,
+ CNSS_FW_RDDM,
+ CNSS_FW_REMOTE_HEAP,
+ CNSS_FW_DUMP_TYPE_MAX,
+};
+
+struct cnss_dump_entry {
+ u32 type;
+ u32 entry_start;
+ u32 entry_num;
+};
+
+struct cnss_dump_meta_info {
+ u32 magic;
+ u32 version;
+ u32 chipset;
+ u32 total_entries;
+ struct cnss_dump_entry entry[CNSS_FW_DUMP_TYPE_MAX];
+};
+
+enum cnss_driver_event_type {
+ CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ CNSS_DRIVER_EVENT_SERVER_EXIT,
+ CNSS_DRIVER_EVENT_REQUEST_MEM,
+ CNSS_DRIVER_EVENT_FW_MEM_READY,
+ CNSS_DRIVER_EVENT_FW_READY,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ CNSS_DRIVER_EVENT_IDLE_RESTART,
+ CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
+ CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
+ CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+ CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
+ CNSS_DRIVER_EVENT_MAX,
+};
+
+enum cnss_driver_state {
+ CNSS_QMI_WLFW_CONNECTED = 0,
+ CNSS_FW_MEM_READY,
+ CNSS_FW_READY,
+ CNSS_IN_COLD_BOOT_CAL,
+ CNSS_DRIVER_LOADING,
+ CNSS_DRIVER_UNLOADING = 5,
+ CNSS_DRIVER_IDLE_RESTART,
+ CNSS_DRIVER_IDLE_SHUTDOWN,
+ CNSS_DRIVER_PROBED,
+ CNSS_DRIVER_RECOVERY,
+ CNSS_FW_BOOT_RECOVERY = 10,
+ CNSS_DEV_ERR_NOTIFY,
+ CNSS_DRIVER_DEBUG,
+ CNSS_COEX_CONNECTED,
+ CNSS_IMS_CONNECTED,
+ CNSS_IN_SUSPEND_RESUME = 15,
+ CNSS_IN_REBOOT,
+ CNSS_COLD_BOOT_CAL_DONE,
+ CNSS_IN_PANIC,
+ CNSS_QMI_DEL_SERVER,
+ CNSS_QMI_DMS_CONNECTED = 20,
+ CNSS_DAEMON_CONNECTED,
+};
+
+struct cnss_recovery_data {
+ enum cnss_recovery_reason reason;
+};
+
+enum cnss_pins {
+ CNSS_WLAN_EN,
+ CNSS_PCIE_TXP,
+ CNSS_PCIE_TXN,
+ CNSS_PCIE_RXP,
+ CNSS_PCIE_RXN,
+ CNSS_PCIE_REFCLKP,
+ CNSS_PCIE_REFCLKN,
+ CNSS_PCIE_RST,
+ CNSS_PCIE_WAKE,
+};
+
+struct cnss_pin_connect_result {
+ u32 fw_pwr_pin_result;
+ u32 fw_phy_io_pin_result;
+ u32 fw_rf_pin_result;
+ u32 host_pin_result;
+};
+
+enum cnss_debug_quirks {
+ LINK_DOWN_SELF_RECOVERY,
+ SKIP_DEVICE_BOOT,
+ USE_CORE_ONLY_FW,
+ SKIP_RECOVERY,
+ QMI_BYPASS,
+ ENABLE_WALTEST,
+ ENABLE_PCI_LINK_DOWN_PANIC,
+ FBC_BYPASS,
+ ENABLE_DAEMON_SUPPORT,
+ DISABLE_DRV,
+ DISABLE_IO_COHERENCY,
+ IGNORE_PCI_LINK_FAILURE,
+ DISABLE_TIME_SYNC,
+};
+
+enum cnss_bdf_type {
+ CNSS_BDF_BIN,
+ CNSS_BDF_ELF,
+ CNSS_BDF_REGDB = 4,
+ CNSS_BDF_HDS = 6,
+};
+
+enum cnss_cal_status {
+ CNSS_CAL_DONE,
+ CNSS_CAL_TIMEOUT,
+ CNSS_CAL_FAILURE,
+};
+
+struct cnss_cal_info {
+ enum cnss_cal_status cal_status;
+};
+
+struct cnss_control_params {
+ unsigned long quirks;
+ unsigned int mhi_timeout;
+ unsigned int mhi_m2_timeout;
+ unsigned int qmi_timeout;
+ unsigned int bdf_type;
+ unsigned int time_sync_period;
+};
+
+struct cnss_tcs_info {
+ resource_size_t cmd_base_addr;
+ void __iomem *cmd_base_addr_io;
+};
+
+struct cnss_cpr_info {
+ resource_size_t tcs_cmd_data_addr;
+ void __iomem *tcs_cmd_data_addr_io;
+ u32 cpr_pmic_addr;
+ u32 voltage;
+};
+
+enum cnss_ce_index {
+ CNSS_CE_00,
+ CNSS_CE_01,
+ CNSS_CE_02,
+ CNSS_CE_03,
+ CNSS_CE_04,
+ CNSS_CE_05,
+ CNSS_CE_06,
+ CNSS_CE_07,
+ CNSS_CE_08,
+ CNSS_CE_09,
+ CNSS_CE_10,
+ CNSS_CE_11,
+ CNSS_CE_COMMON,
+};
+
+struct cnss_dms_data {
+ u32 mac_valid;
+ u8 mac[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+enum cnss_timeout_type {
+ CNSS_TIMEOUT_QMI,
+ CNSS_TIMEOUT_POWER_UP,
+ CNSS_TIMEOUT_IDLE_RESTART,
+ CNSS_TIMEOUT_CALIBRATION,
+ CNSS_TIMEOUT_WLAN_WATCHDOG,
+ CNSS_TIMEOUT_RDDM,
+ CNSS_TIMEOUT_RECOVERY,
+ CNSS_TIMEOUT_DAEMON_CONNECTION,
+};
+
+struct cnss_plat_data {
+ struct platform_device *plat_dev;
+ void *bus_priv;
+ enum cnss_dev_bus_type bus_type;
+ struct list_head vreg_list;
+ struct list_head clk_list;
+ struct cnss_pinctrl_info pinctrl_info;
+#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
+ struct cnss_subsys_info subsys_info;
+#endif
+ struct cnss_ramdump_info ramdump_info;
+ struct cnss_ramdump_info_v2 ramdump_info_v2;
+#if IS_ENABLED(CONFIG_ESOC)
+ struct cnss_esoc_info esoc_info;
+#endif
+ struct cnss_interconnect_cfg icc;
+ struct notifier_block modem_nb;
+ struct notifier_block reboot_nb;
+ struct notifier_block panic_nb;
+ struct cnss_platform_cap cap;
+ struct pm_qos_request qos_request;
+ struct cnss_device_version device_version;
+ u32 rc_num;
+ unsigned long device_id;
+ enum cnss_driver_status driver_status;
+ u32 recovery_count;
+ u8 recovery_enabled;
+ u8 hds_enabled;
+ unsigned long driver_state;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for driver work event handling */
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct work_struct recovery_work;
+ struct delayed_work wlan_reg_driver_work;
+ struct qmi_handle qmi_wlfw;
+ struct qmi_handle qmi_dms;
+ struct wlfw_rf_chip_info chip_info;
+ struct wlfw_rf_board_info board_info;
+ struct wlfw_soc_info soc_info;
+ struct wlfw_fw_version_info fw_version_info;
+ struct cnss_dev_mem_info dev_mem_info[CNSS_MAX_DEV_MEM_NUM];
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN + 1];
+ u32 otp_version;
+ u32 fw_mem_seg_len;
+ struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
+ struct cnss_fw_mem m3_mem;
+ struct cnss_fw_mem *cal_mem;
+ u64 cal_time;
+ bool cbc_file_download;
+ u32 cal_file_size;
+ struct completion daemon_connected;
+ u32 qdss_mem_seg_len;
+ struct cnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
+ u32 *qdss_reg;
+ struct cnss_pin_connect_result pin_result;
+ struct dentry *root_dentry;
+ atomic_t pm_count;
+ struct timer_list fw_boot_timer;
+ struct completion power_up_complete;
+ struct completion cal_complete;
+ struct mutex dev_lock; /* mutex for register access through debugfs */
+ struct mutex driver_ops_lock; /* mutex for external driver ops */
+ u32 device_freq_hz;
+ u32 diag_reg_read_addr;
+ u32 diag_reg_read_mem_type;
+ u32 diag_reg_read_len;
+ u8 *diag_reg_read_buf;
+ u8 cal_done;
+ u8 powered_on;
+ u8 use_fw_path_with_prefix;
+ char firmware_name[MAX_FIRMWARE_NAME_LEN];
+ char fw_fallback_name[MAX_FIRMWARE_NAME_LEN];
+ struct completion rddm_complete;
+ struct completion recovery_complete;
+ struct cnss_control_params ctrl_params;
+ struct cnss_cpr_info cpr_info;
+ u64 antenna;
+ u64 grant;
+ struct qmi_handle coex_qmi;
+ struct qmi_handle ims_qmi;
+ struct qmi_txn txn;
+ struct wakeup_source *recovery_ws;
+ u64 dynamic_feature;
+ void *get_info_cb_ctx;
+ int (*get_info_cb)(void *ctx, void *event, int event_len);
+ bool cbc_enabled;
+ u8 use_pm_domain;
+ u8 use_nv_mac;
+ u8 set_wlaon_pwr_ctrl;
+ struct cnss_tcs_info tcs_info;
+ bool fw_pcie_gen_switch;
+ u8 pcie_gen_speed;
+ struct cnss_dms_data dms;
+ int power_up_error;
+ u32 hw_trc_override;
+ struct mbox_client mbox_client_data;
+ struct mbox_chan *mbox_chan;
+ const char *vreg_ol_cpr, *vreg_ipa;
+ bool adsp_pc_enabled;
+ u64 feature_list;
+};
+
+#if IS_ENABLED(CONFIG_ARCH_QCOM)
+static inline u64 cnss_get_host_timestamp(struct cnss_plat_data *plat_priv)
+{
+ u64 ticks = __arch_counter_get_cntvct();
+
+ do_div(ticks, TIME_CLOCK_FREQ_HZ / 100000);
+
+ return ticks * 10;
+}
+#else
+static inline u64 cnss_get_host_timestamp(struct cnss_plat_data *plat_priv)
+{
+ struct timespec64 ts;
+
+ ktime_get_ts64(&ts);
+
+ return (ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+}
+#endif
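+
+/* Worked example for the CONFIG_ARCH_QCOM path above: with
+ * TIME_CLOCK_FREQ_HZ at 19.2 MHz, do_div() divides the raw counter by
+ * 19200000 / 100000 = 192, i.e. by the number of ticks in 10 us, and the
+ * trailing "* 10" scales the result to microseconds. A counter value of
+ * 1920000 (100 ms worth of ticks) thus yields 1920000 / 192 * 10 =
+ * 100000 us, matching the microsecond value produced by the
+ * ktime_get_ts64() fallback.
+ */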
+
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
+void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv);
+void cnss_pm_relax(struct cnss_plat_data *plat_priv);
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ u32 flags, void *data);
+int cnss_get_vreg_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type);
+void cnss_put_vreg_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type);
+int cnss_vreg_on_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type);
+int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type);
+int cnss_get_clk(struct cnss_plat_data *plat_priv);
+void cnss_put_clk(struct cnss_plat_data *plat_priv);
+int cnss_vreg_unvote_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type);
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
+int cnss_power_on_device(struct cnss_plat_data *plat_priv);
+void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+bool cnss_is_device_powered_on(struct cnss_plat_data *plat_priv);
+int cnss_register_subsys(struct cnss_plat_data *plat_priv);
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
+int cnss_do_ramdump(struct cnss_plat_data *plat_priv);
+int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
+int cnss_get_cpr_info(struct cnss_plat_data *plat_priv);
+int cnss_update_cpr_info(struct cnss_plat_data *plat_priv);
+int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
+ phys_addr_t *pa, unsigned long attrs);
+int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size);
+int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, phys_addr_t pa, size_t size);
+int cnss_enable_int_pow_amp_vreg(struct cnss_plat_data *plat_priv);
+int cnss_get_tcs_info(struct cnss_plat_data *plat_priv);
+unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
+ enum cnss_timeout_type);
+int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv);
+int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
+ const struct firmware **fw_entry,
+ const char *filename);
+int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
+ enum cnss_feature_v01 feature);
+int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
+ u64 *feature_list);
+#endif /* _CNSS_MAIN_H */
diff --git a/cnss2/pci.c b/cnss2/pci.c
new file mode 100644
index 0000000..4f3e0b8
--- /dev/null
+++ b/cnss2/pci.c
@@ -0,0 +1,6292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/cma.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/memblock.h>
+#include <linux/completion.h>
+
+#include "main.h"
+#include "bus.h"
+#include "debug.h"
+#include "pci.h"
+#include "reg.h"
+
+#define PCI_LINK_UP 1
+#define PCI_LINK_DOWN 0
+
+#define SAVE_PCI_CONFIG_SPACE 1
+#define RESTORE_PCI_CONFIG_SPACE 0
+
+#define PM_OPTIONS_DEFAULT 0
+
+#define PCI_BAR_NUM 0
+#define PCI_INVALID_READ(val) ((val) == U32_MAX)
+
+#define PCI_DMA_MASK_32_BIT DMA_BIT_MASK(32)
+#define PCI_DMA_MASK_36_BIT DMA_BIT_MASK(36)
+#define PCI_DMA_MASK_64_BIT DMA_BIT_MASK(64)
+
+#define MHI_NODE_NAME "qcom,mhi"
+#define MHI_MSI_NAME "MHI"
+
+#define QCA6390_PATH_PREFIX "qca6390/"
+#define QCA6490_PATH_PREFIX "qca6490/"
+#define WCN7850_PATH_PREFIX "wcn7850/"
+#define DEFAULT_PHY_M3_FILE_NAME "m3.bin"
+#define DEFAULT_PHY_UCODE_FILE_NAME "phy_ucode.elf"
+#define DEFAULT_FW_FILE_NAME "amss.bin"
+#define FW_V2_FILE_NAME "amss20.bin"
+#define DEVICE_MAJOR_VERSION_MASK 0xF
+
+#define WAKE_MSI_NAME "WAKE"
+
+#define DEV_RDDM_TIMEOUT 5000
+#define WAKE_EVENT_TIMEOUT 5000
+
+#ifdef CONFIG_CNSS_EMULATION
+#define EMULATION_HW 1
+#else
+#define EMULATION_HW 0
+#endif
+
+#define RAMDUMP_SIZE_DEFAULT 0x420000
+#define DEVICE_RDDM_COOKIE 0xCAFECACE
+
+static DEFINE_SPINLOCK(pci_link_down_lock);
+static DEFINE_SPINLOCK(pci_reg_window_lock);
+static DEFINE_SPINLOCK(time_sync_lock);
+
+#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
+#define MHI_M2_TIMEOUT_MS (plat_priv->ctrl_params.mhi_m2_timeout)
+
+#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US 1000
+#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US 2000
+
+#define FORCE_WAKE_DELAY_MIN_US 4000
+#define FORCE_WAKE_DELAY_MAX_US 6000
+#define FORCE_WAKE_DELAY_TIMEOUT_US 60000
+
+#define LINK_TRAINING_RETRY_MAX_TIMES 3
+#define LINK_TRAINING_RETRY_DELAY_MS 500
+
+#define MHI_SUSPEND_RETRY_MAX_TIMES 3
+#define MHI_SUSPEND_RETRY_DELAY_US 5000
+
+#define BOOT_DEBUG_TIMEOUT_MS 7000
+
+#define HANG_DATA_LENGTH 384
+#define HST_HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH)
+#define HSP_HANG_DATA_OFFSET ((2 * 1024 * 1024) - HANG_DATA_LENGTH)
+
+static const struct mhi_channel_config cnss_mhi_channels[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 4,
+ .name = "DIAG",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 5,
+ .name = "DIAG",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static const struct mhi_event_config cnss_mhi_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .priority = 0,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 0,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_BW_SCALE,
+ .priority = 2,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+#endif
+};
+
+static const struct mhi_controller_config cnss_mhi_config = {
+ .max_channels = 32,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0x8000,
+ .num_channels = ARRAY_SIZE(cnss_mhi_channels),
+ .ch_cfg = cnss_mhi_channels,
+ .num_events = ARRAY_SIZE(cnss_mhi_events),
+ .event_cfg = cnss_mhi_events,
+};
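+
+/* A rough sketch (not part of this change) of how a configuration like
+ * cnss_mhi_config is handed to the MHI core. IRQ vectors, the register
+ * read/write callbacks and the error handling that a real controller must
+ * provide are omitted, and the fields shown are only an assumed minimum:
+ *
+ *    struct mhi_controller *mhi_ctrl = mhi_alloc_controller();
+ *
+ *    mhi_ctrl->cntrl_dev = &pci_priv->pci_dev->dev;
+ *    mhi_ctrl->regs = pci_priv->bar;
+ *    ...
+ *    ret = mhi_register_controller(mhi_ctrl, &cnss_mhi_config);
+ */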
+
+static struct cnss_pci_reg ce_src[] = {
+ { "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
+ { "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
+ { "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
+ { "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
+ { "SRC_CTRL", CE_SRC_CTRL_OFFSET },
+ { "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
+ { "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
+ { "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
+ { NULL },
+};
+
+static struct cnss_pci_reg ce_dst[] = {
+ { "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
+ { "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
+ { "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
+ { "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
+ { "DEST_CTRL", CE_DEST_CTRL_OFFSET },
+ { "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
+ { "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
+ { "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
+ { "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
+ { "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
+ { "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
+ { "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
+ { "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
+ { "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
+ { "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
+ { NULL },
+};
+
+static struct cnss_pci_reg ce_cmn[] = {
+ { "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
+ { "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
+ { "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
+ { "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
+ { "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
+ { NULL },
+};
+
+static struct cnss_pci_reg qdss_csr[] = {
+ { "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
+ { "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
+ { "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
+ { "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
+ { NULL },
+};
+
+/* The first field of each entry is a device bit mask; see
+ * enum cnss_pci_reg_mask for the valid values.
+ */
+static struct cnss_misc_reg wcss_reg_access_seq[] = {
+ {1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
+ {1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
+ {1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
+ {1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
+ {1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
+ {1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+ {1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+ {1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
+ {1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
+ {1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
+ {1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
+ {1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
+ {1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
+ {1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
+};
+
+static struct cnss_misc_reg pcie_reg_access_seq[] = {
+ {1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
+ {1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
+ {1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
+ {1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
+ {1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
+ {1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
+ {1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
+ {1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
+ {1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
+ {1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
+ {1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
+ {1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
+ {1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
+ {1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+ {1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
+ {1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
+ {1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
+ {1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
+ {1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
+ {1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
+};
+
+static struct cnss_misc_reg wlaon_reg_access_seq[] = {
+ {3, 0, WLAON_SOC_POWER_CTRL, 0},
+ {3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
+ {3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
+ {3, 0, WLAON_SW_COLD_RESET, 0},
+ {3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
+ {3, 0, WLAON_GDSC_DELAY_SETTING, 0},
+ {3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
+ {3, 0, WLAON_WL_PWR_STATUS_REG, 0},
+ {3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
+ {2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
+ {2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
+ {2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
+ {2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
+ {2, 0, WLAON_WL_AON_CXPC_REG, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS0, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS1, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS2, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS3, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS4, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS5, 0},
+ {2, 0, WLAON_WL_AON_APM_STATUS6, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
+ {3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
+ {3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
+ {3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
+ {3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
+ {3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
+ {3, 0, WLAON_QDSS_WCSS_REG, 0},
+ {3, 0, WLAON_QDSS_WCSS_ACK, 0},
+ {3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
+ {3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
+ {3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
+ {3, 0, WLAON_DLY_CONFIG, 0},
+ {3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
+ {3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
+ {3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
+ {3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
+ {3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
+ {3, 0, WLAON_Q6_COOKIE_BIT, 0},
+ {3, 0, WLAON_WARM_SW_ENTRY, 0},
+ {3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
+ {3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
+ {3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
+ {3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
+ {3, 0, WLAON_DEBUG, 0},
+ {3, 0, WLAON_SOC_PARAMETERS, 0},
+ {3, 0, WLAON_WLPM_SIGNAL, 0},
+ {3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
+ {3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
+ {3, 0, WLAON_PBL_STACK_CANARY, 0},
+ {3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
+ {3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
+ {3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
+ {3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
+ {3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
+ {3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
+ {3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
+ {3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
+ {3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
+ {3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
+ {3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
+ {3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
+ {3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
+ {3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
+ {3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
+ {3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
+ {3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
+ {3, 0, WLAON_MEM_CNT_SEL_REG, 0},
+ {3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
+ {3, 0, WLAON_MEM_DEBUG_REG, 0},
+ {3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
+ {3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
+ {3, 0, WLAON_WL_AON_SPARE2, 0},
+ {3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
+ {3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
+ {3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
+ {3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
+ {3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
+ {3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
+ {3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
+ {3, 0, WLAON_POWERCTRL_PMU_REG, 0},
+ {3, 0, WLAON_POWERCTRL_MEM_REG, 0},
+ {3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
+ {3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
+ {3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
+ {3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
+ {3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
+ {3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
+ {3, 0, WLAON_MEM_SVS_CFG_REG, 0},
+ {3, 0, WLAON_CMN_AON_MISC_REG, 0},
+ {3, 0, WLAON_INTR_STATUS, 0},
+ {2, 0, WLAON_INTR_ENABLE, 0},
+ {2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
+ {2, 0, WLAON_NOC_DBG_BUS_REG, 0},
+ {2, 0, WLAON_WL_CTRL_MISC_REG, 0},
+ {2, 0, WLAON_DBG_STATUS0, 0},
+ {2, 0, WLAON_DBG_STATUS1, 0},
+ {2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
+ {2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
+ {2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
+};
+
+static struct cnss_misc_reg syspm_reg_access_seq[] = {
+ {1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
+ {1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+};
+
+#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
+#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
+#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
+#define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
+
+#if IS_ENABLED(CONFIG_PCI_MSM)
+/**
+ * _cnss_pci_enumerate() - Enumerate PCIe endpoints
+ * @plat_priv: driver platform context pointer
+ * @rc_num: root complex index that an endpoint connects to
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power on root complex and enumerate the endpoint connected to it.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
+{
+ return msm_pcie_enumerate(rc_num);
+}
+
+/**
+ * cnss_pci_assert_perst() - Assert PCIe PERST GPIO
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to assert PCIe PERST GPIO.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
+ pci_dev->bus->number, pci_dev, NULL,
+ PM_OPTIONS_DEFAULT);
+}
+
+/**
+ * cnss_pci_disable_pc() - Disable PCIe link power collapse from RC driver
+ * @pci_priv: driver PCI bus context pointer
+ * @vote: value to indicate disable (true) or enable (false)
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to disable PCIe power collapse. The purpose of this API is to prevent
+ * the root complex driver from continuing to control the PCIe link from
+ * its system suspend/resume callbacks; the device driver itself should
+ * take full control of the link in such cases.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
+ MSM_PCIE_ENABLE_PC,
+ pci_dev->bus->number, pci_dev, NULL,
+ PM_OPTIONS_DEFAULT);
+}
+
+/**
+ * cnss_pci_set_link_bandwidth() - Update number of lanes and speed of
+ * PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ * @link_speed: PCIe link gen speed
+ * @link_width: number of lanes for PCIe link
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to update number of lanes and speed of the link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width)
+{
+ return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
+ link_speed, link_width);
+}
+
+/**
+ * cnss_pci_set_max_link_speed() - Set the maximum speed PCIe can link up with
+ * @pci_priv: driver PCI bus context pointer
+ * @rc_num: root complex index that an endpoint connects to
+ * @link_speed: PCIe link gen speed
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to update the maximum speed that PCIe can link up with.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed)
+{
+ return msm_pcie_set_target_link_speed(rc_num, link_speed);
+}
+
+/**
+ * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to prevent the PCIe link from entering L1 and L1 sub-states. The APIs
+ * should also bring the link out of L1 or L1 sub-states if needed and
+ * avoid any synchronization issues.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
+{
+ return msm_pcie_prevent_l1(pci_priv->pci_dev);
+}
+
+/**
+ * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to allow the PCIe link to enter L1 and L1 sub-states. The APIs should
+ * avoid any synchronization issues.
+ *
+ * Return: None
+ */
+static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
+{
+ msm_pcie_allow_l1(pci_priv->pci_dev);
+}
+
+/**
+ * cnss_pci_set_link_up() - Power on or resume PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power on or resume the PCIe link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ int ret;
+
+ ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
+ NULL, pm_options);
+ if (ret)
+ cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+/**
+ * cnss_pci_set_link_down() - Power off or suspend PCIe link
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding PCIe root complex driver APIs
+ * to power off or suspend PCIe link.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ enum msm_pcie_pm_opt pm_ops;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ int ret;
+
+ if (pci_priv->drv_connected_last) {
+ cnss_pr_vdbg("Use PCIe DRV suspend\n");
+ pm_ops = MSM_PCIE_DRV_SUSPEND;
+ } else {
+ pm_ops = MSM_PCIE_SUSPEND;
+ }
+
+ ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
+ NULL, pm_options);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
+ ret);
+
+ return ret;
+}
+#else
+#ifndef CONFIG_WCN_GOOGLE
+static int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
+{
+ return -EOPNOTSUPP;
+}
+static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv) {}
+
+static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+#endif
+static int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
+{
+ return -EOPNOTSUPP;
+}
+
+#ifndef CONFIG_WCN_GOOGLE
+static int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
+{
+ return 0;
+}
+#endif
+
+static int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
+ u16 link_speed, u16 link_width)
+{
+ return 0;
+}
+
+static int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
+ u32 rc_num, u16 link_speed)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
+{
+ mhi_debug_reg_dump(pci_priv->mhi_ctrl);
+}
+
+static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
+{
+ mhi_dump_sfr(pci_priv->mhi_ctrl);
+}
+
+static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
+ u32 cookie)
+{
+ return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
+}
+
+static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
+ bool notify_clients)
+{
+ return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
+}
+
+static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
+ bool notify_clients)
+{
+ return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
+}
+
+static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
+ u32 timeout)
+{
+ return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
+}
+
+static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
+ int timeout_us, bool in_panic)
+{
+ return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
+ timeout_us, in_panic);
+}
+
+static void
+cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
+ int (*cb)(struct mhi_controller *mhi_ctrl,
+ struct mhi_link_info *link_info))
+{
+ mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
+}
+#else
+static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
+{
+}
+
+static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
+{
+}
+
+static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
+ u32 cookie)
+{
+ return false;
+}
+
+static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
+ bool notify_clients)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
+ bool notify_clients)
+{
+ return -EOPNOTSUPP;
+}
+
+static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
+ u32 timeout)
+{
+}
+
+static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
+ int timeout_us, bool in_panic)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
+ int (*cb)(struct mhi_controller *mhi_ctrl,
+ struct mhi_link_info *link_info))
+{
+}
+#endif /* CONFIG_MHI_BUS_MISC */
+
+int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
+{
+ u16 device_id;
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
+ cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
+ (void *)_RET_IP_);
+ return -EACCES;
+ }
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
+ return -EIO;
+ }
+
+ pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
+ if (device_id != pci_priv->device_id) {
+ cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
+ (void *)_RET_IP_, device_id,
+ pci_priv->device_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
+ u32 window_enable = WINDOW_ENABLE_BIT | window;
+ u32 val;
+
+ writel_relaxed(window_enable, pci_priv->bar +
+ QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
+
+ if (window != pci_priv->remap_window) {
+ pci_priv->remap_window = window;
+ cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
+ window_enable);
+ }
+
+ /* Read it back to make sure the write has taken effect */
+ val = readl_relaxed(pci_priv->bar + QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
+ if (val != window_enable) {
+ cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
+ window_enable, val);
+ if (!cnss_pci_check_link_status(pci_priv) &&
+ !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
+ CNSS_ASSERT(0);
+ }
+}
+
+static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
+ u32 offset, u32 *val)
+{
+ int ret;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (!in_interrupt() && !irqs_disabled()) {
+ ret = cnss_pci_check_link_status(pci_priv);
+ if (ret)
+ return ret;
+ }
+
+ if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
+ offset < MAX_UNWINDOWED_ADDRESS) {
+ *val = readl_relaxed(pci_priv->bar + offset);
+ return 0;
+ }
+
+ /* If in panic, the assumption is that the kernel panic handler holds
+ * all other threads and interrupts. Furthermore, pci_reg_window_lock
+ * could already be held at the time of the panic, so only take the
+ * lock during normal operation.
+ */
+ if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
+ cnss_pci_select_window(pci_priv, offset);
+ *val = readl_relaxed(pci_priv->bar + WINDOW_START +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ spin_lock_bh(&pci_reg_window_lock);
+ cnss_pci_select_window(pci_priv, offset);
+ *val = readl_relaxed(pci_priv->bar + WINDOW_START +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&pci_reg_window_lock);
+ }
+
+ return 0;
+}
+
+static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
+ u32 val)
+{
+ int ret;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (!in_interrupt() && !irqs_disabled()) {
+ ret = cnss_pci_check_link_status(pci_priv);
+ if (ret)
+ return ret;
+ }
+
+ if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
+ offset < MAX_UNWINDOWED_ADDRESS) {
+ writel_relaxed(val, pci_priv->bar + offset);
+ return 0;
+ }
+
+ /* Same constraint as PCI register read in panic */
+ if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
+ cnss_pci_select_window(pci_priv, offset);
+ writel_relaxed(val, pci_priv->bar + WINDOW_START +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ spin_lock_bh(&pci_reg_window_lock);
+ cnss_pci_select_window(pci_priv, offset);
+ writel_relaxed(val, pci_priv->bar + WINDOW_START +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&pci_reg_window_lock);
+ }
+
+ return 0;
+}
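+
+/* Example of the windowed access scheme implemented above: offsets below
+ * MAX_UNWINDOWED_ADDRESS map 1:1 onto the BAR, while a larger offset is
+ * split into a window index ((offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK)
+ * programmed into the remap control register, and a residual offset
+ * (offset & WINDOW_RANGE_MASK) accessed at BAR + WINDOW_START. Assuming the
+ * usual 512 KB window (WINDOW_SHIFT = 19, WINDOW_RANGE_MASK = 0x7FFFF; the
+ * actual values live in pci.h and are not shown in this hunk), an access to
+ * offset 0xA40000 selects window 0x14 and lands at WINDOW_START + 0x40000
+ * within the BAR.
+ */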
+
+static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev = &pci_priv->pci_dev->dev;
+ int ret;
+
+ ret = cnss_pci_force_wake_request_sync(dev,
+ FORCE_WAKE_DELAY_TIMEOUT_US);
+ if (ret) {
+ if (ret != -EAGAIN)
+ cnss_pr_err("Failed to request force wake\n");
+ return ret;
+ }
+
+ /* If the device's M1 state-change event races here, it can be ignored,
+ * as the device is expected to move immediately from M2 to M0 without
+ * entering a low power state.
+ */
+ if (cnss_pci_is_device_awake(dev) != true)
+ cnss_pr_warn("MHI not in M0, while reg still accessible\n");
+
+ return 0;
+}
+
+static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev = &pci_priv->pci_dev->dev;
+ int ret;
+
+ ret = cnss_pci_force_wake_release(dev);
+ if (ret && ret != -EAGAIN)
+ cnss_pr_err("Failed to release force wake\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_INTERCONNECT_QCOM)
+/**
+ * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
+ * @plat_priv: Platform private data struct
+ * @bw: bandwidth
+ * @save: toggle flag to save bandwidth to current_bw_vote
+ *
+ * Setup bandwidth votes for configured interconnect paths
+ *
+ * Return: 0 for success
+ */
+static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
+ u32 bw, bool save)
+{
+ int ret = 0;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv->icc.path_count)
+ return -EOPNOTSUPP;
+
+ if (bw >= plat_priv->icc.bus_bw_cfg_count) {
+ cnss_pr_err("Invalid bus bandwidth type: %d\n", bw);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
+ ret = icc_set_bw(bus_bw_info->icc_path,
+ bus_bw_info->cfg_table[bw].avg_bw,
+ bus_bw_info->cfg_table[bw].peak_bw);
+ if (ret) {
+ cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
+ bw, ret, bus_bw_info->icc_name,
+ bus_bw_info->cfg_table[bw].avg_bw,
+ bus_bw_info->cfg_table[bw].peak_bw);
+ break;
+ }
+ }
+ if (ret == 0 && save)
+ plat_priv->icc.current_bw_vote = bw;
+ return ret;
+}
+
+int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (bandwidth < 0)
+ return -EINVAL;
+
+ return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
+}
+#else
+static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
+ u32 bw, bool save)
+{
+ return 0;
+}
+
+int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
+{
+ return 0;
+}
+#endif
+EXPORT_SYMBOL(cnss_request_bus_bandwidth);
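+
+/* Illustrative use of the exported helper above from a WLAN host driver.
+ * The numeric level indexes the per-path cfg_table (bounded by
+ * bus_bw_cfg_count), so the set of valid values is platform dependent;
+ * CNSS_BUS_WIDTH_MEDIUM is shown only as a placeholder for whatever
+ * bandwidth level enum the caller uses (assumed to be declared in
+ * inc/cnss2.h, which is not part of this hunk):
+ *
+ *    ret = cnss_request_bus_bandwidth(&pci_dev->dev,
+ *                                     CNSS_BUS_WIDTH_MEDIUM);
+ *    if (ret)
+ *        pr_err("Failed to vote for bus bandwidth, err = %d\n", ret);
+ */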
+
+int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
+ u32 *val, bool raw_access)
+{
+ int ret = 0;
+ bool do_force_wake_put = true;
+
+ if (raw_access) {
+ ret = cnss_pci_reg_read(pci_priv, offset, val);
+ goto out;
+ }
+
+ ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
+ if (ret < 0)
+ goto runtime_pm_put;
+
+ ret = cnss_pci_force_wake_get(pci_priv);
+ if (ret)
+ do_force_wake_put = false;
+
+ ret = cnss_pci_reg_read(pci_priv, offset, val);
+ if (ret) {
+ cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
+ offset, ret);
+ goto force_wake_put;
+ }
+
+force_wake_put:
+ if (do_force_wake_put)
+ cnss_pci_force_wake_put(pci_priv);
+runtime_pm_put:
+ cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+ cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
+out:
+ return ret;
+}
+
+int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
+ u32 val, bool raw_access)
+{
+ int ret = 0;
+ bool do_force_wake_put = true;
+
+ if (raw_access) {
+ ret = cnss_pci_reg_write(pci_priv, offset, val);
+ goto out;
+ }
+
+ ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
+ if (ret < 0)
+ goto runtime_pm_put;
+
+ ret = cnss_pci_force_wake_get(pci_priv);
+ if (ret)
+ do_force_wake_put = false;
+
+ ret = cnss_pci_reg_write(pci_priv, offset, val);
+ if (ret) {
+ cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
+ val, offset, ret);
+ goto force_wake_put;
+ }
+
+force_wake_put:
+ if (do_force_wake_put)
+ cnss_pci_force_wake_put(pci_priv);
+runtime_pm_put:
+ cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+ cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
+out:
+ return ret;
+}
+
+static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool link_down_or_recovery;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ link_down_or_recovery = pci_priv->pci_link_down_ind ||
+ (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
+ if (save) {
+ if (link_down_or_recovery) {
+ pci_priv->saved_state = NULL;
+ } else {
+ pci_save_state(pci_dev);
+ pci_priv->saved_state = pci_store_saved_state(pci_dev);
+ }
+ } else {
+ if (link_down_or_recovery) {
+ pci_load_saved_state(pci_dev, pci_priv->default_state);
+ pci_restore_state(pci_dev);
+ } else if (pci_priv->saved_state) {
+ pci_load_and_free_saved_state(pci_dev,
+ &pci_priv->saved_state);
+ pci_restore_state(pci_dev);
+ }
+ }
+
+ return 0;
+}
+
+static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
+{
+ u16 link_status;
+ int ret;
+
+ ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
+ &link_status);
+ if (ret)
+ return ret;
+
+ cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
+
+ pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
+ pci_priv->def_link_width =
+ (link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ pci_priv->cur_link_speed = pci_priv->def_link_speed;
+
+ cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
+ pci_priv->def_link_speed, pci_priv->def_link_width);
+
+ return 0;
+}
+#ifndef CONFIG_WCN_GOOGLE
+static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
+ enum pci_link_status status)
+{
+ u16 link_speed, link_width;
+ int ret;
+
+ cnss_pr_vdbg("Set PCI link status to: %u\n", status);
+
+ switch (status) {
+ case PCI_GEN1:
+ link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
+ link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ break;
+ case PCI_GEN2:
+ link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
+ link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ break;
+ case PCI_DEF:
+ link_speed = pci_priv->def_link_speed;
+ link_width = pci_priv->def_link_width;
+ if (!link_speed && !link_width) {
+ cnss_pr_err("PCI link speed or width is not valid\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ cnss_pr_err("Unknown PCI link status config: %u\n", status);
+ return -EINVAL;
+ }
+
+ ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
+ if (!ret)
+ pci_priv->cur_link_speed = link_speed;
+
+ return ret;
+}
+
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ int ret = 0, retry = 0;
+
+ cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+
+ if (link_up) {
+retry:
+ ret = cnss_pci_set_link_up(pci_priv);
+ if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
+ cnss_pr_dbg("Retry PCI link training #%d\n", retry);
+ if (pci_priv->pci_link_down_ind)
+ msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
+ goto retry;
+ }
+ } else {
+ /* Since DRV suspend cannot be done in Gen 3, drop to Gen 2
+ * if the current link speed is higher than Gen 2.
+ */
+ if (pci_priv->drv_connected_last &&
+ pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
+ cnss_set_pci_link_status(pci_priv, PCI_GEN2);
+
+ ret = cnss_pci_set_link_down(pci_priv);
+ }
+
+ if (pci_priv->drv_connected_last) {
+ if ((link_up && !ret) || (!link_up && ret))
+ cnss_set_pci_link_status(pci_priv, PCI_DEF);
+ }
+
+ return ret;
+}
+#else
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
+
+ if (link_up) {
+ return exynos_pcie_pm_resume(GOOGLE_RC_ID);
+ } else {
+ exynos_pcie_pm_suspend(GOOGLE_RC_ID);
+ return 0;
+ }
+}
+#endif
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
+ cnss_pr_info("PCI link is already suspended\n");
+ goto out;
+ }
+
+ pci_clear_master(pci_priv->pci_dev);
+
+ ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ pci_disable_device(pci_priv->pci_dev);
+
+ if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
+ ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3hot, err = %d\n", ret);
+ }
+
+ /* Always do PCIe L2 suspend during power off/PCIe link recovery */
+ pci_priv->drv_connected_last = 0;
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+ if (ret)
+ goto out;
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->pci_link_state == PCI_LINK_UP) {
+ cnss_pr_info("PCI link is already resumed\n");
+ goto out;
+ }
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+ if (ret) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+
+ if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
+ ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
+ if (ret) {
+ cnss_pr_err("Failed to set D0, err = %d\n", ret);
+ goto out;
+ }
+ }
+
+ ret = pci_enable_device(pci_priv->pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ pci_set_master(pci_priv->pci_dev);
+
+ if (pci_priv->pci_link_down_ind)
+ pci_priv->pci_link_down_ind = false;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
+{
+ int ret;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Always wait here to avoid missing WAKE assert for RDDM
+ * before link recovery
+ */
+ msleep(WAKE_EVENT_TIMEOUT);
+
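+	/* Disable MHI IRQ handling while the link is bounced; it is
+	 * re-enabled once the link is back up.
+	 */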
+ mhi_irq_setup(pci_priv->mhi_ctrl, false);
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ del_timer(&pci_priv->dev_rddm_timer);
+ return ret;
+ }
+
+ mod_timer(&pci_priv->dev_rddm_timer,
+ jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
+
+ mhi_irq_setup(pci_priv->mhi_ctrl, true);
+
+ cnss_mhi_debug_reg_dump(pci_priv);
+
+ return 0;
+}
+
+#ifndef CONFIG_WCN_GOOGLE
+int cnss_pci_prevent_l1(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ int ret;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
+ cnss_pr_dbg("PCIe link is in suspend state\n");
+ return -EIO;
+ }
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_err("PCIe link is down\n");
+ return -EIO;
+ }
+
+ ret = _cnss_pci_prevent_l1(pci_priv);
+ if (ret == -EIO) {
+ cnss_pr_err("Failed to prevent PCIe L1, considered as link down\n");
+ cnss_pci_link_down(dev);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_pci_prevent_l1);
+
+void cnss_pci_allow_l1(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return;
+ }
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
+ cnss_pr_dbg("PCIe link is in suspend state\n");
+ return;
+ }
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_err("PCIe link is down\n");
+ return;
+ }
+
+ _cnss_pci_allow_l1(pci_priv);
+}
+EXPORT_SYMBOL(cnss_pci_allow_l1);
+#else
+int cnss_pci_prevent_l1(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_prevent_l1);
+
+void cnss_pci_allow_l1(struct device *dev)
+{
+ return;
+}
+EXPORT_SYMBOL(cnss_pci_allow_l1);
+#endif
+
+static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
+ enum cnss_bus_event_type type,
+ void *data)
+{
+ struct cnss_bus_event bus_event;
+
+ bus_event.etype = type;
+ bus_event.event_data = data;
+ cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
+}
+
+static void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ unsigned long flags;
+
+ if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
+ &plat_priv->ctrl_params.quirks))
+ panic("cnss: PCI link is down\n");
+
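+	/* Only the first link-down report schedules recovery; later reports
+	 * are ignored until the indication is cleared on link resume.
+	 */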
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ if (pci_dev->device == QCA6174_DEVICE_ID)
+ disable_irq(pci_dev->irq);
+
+	/* Notify bus-related events for all supported chips. Only the PCIe
+	 * LINK_DOWN notification is handled here; the uevent buffer can be
+	 * extended later to cover more bus info.
+	 */
+ cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
+
+ cnss_fatal_err("PCI link down, schedule recovery\n");
+ cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
+}
+
+int cnss_pci_link_down(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv = NULL;
+ int ret;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_WCN_GOOGLE
+ //exynos_pcie_set_perst(GOOGLE_RC_ID, false);
+ exynos_pcie_set_perst_gpio(GOOGLE_RC_ID, false);
+#endif
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (pci_priv->drv_connected_last &&
+ of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+ "cnss-enable-self-recovery"))
+ plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
+
+ cnss_pr_err("PCI link down is detected by drivers\n");
+
+ ret = cnss_pci_assert_perst(pci_priv);
+ if (ret)
+ cnss_pci_handle_linkdown(pci_priv);
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_pci_link_down);
+
+int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
+ pci_priv->pci_link_down_ind;
+}
+
+int cnss_pci_is_device_down(struct device *dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+ return cnss_pcie_is_device_down(pci_priv);
+}
+EXPORT_SYMBOL(cnss_pci_is_device_down);
+
+void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
+{
+ spin_lock_bh(&pci_reg_window_lock);
+}
+EXPORT_SYMBOL(cnss_pci_lock_reg_window);
+
+void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
+{
+ spin_unlock_bh(&pci_reg_window_lock);
+}
+EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
+
+/**
+ * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * Dump primary (PBL) and secondary (SBL) bootloader debug log data. For SBL,
+ * validate the log start address and size before dumping.
+ *
+ * Return: None
+ */
+static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
+ u32 sbl_log_def_start, sbl_log_def_end;
+ u32 pbl_stage, sbl_log_start, sbl_log_size;
+ u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
+ u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
+ u32 pbl_log_sram_start_reg = DEBUG_PBL_LOG_SRAM_START;
+ int i;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_def_start = QCA6390_V2_SBL_DATA_START;
+		sbl_log_def_end = QCA6390_V2_SBL_DATA_END;
+		break;
+ case QCA6490_DEVICE_ID:
+ pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
+ if (plat_priv->device_version.major_version == FW_V2_NUMBER) {
+ sbl_log_def_start = QCA6490_V2_SBL_DATA_START;
+ sbl_log_def_end = QCA6490_V2_SBL_DATA_END;
+ } else {
+ sbl_log_def_start = QCA6490_V1_SBL_DATA_START;
+ sbl_log_def_end = QCA6490_V1_SBL_DATA_END;
+ }
+ break;
+ case WCN7850_DEVICE_ID:
+ pbl_bootstrap_status_reg = WCN7850_PBL_BOOTSTRAP_STATUS;
+ pbl_log_sram_start_reg = WCN7850_DEBUG_PBL_LOG_SRAM_START;
+ pbl_log_max_size = WCN7850_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_max_size = WCN7850_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_def_start = WCN7850_SBL_DATA_START;
+		sbl_log_def_end = WCN7850_SBL_DATA_END;
+		break;
+ default:
+ return;
+ }
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
+ cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
+ cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
+ cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
+ cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
+ &pbl_bootstrap_status);
+ cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
+ pbl_stage, sbl_log_start, sbl_log_size);
+ cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
+ pbl_wlan_boot_cfg, pbl_bootstrap_status);
+
+ cnss_pr_dbg("Dumping PBL log data\n");
+ for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
+ mem_addr = pbl_log_sram_start_reg + i;
+ if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
+ break;
+ cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
+ }
+
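+	/* Clamp the reported SBL log size and make sure the log window lies
+	 * within the expected SBL data region before dumping it.
+	 */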
+ sbl_log_size = (sbl_log_size > sbl_log_max_size ?
+ sbl_log_max_size : sbl_log_size);
+ if (sbl_log_start < sbl_log_def_start ||
+ sbl_log_start > sbl_log_def_end ||
+ (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
+ cnss_pr_err("Invalid SBL log data\n");
+ return;
+ }
+
+ cnss_pr_dbg("Dumping SBL log data\n");
+ for (i = 0; i < sbl_log_size; i += sizeof(val)) {
+ mem_addr = sbl_log_start + i;
+ if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
+ break;
+ cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
+ }
+}
+
+static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ cnss_fatal_err("MHI power up returns timeout\n");
+
+ if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE)) {
+ /* Wait for RDDM if RDDM cookie is set. If RDDM times out,
+ * PBL/SBL error region may have been erased so no need to
+ * dump them either.
+ */
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+ !pci_priv->pci_link_down_ind) {
+ mod_timer(&pci_priv->dev_rddm_timer,
+ jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
+ }
+ } else {
+ cnss_pr_dbg("RDDM cookie is not set\n");
+ cnss_mhi_debug_reg_dump(pci_priv);
+ /* Dump PBL/SBL error log if RDDM cookie is not set */
+ cnss_pci_dump_bl_sram_mem(pci_priv);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ return "INIT";
+ case CNSS_MHI_DEINIT:
+ return "DEINIT";
+ case CNSS_MHI_POWER_ON:
+ return "POWER_ON";
+ case CNSS_MHI_POWERING_OFF:
+ return "POWERING_OFF";
+ case CNSS_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case CNSS_MHI_FORCE_POWER_OFF:
+ return "FORCE_POWER_OFF";
+ case CNSS_MHI_SUSPEND:
+ return "SUSPEND";
+ case CNSS_MHI_RESUME:
+ return "RESUME";
+ case CNSS_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case CNSS_MHI_RDDM_DONE:
+ return "RDDM_DONE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_DEINIT:
+ case CNSS_MHI_POWER_ON:
+ if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_FORCE_POWER_OFF:
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_POWER_OFF:
+ case CNSS_MHI_SUSPEND:
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_RESUME:
+ if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_RDDM_DONE:
+ return 0;
+ default:
+ cnss_pr_err("Unhandled MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state,
+ pci_priv->mhi_state);
+ if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
+ CNSS_ASSERT(0);
+
+ return -EINVAL;
+}
+
+static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_DEINIT:
+ clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_ON:
+ set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWERING_OFF:
+ set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_OFF:
+ case CNSS_MHI_FORCE_POWER_OFF:
+ clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
+ clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
+ clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_SUSPEND:
+ set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_RESUME:
+ clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_RDDM_DONE:
+ set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+ break;
+ default:
+ cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+ }
+}
+
+static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ int ret = 0, retry = 0;
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (mhi_state < 0) {
+ cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
+ return -EINVAL;
+ }
+
+ ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
+ if (ret)
+ goto out;
+
+ cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
+ break;
+ case CNSS_MHI_DEINIT:
+ mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
+ ret = 0;
+ break;
+ case CNSS_MHI_POWER_ON:
+ ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+ /* Only set img_pre_alloc when power up succeeds */
+ if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
+ cnss_pr_dbg("Notify MHI to use already allocated images\n");
+ pci_priv->mhi_ctrl->img_pre_alloc = true;
+ }
+#endif
+ break;
+ case CNSS_MHI_POWER_OFF:
+ mhi_power_down(pci_priv->mhi_ctrl, true);
+ ret = 0;
+ break;
+ case CNSS_MHI_FORCE_POWER_OFF:
+ mhi_power_down(pci_priv->mhi_ctrl, false);
+ ret = 0;
+ break;
+ case CNSS_MHI_SUSPEND:
+retry_mhi_suspend:
+ mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
+ if (pci_priv->drv_connected_last)
+ ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
+ else
+ ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
+ mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
+ if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
+ cnss_pr_dbg("Retry MHI suspend #%d\n", retry);
+ usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
+ MHI_SUSPEND_RETRY_DELAY_US + 1000);
+ goto retry_mhi_suspend;
+ }
+ break;
+ case CNSS_MHI_RESUME:
+ mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
+ if (pci_priv->drv_connected_last) {
+ cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
+ ret = cnss_mhi_pm_fast_resume(pci_priv, true);
+ cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
+ } else {
+ ret = mhi_pm_resume(pci_priv->mhi_ctrl);
+ }
+ mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
+ break;
+ case CNSS_MHI_RDDM_DONE:
+ break;
+ default:
+ cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
+
+ return 0;
+
+out:
+ cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_PCI_MSM)
+/**
+ * cnss_wlan_adsp_pc_enable() - Control ADSP power collapse setup
+ * @pci_priv: driver PCI bus context pointer
+ * @control: true to enable ADSP power collapse, false to disable it
+ *
+ * This function controls ADSP power collapse (PC) and must be called
+ * according to WLAN state. Allowing ADSP power collapse while WLAN is in
+ * runtime PM suspend delays the periodic QMI stats PCI link up/down
+ * cycle, which costs additional power.
+ * Introduced in SM8350.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
+ bool control)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int ret = 0;
+ u32 pm_options = PM_OPTIONS_DEFAULT;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (plat_priv->adsp_pc_enabled == control) {
+ cnss_pr_dbg("ADSP power collapse already %s\n",
+ control ? "Enabled" : "Disabled");
+ return 0;
+ }
+
+ if (control)
+ pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
+ else
+ pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
+
+ ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
+ pci_dev, NULL, pm_options);
+ if (ret)
+ return ret;
+
+ cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
+ plat_priv->adsp_pc_enabled = control;
+ return 0;
+}
+#else
+static int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
+ bool control)
+{
+ return 0;
+}
+#endif
+
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+ unsigned int timeout = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = pci_priv->plat_priv;
+ if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+ return 0;
+
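+	/* Apply the build-time MHI timeout override and the M2 timeout
+	 * before starting MHI.
+	 */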
+ if (MHI_TIMEOUT_OVERWRITE_MS)
+ pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
+ cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
+ if (ret)
+ return ret;
+
+ timeout = pci_priv->mhi_ctrl->timeout_ms;
+	/* Non-perf builds: 6x the default timeout (10 s by default) */
+	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
+		pci_priv->mhi_ctrl->timeout_ms *= 6;
+	else /* Perf builds: 3x the default timeout */
+		pci_priv->mhi_ctrl->timeout_ms *= 3;
+
+ /* Start the timer to dump MHI/PBL/SBL debug data periodically */
+ mod_timer(&pci_priv->boot_debug_timer,
+ jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
+ del_timer(&pci_priv->boot_debug_timer);
+ if (ret == 0)
+ cnss_wlan_adsp_pc_enable(pci_priv, false);
+
+ pci_priv->mhi_ctrl->timeout_ms = timeout;
+
+ if (ret == -ETIMEDOUT) {
+		/* Special case: if MHI power on returns -ETIMEDOUT, the
+		 * controller still has to clean up via MHI power down later.
+		 * Force-set the driver's internal MHI POWER_ON state bit so
+		 * the power-down path can handle it properly.
+		 */
+ set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
+ }
+
+ return ret;
+}
+
+static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+ return;
+
+ if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
+ cnss_pr_dbg("MHI is already powered off\n");
+ return;
+ }
+ cnss_wlan_adsp_pc_enable(pci_priv, true);
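+	/* Clear any lingering SUSPEND state and mark MHI as powering off
+	 * before issuing the actual power down.
+	 */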
+ cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+ cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
+
+ if (!pci_priv->pci_link_down_ind)
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
+ else
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
+}
+
+static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
+ return;
+
+ if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
+ cnss_pr_dbg("MHI is already deinited\n");
+ return;
+ }
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+}
+
+static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
+ bool set_vddd4blow, bool set_shutdown,
+ bool do_force_wake)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ int ret;
+ u32 val;
+
+ if (!plat_priv->set_wlaon_pwr_ctrl)
+ return;
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
+ pci_priv->pci_link_down_ind)
+ return;
+
+ if (do_force_wake)
+ if (cnss_pci_force_wake_get(pci_priv))
+ return;
+
+ ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
+ if (ret) {
+ cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
+ WLAON_QFPROM_PWR_CTRL_REG, ret);
+ goto force_wake_put;
+ }
+
+ cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
+ WLAON_QFPROM_PWR_CTRL_REG, val);
+
+ if (set_vddd4blow)
+ val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
+ else
+ val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
+
+ if (set_shutdown)
+ val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
+ else
+ val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
+
+ ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
+ if (ret) {
+ cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
+ WLAON_QFPROM_PWR_CTRL_REG, ret);
+ goto force_wake_put;
+ }
+
+ cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
+ WLAON_QFPROM_PWR_CTRL_REG);
+
+ if (set_shutdown)
+ usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
+ WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);
+
+force_wake_put:
+ if (do_force_wake)
+ cnss_pci_force_wake_put(pci_priv);
+}
+
+static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
+ u64 *time_us)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ u32 low, high;
+ u64 device_ticks;
+
+ if (!plat_priv->device_freq_hz) {
+ cnss_pr_err("Device time clock frequency is not valid\n");
+ return -EINVAL;
+ }
+
+ cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
+ cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
+
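+	/* Convert counter ticks to microseconds:
+	 * ticks / (freq / 100000) * 10 == ticks * 1000000 / freq,
+	 * dividing first to avoid overflowing the 64-bit tick count.
+	 */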
+ device_ticks = (u64)high << 32 | low;
+ do_div(device_ticks, plat_priv->device_freq_hz / 100000);
+ *time_us = device_ticks * 10;
+
+ return 0;
+}
+
+static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
+{
+ cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
+ TIME_SYNC_ENABLE);
+}
+
+static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
+{
+ cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
+ TIME_SYNC_CLEAR);
+}
+
+static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device *dev = &pci_priv->pci_dev->dev;
+ unsigned long flags = 0;
+ u64 host_time_us, device_time_us, offset;
+ u32 low, high;
+ int ret;
+
+ ret = cnss_pci_prevent_l1(dev);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_force_wake_get(pci_priv);
+ if (ret)
+ goto allow_l1;
+
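+	/* Capture host and device timestamps back to back under the same
+	 * lock so the computed offset stays consistent.
+	 */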
+ spin_lock_irqsave(&time_sync_lock, flags);
+ cnss_pci_clear_time_sync_counter(pci_priv);
+ cnss_pci_enable_time_sync_counter(pci_priv);
+ host_time_us = cnss_get_host_timestamp(plat_priv);
+ ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
+ cnss_pci_clear_time_sync_counter(pci_priv);
+ spin_unlock_irqrestore(&time_sync_lock, flags);
+ if (ret)
+ goto force_wake_put;
+
+ if (host_time_us < device_time_us) {
+ cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
+ host_time_us, device_time_us);
+ ret = -EINVAL;
+ goto force_wake_put;
+ }
+
+ offset = host_time_us - device_time_us;
+ cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
+ host_time_us, device_time_us, offset);
+
+ low = offset & 0xFFFFFFFF;
+ high = offset >> 32;
+
+ cnss_pci_reg_write(pci_priv, PCIE_SHADOW_REG_VALUE_34, low);
+ cnss_pci_reg_write(pci_priv, PCIE_SHADOW_REG_VALUE_35, high);
+
+ cnss_pci_reg_read(pci_priv, PCIE_SHADOW_REG_VALUE_34, &low);
+ cnss_pci_reg_read(pci_priv, PCIE_SHADOW_REG_VALUE_35, &high);
+
+ cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
+ PCIE_SHADOW_REG_VALUE_34, low,
+ PCIE_SHADOW_REG_VALUE_35, high);
+
+force_wake_put:
+ cnss_pci_force_wake_put(pci_priv);
+allow_l1:
+ cnss_pci_allow_l1(dev);
+out:
+ return ret;
+}
+
+static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
+{
+ struct cnss_pci_data *pci_priv =
+ container_of(work, struct cnss_pci_data, time_sync_work.work);
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ unsigned int time_sync_period_ms =
+ plat_priv->ctrl_params.time_sync_period;
+
+ if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
+ cnss_pr_dbg("Time sync is disabled\n");
+ return;
+ }
+
+ if (!time_sync_period_ms) {
+ cnss_pr_dbg("Skip time sync as time period is 0\n");
+ return;
+ }
+
+ if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
+ return;
+
+ if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
+ goto runtime_pm_put;
+
+ mutex_lock(&pci_priv->bus_lock);
+ cnss_pci_update_timestamp(pci_priv);
+ mutex_unlock(&pci_priv->bus_lock);
+ schedule_delayed_work(&pci_priv->time_sync_work,
+ msecs_to_jiffies(time_sync_period_ms));
+
+runtime_pm_put:
+ cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+ cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
+}
+
+static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!plat_priv->device_freq_hz) {
+ cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
+ return -EINVAL;
+ }
+
+ cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
+
+ return 0;
+}
+
+static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
+{
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ break;
+ default:
+ return;
+ }
+
+ cancel_delayed_work_sync(&pci_priv->time_sync_work);
+}
+
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+
+ if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ cnss_pr_dbg("Skip driver probe\n");
+ goto out;
+ }
+
+ if (!pci_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
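+	/* Dispatch to the matching host driver entry point: reinit() during
+	 * recovery of an already probed driver, probe() on initial load, and
+	 * idle_restart() for idle restart requests.
+	 */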
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+ ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to reinit host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ complete(&plat_priv->recovery_complete);
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+ ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to probe host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ complete_all(&plat_priv->power_up_complete);
+ } else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
+ &plat_priv->driver_state)) {
+ ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to idle restart host driver, err = %d\n",
+ ret);
+ plat_priv->power_up_error = ret;
+ complete_all(&plat_priv->power_up_complete);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
+ complete_all(&plat_priv->power_up_complete);
+ } else {
+ complete(&plat_priv->power_up_complete);
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ __pm_relax(plat_priv->recovery_ws);
+ }
+
+ cnss_pci_start_time_sync_update(pci_priv);
+
+ return 0;
+
+out:
+ return ret;
+}
+
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv;
+ int ret;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+
+ if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Skip driver remove\n");
+ return 0;
+ }
+
+ if (!pci_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ return -EINVAL;
+ }
+
+ cnss_pci_stop_time_sync_update(pci_priv);
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+ pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
+ } else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ pci_priv->driver_ops->remove(pci_priv->pci_dev);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ } else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
+ &plat_priv->driver_state)) {
+ ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
+ if (ret == -EAGAIN) {
+ clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
+ &plat_priv->driver_state);
+ return ret;
+ }
+ }
+
+ plat_priv->get_info_cb_ctx = NULL;
+ plat_priv->get_info_cb = NULL;
+
+ return 0;
+}
+
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+ int modem_current_status)
+{
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ driver_ops = pci_priv->driver_ops;
+ if (!driver_ops || !driver_ops->modem_status)
+ return -EINVAL;
+
+ driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
+
+ return 0;
+}
+
+int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
+ enum cnss_driver_status status)
+{
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ driver_ops = pci_priv->driver_ops;
+ if (!driver_ops || !driver_ops->update_status)
+ return -EINVAL;
+
+ cnss_pr_dbg("Update driver status: %d\n", status);
+
+ driver_ops->update_status(pci_priv->pci_dev, status);
+
+ return 0;
+}
+
+static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
+ struct cnss_misc_reg *misc_reg,
+ u32 misc_reg_size,
+ char *reg_name)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool do_force_wake_put = true;
+ int i;
+
+ if (!misc_reg)
+ return;
+
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ if (cnss_pci_force_wake_get(pci_priv)) {
+ /* Continue to dump when device has entered RDDM already */
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ return;
+ do_force_wake_put = false;
+ }
+
+ cnss_pr_dbg("Start to dump %s registers\n", reg_name);
+
+ for (i = 0; i < misc_reg_size; i++) {
+ if (!test_bit(pci_priv->misc_reg_dev_mask,
+ &misc_reg[i].dev_mask))
+ continue;
+
+ if (misc_reg[i].wr) {
+ if (misc_reg[i].offset ==
+ QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
+ i >= 1)
+ misc_reg[i].val =
+ QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
+ misc_reg[i - 1].val;
+ if (cnss_pci_reg_write(pci_priv,
+ misc_reg[i].offset,
+ misc_reg[i].val))
+ goto force_wake_put;
+ cnss_pr_vdbg("Write 0x%X to 0x%X\n",
+ misc_reg[i].val,
+ misc_reg[i].offset);
+
+ } else {
+ if (cnss_pci_reg_read(pci_priv,
+ misc_reg[i].offset,
+ &misc_reg[i].val))
+ goto force_wake_put;
+ }
+ }
+
+force_wake_put:
+ if (do_force_wake_put)
+ cnss_pci_force_wake_put(pci_priv);
+}
+
+static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
+{
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
+ WCSS_REG_SIZE, "wcss");
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
+ PCIE_REG_SIZE, "pcie");
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
+ WLAON_REG_SIZE, "wlaon");
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
+ SYSPM_REG_SIZE, "syspm");
+}
+
+static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
+{
+ int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
+ u32 reg_offset;
+ bool do_force_wake_put = true;
+
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ if (!pci_priv->debug_reg) {
+ pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
+ sizeof(*pci_priv->debug_reg)
+ * array_size, GFP_KERNEL);
+ if (!pci_priv->debug_reg)
+ return;
+ }
+
+ if (cnss_pci_force_wake_get(pci_priv))
+ do_force_wake_put = false;
+
+ cnss_pr_dbg("Start to dump shadow registers\n");
+
+ for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
+ reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
+ pci_priv->debug_reg[j].offset = reg_offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset,
+ &pci_priv->debug_reg[j].val))
+ goto force_wake_put;
+ }
+
+ for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
+ reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
+ pci_priv->debug_reg[j].offset = reg_offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset,
+ &pci_priv->debug_reg[j].val))
+ goto force_wake_put;
+ }
+
+force_wake_put:
+ if (do_force_wake_put)
+ cnss_pci_force_wake_put(pci_priv);
+}
+
+static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = cnss_pci_call_driver_probe(pci_priv);
+ if (ret)
+ goto suspend_link;
+
+ return 0;
+suspend_link:
+ cnss_suspend_pci_link(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ cnss_pci_pm_runtime_resume(pci_priv);
+
+ ret = cnss_pci_call_driver_remove(pci_priv);
+ if (ret == -EAGAIN)
+ goto out;
+
+ cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+ CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
+
+out:
+ return ret;
+}
+
+static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+ if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
+ pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
+static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_ramdump_info *ramdump_info;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ if (!ramdump_info->ramdump_size)
+ return -EINVAL;
+
+ return cnss_do_ramdump(plat_priv);
+}
+
+static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ unsigned int timeout;
+ int retry = 0;
+
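+	/* If a previous RAM dump is still pending, clear it and fully power
+	 * down the device before powering it up again.
+	 */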
+ if (plat_priv->ramdump_info_v2.dump_data_valid) {
+ cnss_pci_clear_dump_info(pci_priv);
+ cnss_pci_power_off_mhi(pci_priv);
+ cnss_suspend_pci_link(pci_priv);
+ cnss_pci_deinit_mhi(pci_priv);
+ cnss_power_off_device(plat_priv);
+ }
+
+ /* Clear QMI send usage count during every power up */
+ pci_priv->qmi_send_usage_count = 0;
+
+ plat_priv->power_up_error = 0;
+retry:
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ if (test_bit(IGNORE_PCI_LINK_FAILURE,
+ &plat_priv->ctrl_params.quirks)) {
+ cnss_pr_dbg("Ignore PCI link resume failure\n");
+ ret = 0;
+ goto out;
+ }
+ if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
+ cnss_power_off_device(plat_priv);
+ cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
+ msleep(POWER_ON_RETRY_DELAY_MS * retry);
+ goto retry;
+ }
+ /* Assert when it reaches maximum retries */
+ CNSS_ASSERT(0);
+ goto power_off;
+ }
+
+ cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
+
+ ret = cnss_pci_start_mhi(pci_priv);
+ if (ret) {
+ cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+ !pci_priv->pci_link_down_ind && timeout) {
+ /* Start recovery directly for MHI start failures */
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_DEFAULT);
+ }
+ return 0;
+ }
+
+ if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ return 0;
+ }
+
+ cnss_set_pin_connect_status(plat_priv);
+
+ if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
+ ret = cnss_pci_call_driver_probe(pci_priv);
+ if (ret)
+ goto stop_mhi;
+ } else if (timeout) {
+ if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
+ timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
+ else
+ timeout += WLAN_MISSION_MODE_TIMEOUT;
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+
+ return 0;
+
+stop_mhi:
+ cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
+ cnss_pci_power_off_mhi(pci_priv);
+ cnss_suspend_pci_link(pci_priv);
+ cnss_pci_deinit_mhi(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	bool do_force_wake = true;
+
+ cnss_pci_pm_runtime_resume(pci_priv);
+
+ ret = cnss_pci_call_driver_remove(pci_priv);
+ if (ret == -EAGAIN)
+ goto out;
+
+ cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+ CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
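+	/* If a device error was notified while a driver load/unload, idle
+	 * restart/shutdown or cold boot calibration was in flight, collect
+	 * the dump information now and assert for debugging.
+	 */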
+ if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
+ test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
+ test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+ del_timer(&pci_priv->dev_rddm_timer);
+ cnss_pci_collect_dump_info(pci_priv, false);
+ CNSS_ASSERT(0);
+ }
+
+ if (!cnss_is_device_powered_on(plat_priv)) {
+ cnss_pr_dbg("Device is already powered off, ignore\n");
+ goto skip_power_off;
+ }
+
+ if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ do_force_wake = false;
+ cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
+
+ /* FBC image will be freed after powering off MHI, so skip
+ * if RAM dump data is still valid.
+ */
+ if (plat_priv->ramdump_info_v2.dump_data_valid)
+ goto skip_power_off;
+
+ cnss_pci_power_off_mhi(pci_priv);
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+ cnss_pci_deinit_mhi(pci_priv);
+ cnss_power_off_device(plat_priv);
+
+skip_power_off:
+ pci_priv->remap_window = 0;
+
+ clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ pci_priv->pci_link_down_ind = false;
+ }
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
+
+out:
+ return ret;
+}
+
+static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
+ cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+ plat_priv->driver_state);
+
+ cnss_pci_collect_dump_info(pci_priv, true);
+ clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
+}
+
+static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+ struct cnss_dump_data *dump_data = &info_v2->dump_data;
+ struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+ int ret = 0;
+
+ if (!info_v2->dump_data_valid || !dump_seg ||
+ dump_data->nentries == 0)
+ return 0;
+
+ ret = cnss_do_elf_ramdump(plat_priv);
+
+ cnss_pci_clear_dump_info(pci_priv);
+ cnss_pci_power_off_mhi(pci_priv);
+ cnss_suspend_pci_link(pci_priv);
+ cnss_pci_deinit_mhi(pci_priv);
+ cnss_power_off_device(plat_priv);
+
+ return ret;
+}
+
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ switch (pci_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_powerup(pci_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ ret = cnss_qca6290_powerup(pci_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%x\n",
+ pci_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ switch (pci_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_shutdown(pci_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ ret = cnss_qca6290_shutdown(pci_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%x\n",
+ pci_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ switch (pci_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_qca6174_crash_shutdown(pci_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ cnss_qca6290_crash_shutdown(pci_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%x\n",
+ pci_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ switch (pci_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_ramdump(pci_priv);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ ret = cnss_qca6290_ramdump(pci_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%x\n",
+ pci_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+int cnss_pci_is_drv_connected(struct device *dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ return pci_priv->drv_connected_last;
+}
+EXPORT_SYMBOL(cnss_pci_is_drv_connected);
+
+static void cnss_wlan_reg_driver_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ struct cnss_cal_info *cal_info;
+
+ cnss_pr_err("%s: Enter\n", __func__);
+#ifdef terry_debug
+ cnss_pr_err("%s: Enter, driver_state: 0x%lx\n", __func__, plat_priv->driver_state);
+#endif
+
+ if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
+ goto reg_driver;
+ } else {
+ cnss_pr_err("Calibration still not done\n");
+ cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+ if (!cal_info)
+ return;
+ cal_info->cal_status = CNSS_CAL_TIMEOUT;
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ 0, cal_info);
+ /* Temporarily return for bringup. CBC will not be triggered */
+ return;
+ }
+reg_driver:
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
+ return;
+ }
+ reinit_completion(&plat_priv->power_up_complete);
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_EVENT_SYNC_UNKILLABLE,
+ pci_priv->driver_ops);
+
+ cnss_pr_err("%s: Exit\n", __func__);
+}
+
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_pci_data *pci_priv;
+#ifndef CONFIG_WCN_GOOGLE
+ const struct pci_device_id *id_table = driver_ops->id_table;
+#endif
+ unsigned int timeout;
+
+ cnss_pr_err("%s: Enter\n", __func__);
+
+ if (!plat_priv) {
+ cnss_pr_info("plat_priv is not ready for register driver\n");
+ return -EAGAIN;
+ }
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv) {
+ cnss_pr_info("pci_priv is not ready for register driver\n");
+ return -EAGAIN;
+ }
+
+ if (pci_priv->driver_ops) {
+ cnss_pr_err("Driver has already registered\n");
+ return -EEXIST;
+ }
+
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
+ return -EINVAL;
+ }
+
+#ifndef CONFIG_WCN_GOOGLE
+ if (!id_table || !pci_dev_present(id_table)) {
+		/* Walk the ID table from the start again so every supported
+		 * device ID gets logged below.
+		 */
+ id_table = driver_ops->id_table;
+ while (id_table->vendor) {
+ cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
+ id_table->device);
+ id_table++;
+ }
+ cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
+ pci_priv->device_id);
+ return -ENODEV;
+ }
+#endif
+ if (!plat_priv->cbc_enabled ||
+ test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
+ goto register_driver;
+
+ pci_priv->driver_ops = driver_ops;
+	/* If cold boot calibration (CBC) is enabled, it is the first step in
+	 * the init sequence and is triggered once the file system is ready.
+	 * qcacld is loaded from vendor_modprobe.sh at early boot, so its
+	 * registration must be deferred until CBC is complete.
+	 */
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
+	cnss_pr_dbg("Deferring driver registration for %u ms, driver_state: 0x%lx\n",
+		    timeout, plat_priv->driver_state);
+ INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
+ cnss_wlan_reg_driver_work);
+ schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
+ msecs_to_jiffies(timeout));
+ cnss_pr_info("WLAN register driver deferred for Calibration\n");
+ return 0;
+register_driver:
+ reinit_completion(&plat_priv->power_up_complete);
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_EVENT_SYNC_UNKILLABLE,
+ driver_ops);
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ int ret = 0;
+ unsigned int timeout;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return;
+ }
+
+ mutex_lock(&plat_priv->driver_ops_lock);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto skip_wait_power_up;
+
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
+ ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret) {
+ cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
+ timeout);
+ CNSS_ASSERT(0);
+ }
+
+skip_wait_power_up:
+ if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ goto skip_wait_recovery;
+
+ reinit_completion(&plat_priv->recovery_complete);
+ timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
+ ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret) {
+ cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
+ timeout);
+ CNSS_ASSERT(0);
+ }
+
+skip_wait_recovery:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ CNSS_EVENT_SYNC_UNKILLABLE, NULL);
+
+ mutex_unlock(&plat_priv->driver_ops_lock);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
+ void *data)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
+ return -EINVAL;
+ }
+
+ set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ pci_priv->driver_ops = data;
+
+ ret = cnss_pci_dev_powerup(pci_priv);
+ if (ret) {
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ pci_priv->driver_ops = NULL;
+ }
+
+ return ret;
+}
+
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ cnss_pci_dev_shutdown(pci_priv);
+ pci_priv->driver_ops = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_PCI_MSM)
+static bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device_node *root_of_node;
+ bool drv_supported = false;
+
+ if (!root_port) {
+ cnss_pr_err("PCIe DRV is not supported as root port is null\n");
+ pci_priv->drv_supported = false;
+ return drv_supported;
+ }
+
+ root_of_node = root_port->dev.of_node;
+
+ if (root_of_node->parent)
+ drv_supported = of_property_read_bool(root_of_node->parent,
+ "qcom,drv-supported");
+
+ cnss_pr_dbg("PCIe DRV is %s\n",
+ drv_supported ? "supported" : "not supported");
+ pci_priv->drv_supported = drv_supported;
+
+ if (drv_supported) {
+ plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
+ cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
+ }
+
+ return drv_supported;
+}
+
+static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
+{
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct device *dev;
+
+ if (!notify)
+ return;
+
+ pci_dev = notify->user;
+ if (!pci_dev)
+ return;
+
+ pci_priv = cnss_get_pci_priv(pci_dev);
+ if (!pci_priv)
+ return;
+ dev = &pci_priv->pci_dev->dev;
+
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_LINKDOWN:
+ cnss_pr_dbg("PCI link down event callback\n");
+ cnss_pci_handle_linkdown(pci_priv);
+ break;
+ case MSM_PCIE_EVENT_WAKEUP:
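+		/* WAKE# asserted while the device is auto-suspended (or a
+		 * runtime suspend is in progress): stop monitoring the wake
+		 * interrupt and request a runtime resume.
+		 */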
+ if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
+ cnss_pci_get_auto_suspended(pci_priv)) ||
+ dev->power.runtime_status == RPM_SUSPENDING) {
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_pm_request_resume(pci_priv);
+ }
+ break;
+ case MSM_PCIE_EVENT_DRV_CONNECT:
+ cnss_pr_dbg("DRV subsystem is connected\n");
+ cnss_pci_set_drv_connected(pci_priv, 1);
+ break;
+ case MSM_PCIE_EVENT_DRV_DISCONNECT:
+ cnss_pr_dbg("DRV subsystem is disconnected\n");
+ if (cnss_pci_get_auto_suspended(pci_priv))
+ cnss_pci_pm_request_resume(pci_priv);
+ cnss_pci_set_drv_connected(pci_priv, 0);
+ break;
+ default:
+ cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
+ }
+}
+
+/**
+ * cnss_reg_pci_event() - Register for PCIe events
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function calls the corresponding PCIe root complex driver APIs
+ * to register for PCIe events such as link down or WAKE GPIO toggling.
+ * The set of events depends on the root complex driver's capabilities.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct msm_pcie_register_event *pci_event;
+
+ pci_event = &pci_priv->msm_pci_event;
+ pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
+ MSM_PCIE_EVENT_WAKEUP;
+
+ if (cnss_pci_is_drv_supported(pci_priv))
+ pci_event->events = pci_event->events |
+ MSM_PCIE_EVENT_DRV_CONNECT |
+ MSM_PCIE_EVENT_DRV_DISCONNECT;
+
+ pci_event->user = pci_priv->pci_dev;
+ pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+ pci_event->callback = cnss_pci_event_cb;
+ pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
+
+ ret = msm_pcie_register_event(pci_event);
+ if (ret)
+ cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ msm_pcie_deregister_event(&pci_priv->msm_pci_event);
+}
+#else
+static void cnss_pci_event_cb(struct exynos_pcie_notify *notify)
+{
+ unsigned long flags;
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_plat_data *plat_priv;
+
+ if (!notify)
+ return;
+
+ pci_dev = notify->user;
+ if (!pci_dev)
+ return;
+
+ pci_priv = cnss_get_pci_priv(pci_dev);
+ if (!pci_priv)
+ return;
+
+ plat_priv = pci_priv->plat_priv;
+ switch (notify->event) {
+// case EXYNOS_PCIE_EVENT_CPL_TIMEOUT:
+// cnss_pr_err("Received PCI CPL timeout event, link possibly down\n");
+ /* Fall through, handle it as link down */
+ case EXYNOS_PCIE_EVENT_LINKDOWN:
+ //exynos_pcie_set_perst(GOOGLE_RC_ID, false);
+ exynos_pcie_set_perst_gpio(GOOGLE_RC_ID, false);
+ if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
+ &plat_priv->ctrl_params.quirks))
+ panic("cnss: PCI link is down\n");
+
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ cnss_fatal_err("PCI link down, schedule recovery\n");
+ cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
+ break;
+ default:
+ cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
+ }
+}
+
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct exynos_pcie_register_event *pci_event;
+
+ pci_event = &pci_priv->exynos_pci_event;
+ pci_event->events = EXYNOS_PCIE_EVENT_LINKDOWN;
+// EXYNOS_PCIE_EVENT_CPL_TIMEOUT;
+ pci_event->user = pci_priv->pci_dev;
+ pci_event->mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+ pci_event->callback = cnss_pci_event_cb;
+
+ ret = exynos_pcie_register_event(pci_event);
+ if (ret)
+ cnss_pr_err("Failed to register exynos PCI event, err = %d\n",
+ ret);
+
+	return ret;
+}
+
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ exynos_pcie_deregister_event(&pci_priv->exynos_pci_event);
+}
+#endif
+
+static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
+ int ret = 0;
+
+ pm_message_t state = { .event = PM_EVENT_SUSPEND };
+
+ if (driver_ops && driver_ops->suspend) {
+ ret = driver_ops->suspend(pci_dev, state);
+ if (ret) {
+ cnss_pr_err("Failed to suspend host driver, err = %d\n",
+ ret);
+ ret = -EAGAIN;
+ }
+ }
+
+ return ret;
+}
+
+static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
+ int ret = 0;
+
+ if (driver_ops && driver_ops->resume) {
+ ret = driver_ops->resume(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to resume host driver, err = %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int ret = 0;
+
+ if (pci_priv->pci_link_state == PCI_LINK_DOWN)
+ goto out;
+
+ if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
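+	/* When the DRV subsystem owns the link, keep the PCI device enabled
+	 * and skip the config space save around the link transition.
+	 */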
+ if (pci_priv->drv_connected_last)
+ goto skip_disable_pci;
+
+ pci_clear_master(pci_dev);
+ cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+
+skip_disable_pci:
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
+ ret = -EAGAIN;
+ goto resume_mhi;
+ }
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+
+ return 0;
+
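+	/* The link could not be taken down: undo the PCI device changes and
+	 * resume MHI so the device stays usable.
+	 */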
+resume_mhi:
+ if (!pci_is_enabled(pci_dev))
+ if (pci_enable_device(pci_dev))
+ cnss_pr_err("Failed to enable PCI device\n");
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+out:
+ return ret;
+}
+
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int ret = 0;
+
+ if (pci_priv->pci_link_state == PCI_LINK_UP)
+ goto out;
+
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
+ cnss_fatal_err("Failed to resume PCI link from suspend\n");
+ cnss_pci_link_down(&pci_dev->dev);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+
+ if (pci_priv->drv_connected_last)
+ goto skip_enable_pci;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+
+skip_enable_pci:
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+out:
+ return ret;
+}
+
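+/* System PM suspend callback: reject suspend if DRV is supported but the
+ * firmware has no DRV connection, otherwise suspend the host WLAN driver
+ * first and then the PCI bus (unless power collapse is disabled).
+ */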
+static int cnss_pci_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ if (!cnss_is_device_powered_on(plat_priv))
+ goto out;
+
+ if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
+ pci_priv->drv_supported) {
+ pci_priv->drv_connected_last =
+ cnss_pci_get_drv_connected(pci_priv);
+ if (!pci_priv->drv_connected_last) {
+ cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+#ifdef CONFIG_WCN_GOOGLE
+ exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
+#endif
+
+ set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
+
+ ret = cnss_pci_suspend_driver(pci_priv);
+ if (ret)
+ goto clear_flag;
+
+ if (!pci_priv->disable_pc) {
+ mutex_lock(&pci_priv->bus_lock);
+ ret = cnss_pci_suspend_bus(pci_priv);
+ mutex_unlock(&pci_priv->bus_lock);
+ if (ret)
+ goto resume_driver;
+ }
+
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+
+ return 0;
+
+resume_driver:
+ cnss_pci_resume_driver(pci_priv);
+clear_flag:
+ pci_priv->drv_connected_last = 0;
+ clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
+#ifdef CONFIG_WCN_GOOGLE
+ exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
+#endif
+out:
+ return ret;
+}
+
+static int cnss_pci_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ if (pci_priv->pci_link_down_ind)
+ goto out;
+
+ if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+ goto out;
+
+ if (!pci_priv->disable_pc) {
+ ret = cnss_pci_resume_bus(pci_priv);
+ if (ret)
+ goto out;
+ }
+
+ ret = cnss_pci_resume_driver(pci_priv);
+
+ pci_priv->drv_connected_last = 0;
+ clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
+#ifdef CONFIG_WCN_GOOGLE
+ exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
+#endif
+out:
+ return ret;
+}
+
+static int cnss_pci_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+ goto out;
+
+ driver_ops = pci_priv->driver_ops;
+ if (driver_ops && driver_ops->suspend_noirq)
+ ret = driver_ops->suspend_noirq(pci_dev);
+
+ if (pci_priv->disable_pc && !pci_dev->state_saved &&
+ !pci_priv->plat_priv->use_pm_domain)
+ pci_save_state(pci_dev);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_resume_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+ goto out;
+
+ driver_ops = pci_priv->driver_ops;
+ if (driver_ops && driver_ops->resume_noirq &&
+ !pci_priv->pci_link_down_ind)
+ ret = driver_ops->resume_noirq(pci_dev);
+
+out:
+ return ret;
+}
+
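+/* Runtime PM suspend callback: reject the request during link-down recovery
+ * or when the firmware has no DRV connection; otherwise use the WLAN
+ * driver's runtime ops if provided, falling back to cnss_auto_suspend().
+ */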
+static int cnss_pci_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -EAGAIN;
+
+ if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
+ pci_priv->drv_supported) {
+ pci_priv->drv_connected_last =
+ cnss_pci_get_drv_connected(pci_priv);
+ if (!pci_priv->drv_connected_last) {
+ cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+ return -EAGAIN;
+ }
+ }
+
+ cnss_pr_vdbg("Runtime suspend start\n");
+
+ driver_ops = pci_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_suspend)
+ ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
+ else
+ ret = cnss_auto_suspend(dev);
+
+ if (ret)
+ pci_priv->drv_connected_last = 0;
+
+ cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ if (!cnss_is_device_powered_on(pci_priv->plat_priv))
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_vdbg("Runtime resume start\n");
+
+ driver_ops = pci_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_resume)
+ ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
+ else
+ ret = cnss_auto_resume(dev);
+
+ if (!ret)
+ pci_priv->drv_connected_last = 0;
+
+ cnss_pr_vdbg("Runtime resume status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_idle(struct device *dev)
+{
+ cnss_pr_vdbg("Runtime idle\n");
+
+ pm_request_autosuspend(dev);
+
+ return -EBUSY;
+}
+
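+/**
+ * cnss_wlan_pm_control() - Enable or disable PCIe power collapse
+ * @dev: PCI device
+ * @vote: true to disable PCIe power collapse, false to allow it
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */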
+int cnss_wlan_pm_control(struct device *dev, bool vote)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+#ifndef CONFIG_WCN_GOOGLE
+ int ret = 0;
+#endif
+ if (!pci_priv)
+ return -ENODEV;
+
+#ifndef CONFIG_WCN_GOOGLE
+ ret = cnss_pci_disable_pc(pci_priv, vote);
+ if (ret)
+ return ret;
+#endif
+ pci_priv->disable_pc = vote;
+ cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_wlan_pm_control);
+
+static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ if (id >= RTPM_ID_MAX)
+ return;
+
+ atomic_inc(&pci_priv->pm_stats.runtime_get);
+ atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
+ pci_priv->pm_stats.runtime_get_timestamp_id[id] =
+ cnss_get_host_timestamp(pci_priv->plat_priv);
+}
+
+static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ if (id >= RTPM_ID_MAX)
+ return;
+
+ atomic_inc(&pci_priv->pm_stats.runtime_put);
+ atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
+ pci_priv->pm_stats.runtime_put_timestamp_id[id] =
+ cnss_get_host_timestamp(pci_priv->plat_priv);
+}
+
+void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev;
+
+ if (!pci_priv)
+ return;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ cnss_pr_dbg("Runtime PM usage count: %d\n",
+ atomic_read(&dev->power.usage_count));
+}
+
+int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev;
+ enum rpm_status status;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_request_resume(dev);
+}
+
+int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev;
+ enum rpm_status status;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_runtime_resume(dev);
+}
+
+int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ struct device *dev;
+ enum rpm_status status;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ cnss_pci_pm_runtime_get_record(pci_priv, id);
+
+ return pm_runtime_get(dev);
+}
+
+int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ struct device *dev;
+ enum rpm_status status;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ cnss_pci_pm_runtime_get_record(pci_priv, id);
+
+ return pm_runtime_get_sync(dev);
+}
+
+void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ if (!pci_priv)
+ return;
+
+ cnss_pci_pm_runtime_get_record(pci_priv, id);
+ pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
+}
+
+int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ struct device *dev;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ if (atomic_read(&dev->power.usage_count) == 0) {
+ cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
+ return -EINVAL;
+ }
+
+ cnss_pci_pm_runtime_put_record(pci_priv, id);
+
+ return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
+}
+
+void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id)
+{
+ struct device *dev;
+
+ if (!pci_priv)
+ return;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ if (atomic_read(&dev->power.usage_count) == 0) {
+ cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
+ return;
+ }
+
+ cnss_pci_pm_runtime_put_record(pci_priv, id);
+ pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
+}
+
+void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
+{
+ if (!pci_priv)
+ return;
+
+ pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
+}
+
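+/* Auto-suspend entry used by runtime PM: suspend the bus when no QMI send is
+ * in progress, mark the device auto-suspended, arm the wake interrupt
+ * monitor and drop the bus bandwidth vote.
+ */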
+int cnss_auto_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ if (!pci_priv->qmi_send_usage_count) {
+ ret = cnss_pci_suspend_bus(pci_priv);
+ if (ret) {
+ mutex_unlock(&pci_priv->bus_lock);
+ return ret;
+ }
+ }
+
+ cnss_pci_set_auto_suspended(pci_priv, 1);
+ mutex_unlock(&pci_priv->bus_lock);
+
+ cnss_pci_set_monitor_wake_intr(pci_priv, true);
+
+	/* For suspend, temporarily set the bandwidth vote to NONE and don't
+	 * save it in current_bw_vote, since the resume path should vote for
+	 * the last used bandwidth. Also ignore the error if bandwidth voting
+	 * is not set up.
+	 */
+ cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
+ return 0;
+}
+EXPORT_SYMBOL(cnss_auto_suspend);
+
+int cnss_auto_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ ret = cnss_pci_resume_bus(pci_priv);
+ if (ret) {
+ mutex_unlock(&pci_priv->bus_lock);
+ return ret;
+ }
+
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+ mutex_unlock(&pci_priv->bus_lock);
+
+ cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_auto_resume);
+
+int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ return -EAGAIN;
+
+ if (timeout_us) {
+ /* Busy wait for timeout_us */
+ return cnss_mhi_device_get_sync_atomic(pci_priv,
+ timeout_us, false);
+ } else {
+ /* Sleep wait for mhi_ctrl->timeout_ms */
+ return mhi_device_get_sync(mhi_ctrl->mhi_dev);
+ }
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
+
+int cnss_pci_force_wake_request(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ return -EAGAIN;
+
+ mhi_device_get(mhi_ctrl->mhi_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ return (mhi_ctrl->dev_state == MHI_STATE_M0);
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ return -EAGAIN;
+
+ mhi_device_put(mhi_ctrl->mhi_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+
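+/* Keep the bus resumed while QMI messages are in flight: resume it on the
+ * first get if it was auto-suspended, and re-suspend it on the last put.
+ */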
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ if (cnss_pci_get_auto_suspended(pci_priv) &&
+ !pci_priv->qmi_send_usage_count)
+ ret = cnss_pci_resume_bus(pci_priv);
+ pci_priv->qmi_send_usage_count++;
+ cnss_pr_buf("Increased QMI send usage count to %d\n",
+ pci_priv->qmi_send_usage_count);
+ mutex_unlock(&pci_priv->bus_lock);
+
+ return ret;
+}
+
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ if (pci_priv->qmi_send_usage_count)
+ pci_priv->qmi_send_usage_count--;
+ cnss_pr_buf("Decreased QMI send usage count to %d\n",
+ pci_priv->qmi_send_usage_count);
+ if (cnss_pci_get_auto_suspended(pci_priv) &&
+ !pci_priv->qmi_send_usage_count &&
+ !cnss_pcie_is_device_down(pci_priv))
+ ret = cnss_pci_suspend_bus(pci_priv);
+ mutex_unlock(&pci_priv->bus_lock);
+
+ return ret;
+}
+
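+/* Allocate DMA buffers for the requested firmware memory segments. */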
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ struct device *dev = &pci_priv->pci_dev->dev;
+ int i;
+
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (!fw_mem[i].va && fw_mem[i].size) {
+ fw_mem[i].va =
+ dma_alloc_attrs(dev, fw_mem[i].size,
+ &fw_mem[i].pa, GFP_KERNEL,
+ fw_mem[i].attrs);
+
+ if (!fw_mem[i].va) {
+ cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
+ fw_mem[i].size, fw_mem[i].type);
+
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ struct device *dev = &pci_priv->pci_dev->dev;
+ int i;
+
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (fw_mem[i].va && fw_mem[i].size) {
+ cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ fw_mem[i].va, &fw_mem[i].pa,
+ fw_mem[i].size, fw_mem[i].type);
+ dma_free_attrs(dev, fw_mem[i].size,
+ fw_mem[i].va, fw_mem[i].pa,
+ fw_mem[i].attrs);
+ fw_mem[i].va = NULL;
+ fw_mem[i].pa = 0;
+ fw_mem[i].size = 0;
+ fw_mem[i].type = 0;
+ }
+ }
+
+ plat_priv->fw_mem_seg_len = 0;
+}
+
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int i, j;
+
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ if (!qdss_mem[i].va && qdss_mem[i].size) {
+ qdss_mem[i].va =
+ dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ qdss_mem[i].size,
+ &qdss_mem[i].pa,
+ GFP_KERNEL);
+ if (!qdss_mem[i].va) {
+				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
+ qdss_mem[i].size,
+ qdss_mem[i].type, i);
+ break;
+ }
+ }
+ }
+
+ /* Best-effort allocation for QDSS trace */
+ if (i < plat_priv->qdss_mem_seg_len) {
+ for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
+ qdss_mem[j].type = 0;
+ qdss_mem[j].size = 0;
+ }
+ plat_priv->qdss_mem_seg_len = i;
+ }
+
+ return 0;
+}
+
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int i;
+
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ if (qdss_mem[i].va && qdss_mem[i].size) {
+ cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
+ &qdss_mem[i].pa, qdss_mem[i].size,
+ qdss_mem[i].type);
+ dma_free_coherent(&pci_priv->pci_dev->dev,
+ qdss_mem[i].size, qdss_mem[i].va,
+ qdss_mem[i].pa);
+ qdss_mem[i].va = NULL;
+ qdss_mem[i].pa = 0;
+ qdss_mem[i].size = 0;
+ qdss_mem[i].type = 0;
+ }
+ }
+ plat_priv->qdss_mem_seg_len = 0;
+}
+
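+/* Load the M3/PHY ucode firmware image into a DMA buffer so it can be handed
+ * to the device, selecting the file name based on the device ID.
+ */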
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ char filename[MAX_FIRMWARE_NAME_LEN];
+ char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
+ const struct firmware *fw_entry;
+ int ret = 0;
+
+	/* Use forward compatibility here: any recent device should use
+	 * DEFAULT_PHY_UCODE_FILE_NAME.
+	 */
+ switch (pci_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
+ pci_priv->device_id);
+ return -EINVAL;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ phy_filename = DEFAULT_PHY_M3_FILE_NAME;
+ break;
+ default:
+ break;
+ }
+
+ if (!m3_mem->va && !m3_mem->size) {
+ cnss_pci_add_fw_prefix_name(pci_priv, filename,
+ phy_filename);
+
+ ret = firmware_request_nowarn(&fw_entry, filename,
+ &pci_priv->pci_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to load M3 image: %s\n", filename);
+ return ret;
+ }
+
+ m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_entry->size, &m3_mem->pa,
+ GFP_KERNEL);
+ if (!m3_mem->va) {
+ cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
+ fw_entry->size);
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
+ m3_mem->size = fw_entry->size;
+ release_firmware(fw_entry);
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+
+ if (m3_mem->va && m3_mem->size) {
+ cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+ dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
+ m3_mem->va, m3_mem->pa);
+ }
+
+ m3_mem->va = NULL;
+ m3_mem->pa = 0;
+ m3_mem->size = 0;
+}
+
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return;
+
+ cnss_fatal_err("Timeout waiting for FW ready indication\n");
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return;
+
+ if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
+ return;
+ }
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_TIMEOUT);
+}
+
+#ifndef CONFIG_WCN_GOOGLE
+static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *handler_token)
+{
+ struct cnss_pci_data *pci_priv = handler_token;
+
+ cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
+ cnss_force_fw_assert(&pci_priv->pci_dev->dev);
+
+ /* IOMMU driver requires -ENOSYS to print debug info. */
+ return -ENOSYS;
+}
+#endif
+
+static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
+{
+#ifndef CONFIG_WCN_GOOGLE
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device_node *of_node;
+ struct resource *res;
+ const char *iommu_dma_type;
+ u32 addr_win[2];
+ int ret = 0;
+
+ of_node = of_parse_phandle(pci_dev->dev.of_node, "qcom,iommu-group", 0);
+ if (!of_node)
+ return ret;
+
+ cnss_pr_dbg("Initializing SMMU\n");
+
+ pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
+ ret = of_property_read_string(of_node, "qcom,iommu-dma",
+ &iommu_dma_type);
+ if (!ret && !strcmp("fastmap", iommu_dma_type)) {
+ cnss_pr_dbg("Enabling SMMU S1 stage\n");
+ pci_priv->smmu_s1_enable = true;
+ iommu_set_fault_handler(pci_priv->iommu_domain,
+ cnss_pci_smmu_fault_handler, pci_priv);
+ }
+
+ ret = of_property_read_u32_array(of_node, "qcom,iommu-dma-addr-pool",
+ addr_win, ARRAY_SIZE(addr_win));
+ if (ret) {
+ cnss_pr_err("Invalid SMMU size window, err = %d\n", ret);
+ of_node_put(of_node);
+ return ret;
+ }
+
+ pci_priv->smmu_iova_start = addr_win[0];
+ pci_priv->smmu_iova_len = addr_win[1];
+ cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
+ &pci_priv->smmu_iova_start,
+ pci_priv->smmu_iova_len);
+
+ res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
+ "smmu_iova_ipa");
+ if (res) {
+ pci_priv->smmu_iova_ipa_start = res->start;
+ pci_priv->smmu_iova_ipa_current = res->start;
+ pci_priv->smmu_iova_ipa_len = resource_size(res);
+ cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
+ &pci_priv->smmu_iova_ipa_start,
+ pci_priv->smmu_iova_ipa_len);
+ }
+
+ pci_priv->iommu_geometry = of_property_read_bool(of_node,
+ "qcom,iommu-geometry");
+ cnss_pr_dbg("iommu_geometry: %d\n", pci_priv->iommu_geometry);
+
+ of_node_put(of_node);
+#endif
+ return 0;
+}
+
+static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
+{
+ pci_priv->iommu_domain = NULL;
+}
+
+int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
+{
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!pci_priv->smmu_iova_len)
+ return -EINVAL;
+
+ *addr = pci_priv->smmu_iova_start;
+ *size = pci_priv->smmu_iova_len;
+
+ return 0;
+}
+
+int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
+{
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!pci_priv->smmu_iova_ipa_len)
+ return -EINVAL;
+
+ *addr = pci_priv->smmu_iova_ipa_start;
+ *size = pci_priv->smmu_iova_ipa_len;
+
+ return 0;
+}
+
+struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+ if (!pci_priv)
+ return NULL;
+
+ return pci_priv->iommu_domain;
+}
+EXPORT_SYMBOL(cnss_smmu_get_domain);
+
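+/**
+ * cnss_smmu_map() - Map a physical range into the IPA SMMU IOVA region
+ * @dev: PCI device
+ * @paddr: physical address to map
+ * @iova_addr: returned IOVA corresponding to @paddr
+ * @size: size of the mapping
+ *
+ * The mapping is page aligned and carved out of the smmu_iova_ipa window.
+ * IOMMU_CACHE is added when the PCIe root port's parent DT node has the
+ * dma-coherent property and IO coherency is not disabled by quirk.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */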
+int cnss_smmu_map(struct device *dev,
+ phys_addr_t paddr, uint32_t *iova_addr, size_t size)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ struct cnss_plat_data *plat_priv;
+ unsigned long iova;
+ size_t len;
+ int ret = 0;
+ int flag = IOMMU_READ | IOMMU_WRITE;
+ struct pci_dev *root_port;
+ struct device_node *root_of_node;
+ bool dma_coherent = false;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!iova_addr) {
+ cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
+ &paddr, size);
+ return -EINVAL;
+ }
+
+ plat_priv = pci_priv->plat_priv;
+
+ len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
+ iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
+
+ if (pci_priv->iommu_geometry &&
+ iova >= pci_priv->smmu_iova_ipa_start +
+ pci_priv->smmu_iova_ipa_len) {
+ cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
+ iova,
+ &pci_priv->smmu_iova_ipa_start,
+ pci_priv->smmu_iova_ipa_len);
+ return -ENOMEM;
+ }
+
+ if (!test_bit(DISABLE_IO_COHERENCY,
+ &plat_priv->ctrl_params.quirks)) {
+ root_port = pcie_find_root_port(pci_priv->pci_dev);
+ if (!root_port) {
+ cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
+ } else {
+ root_of_node = root_port->dev.of_node;
+ if (root_of_node && root_of_node->parent) {
+ dma_coherent =
+ of_property_read_bool(root_of_node->parent,
+ "dma-coherent");
+ cnss_pr_dbg("dma-coherent is %s\n",
+ dma_coherent ? "enabled" : "disabled");
+ if (dma_coherent)
+ flag |= IOMMU_CACHE;
+ }
+ }
+ }
+
+ cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
+
+ ret = iommu_map(pci_priv->iommu_domain, iova,
+ rounddown(paddr, PAGE_SIZE), len, flag);
+ if (ret) {
+ cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
+ return ret;
+ }
+
+ pci_priv->smmu_iova_ipa_current = iova + len;
+ *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
+	cnss_pr_dbg("IOMMU map: iova_addr %x\n", *iova_addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_smmu_map);
+
+int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ unsigned long iova;
+ size_t unmapped;
+ size_t len;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ iova = rounddown(iova_addr, PAGE_SIZE);
+ len = roundup(size + iova_addr - iova, PAGE_SIZE);
+
+ if (iova >= pci_priv->smmu_iova_ipa_start +
+ pci_priv->smmu_iova_ipa_len) {
+ cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
+ iova,
+ &pci_priv->smmu_iova_ipa_start,
+ pci_priv->smmu_iova_ipa_len);
+ return -ENOMEM;
+ }
+
+ cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
+
+ unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
+ if (unmapped != len) {
+ cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
+ unmapped, len);
+ return -EINVAL;
+ }
+
+ pci_priv->smmu_iova_ipa_current = iova;
+ return 0;
+}
+EXPORT_SYMBOL(cnss_smmu_unmap);
+
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ info->va = pci_priv->bar;
+ info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+ info->chip_id = plat_priv->chip_info.chip_id;
+ info->chip_family = plat_priv->chip_info.chip_family;
+ info->board_id = plat_priv->board_info.board_id;
+ info->soc_id = plat_priv->soc_info.soc_id;
+ info->fw_version = plat_priv->fw_version_info.fw_version;
+ strlcpy(info->fw_build_timestamp,
+ plat_priv->fw_version_info.fw_build_timestamp,
+ sizeof(info->fw_build_timestamp));
+ memcpy(&info->device_version, &plat_priv->device_version,
+ sizeof(info->device_version));
+ memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
+ sizeof(info->dev_mem_info));
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
+#ifndef CONFIG_WCN_GOOGLE
+static struct cnss_msi_config msi_config = {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct cnss_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+};
+#else
+static struct cnss_msi_config msi_config = {
+ .total_vectors = 16,
+ .total_users = 4,
+ .users = (struct cnss_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 8 },
+ { .name = "DP", .num_vectors = 7, .base_vector = 9 },
+ },
+};
+#endif
+
+static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
+{
+ pci_priv->msi_config = &msi_config;
+
+ return 0;
+}
+
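+/* Allocate the full MSI vector set described by msi_config and record the
+ * endpoint base MSI data used to derive per-user vector assignments.
+ */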
+static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int num_vectors;
+ struct cnss_msi_config *msi_config;
+ struct msi_desc *msi_desc;
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ ret = cnss_pci_get_msi_assignment(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
+ goto out;
+ }
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("msi_config is NULL!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ num_vectors = pci_alloc_irq_vectors(pci_dev,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors != msi_config->total_vectors) {
+		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d\n",
+ msi_config->total_vectors, num_vectors);
+ if (num_vectors >= 0)
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+
+ msi_desc = irq_get_msi_desc(pci_dev->irq);
+ if (!msi_desc) {
+ cnss_pr_err("msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ pci_priv->msi_ep_base_data = msi_desc->msg.data;
+ cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(pci_priv->pci_dev);
+reset_msi_config:
+ pci_priv->msi_config = NULL;
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
+{
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return;
+
+ pci_free_irq_vectors(pci_priv->pci_dev);
+}
+
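+/**
+ * cnss_get_user_msi_assignment() - Look up the MSI block assigned to a user
+ * @dev: PCI device
+ * @user_name: MSI user name ("MHI", "CE", "WAKE" or "DP")
+ * @num_vectors: returned number of vectors assigned to the user
+ * @user_base_data: returned MSI data of the user's first vector
+ * @base_vector: returned base vector index of the user
+ *
+ * Return: 0 on success, -EINVAL if MSI is not supported or the user name is
+ * unknown, -ENODEV if no PCI data is attached to the device.
+ */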
+int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ struct cnss_msi_config *msi_config;
+ int idx;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("MSI is not supported.\n");
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ + pci_priv->msi_ep_base_data;
+ *base_vector = msi_config->users[idx].base_vector;
+
+ cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(cnss_get_user_msi_assignment);
+
+int cnss_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ int irq_num;
+
+ irq_num = pci_irq_vector(pci_dev, vector);
+ cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
+
+ return irq_num;
+}
+EXPORT_SYMBOL(cnss_get_msi_irq);
+
+void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
+ u32 *msi_addr_high)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u16 control;
+
+ pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
+ &control);
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_low);
+ /* Return MSI high address only when device supports 64-bit MSI */
+ if (control & PCI_MSI_FLAGS_64BIT)
+ pci_read_config_dword(pci_dev,
+ pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_high);
+ else
+ *msi_addr_high = 0;
+
+ cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
+ *msi_addr_low, *msi_addr_high);
+}
+EXPORT_SYMBOL(cnss_get_msi_address);
+
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
+{
+ int ret, num_vectors;
+ u32 user_base_data, base_vector;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+ WAKE_MSI_NAME, &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret) {
+ cnss_pr_err("WAKE MSI is not valid\n");
+ return 0;
+ }
+
+ return user_base_data;
+}
+
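+/* Enable the PCI bus: verify the device ID, enable the device, request and
+ * map the BAR, set the per-chip DMA mask, save the default config space and
+ * enable bus mastering.
+ */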
+static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ u16 device_id;
+
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
+ if (device_id != pci_priv->pci_device_id->device) {
+ cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
+ device_id, pci_priv->pci_device_id->device);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
+ if (ret) {
+		cnss_pr_err("Failed to assign PCI resource, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
+ if (ret) {
+ cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
+ goto disable_device;
+ }
+
+ switch (device_id) {
+ case QCA6174_DEVICE_ID:
+ pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
+ break;
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+		/* 36-bit DMA mask; PCI_DMA_MASK_32_BIT is not used here */
+		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
+ break;
+ default:
+ pci_priv->dma_bit_mask = PCI_DMA_MASK_64_BIT;
+ break;
+ }
+
+ cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
+
+ ret = pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
+ if (ret) {
+ cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
+ goto release_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
+ if (ret) {
+ cnss_pr_err("Failed to set PCI consistent DMA mask, err = %d\n",
+ ret);
+ goto release_region;
+ }
+
+ pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+ if (!pci_priv->bar) {
+ cnss_pr_err("Failed to do PCI IO map!\n");
+ ret = -EIO;
+ goto release_region;
+ }
+
+ /* Save default config space without BME enabled */
+ pci_save_state(pci_dev);
+ pci_priv->default_state = pci_store_saved_state(pci_dev);
+
+ pci_set_master(pci_dev);
+
+ return 0;
+
+release_region:
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pci_dev);
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ pci_clear_master(pci_dev);
+ pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
+ pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
+
+ if (pci_priv->bar) {
+ pci_iounmap(pci_dev, pci_priv->bar);
+ pci_priv->bar = NULL;
+ }
+
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
+ gfp_t gfp = GFP_KERNEL;
+ u32 reg_offset;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ if (!plat_priv->qdss_reg) {
+ plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
+ sizeof(*plat_priv->qdss_reg)
+ * array_size, gfp);
+ if (!plat_priv->qdss_reg)
+ return;
+ }
+
+ cnss_pr_dbg("Start to dump qdss registers\n");
+
+ for (i = 0; qdss_csr[i].name; i++) {
+ reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset,
+ &plat_priv->qdss_reg[i]))
+ return;
+ cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
+ plat_priv->qdss_reg[i]);
+ }
+}
+
+static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
+ enum cnss_ce_index ce)
+{
+ int i;
+ u32 ce_base = ce * CE_REG_INTERVAL;
+ u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
+ dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
+ cmn_base = QCA6390_CE_COMMON_REG_BASE;
+ break;
+ case QCA6490_DEVICE_ID:
+ src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
+ dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
+ cmn_base = QCA6490_CE_COMMON_REG_BASE;
+ break;
+ default:
+ return;
+ }
+
+ switch (ce) {
+ case CNSS_CE_09:
+ case CNSS_CE_10:
+ for (i = 0; ce_src[i].name; i++) {
+ reg_offset = src_ring_base + ce_base + ce_src[i].offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
+ return;
+ cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
+ ce, ce_src[i].name, reg_offset, val);
+ }
+
+ for (i = 0; ce_dst[i].name; i++) {
+ reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
+ return;
+ cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
+ ce, ce_dst[i].name, reg_offset, val);
+ }
+ break;
+ case CNSS_CE_COMMON:
+ for (i = 0; ce_cmn[i].name; i++) {
+ reg_offset = cmn_base + ce_cmn[i].offset;
+ if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
+ return;
+ cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
+ ce_cmn[i].name, reg_offset, val);
+ }
+ break;
+ default:
+ cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
+ }
+}
+
+static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
+{
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ cnss_pr_dbg("Start to dump debug registers\n");
+
+ cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
+ cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
+ cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
+}
+
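+/* Force a firmware assert: resume the device, dump debug registers and
+ * trigger RDDM over MHI; fall back to link-down handling or default recovery
+ * if the link is down or RDDM cannot be triggered.
+ */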
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
+{
+ int ret;
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
+ test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
+ return -EINVAL;
+
+ cnss_auto_resume(&pci_priv->pci_dev->dev);
+
+ if (!cnss_pci_check_link_status(pci_priv))
+ cnss_mhi_debug_reg_dump(pci_priv);
+
+ cnss_pci_dump_misc_reg(pci_priv);
+ cnss_pci_dump_shadow_reg(pci_priv);
+
+ /* If link is still down here, directly trigger link down recovery */
+ ret = cnss_pci_check_link_status(pci_priv);
+ if (ret) {
+ cnss_pci_link_down(&pci_priv->pci_dev->dev);
+ return 0;
+ }
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
+ if (ret) {
+ if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
+ test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
+ cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
+ return 0;
+ }
+ cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
+ cnss_pci_dump_debug_reg(pci_priv);
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_DEFAULT);
+ return ret;
+ }
+
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+ mod_timer(&pci_priv->dev_rddm_timer,
+ jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
+ }
+
+ return 0;
+}
+
+static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
+ struct cnss_dump_seg *dump_seg,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, dma_addr_t dma, size_t size)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device *dev = &pci_priv->pci_dev->dev;
+ phys_addr_t pa;
+
+ dump_seg->address = dma;
+ dump_seg->v_address = va;
+ dump_seg->size = size;
+ dump_seg->type = type;
+
+ cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
+ seg_no, va, &dma, size);
+
+ if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
+ return;
+
+ cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
+}
+
+static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
+ struct cnss_dump_seg *dump_seg,
+ enum cnss_fw_dump_type type, int seg_no,
+ void *va, dma_addr_t dma, size_t size)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device *dev = &pci_priv->pci_dev->dev;
+ phys_addr_t pa;
+
+ cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
+ cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
+}
+
+int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
+ enum cnss_driver_status status, void *data)
+{
+ struct cnss_uevent_data uevent_data;
+ struct cnss_wlan_driver *driver_ops;
+
+ driver_ops = pci_priv->driver_ops;
+ if (!driver_ops || !driver_ops->update_event) {
+		cnss_pr_dbg("Driver ops or update_event callback is NULL\n");
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("Calling driver uevent: %d\n", status);
+
+ uevent_data.status = status;
+ uevent_data.data = data;
+
+ return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
+}
+
+static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ struct cnss_hang_event hang_event;
+ void *hang_data_va = NULL;
+ u64 offset = 0;
+ int i = 0;
+
+ if (!fw_mem || !plat_priv->fw_mem_seg_len)
+ return;
+
+ memset(&hang_event, 0, sizeof(hang_event));
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ offset = HST_HANG_DATA_OFFSET;
+ break;
+ case QCA6490_DEVICE_ID:
+ offset = HSP_HANG_DATA_OFFSET;
+ break;
+ default:
+ cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
+ pci_priv->device_id);
+ return;
+ }
+
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
+ fw_mem[i].va) {
+ hang_data_va = fw_mem[i].va + offset;
+ hang_event.hang_event_data = kmemdup(hang_data_va,
+ HANG_DATA_LENGTH,
+ GFP_ATOMIC);
+ if (!hang_event.hang_event_data) {
+ cnss_pr_dbg("Hang data memory alloc failed\n");
+ return;
+ }
+ hang_event.hang_event_data_len = HANG_DATA_LENGTH;
+ break;
+ }
+ }
+
+ cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
+
+ kfree(hang_event.hang_event_data);
+ hang_event.hang_event_data = NULL;
+}
+
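+/* Collect the firmware dump: send a hang event, dump debug registers,
+ * download the RDDM image over MHI and register FW image, RDDM and remote
+ * heap segments with the ramdump/minidump infrastructure.
+ */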
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_data *dump_data =
+ &plat_priv->ramdump_info_v2.dump_data;
+ struct cnss_dump_seg *dump_seg =
+ plat_priv->ramdump_info_v2.dump_data_vaddr;
+ struct image_info *fw_image, *rddm_image;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int ret, i, j;
+
+ if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+ !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
+ cnss_pci_send_hang_event(pci_priv);
+
+ if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
+ cnss_pr_dbg("RAM dump is already collected, skip\n");
+ return;
+ }
+
+ if (!cnss_is_device_powered_on(plat_priv)) {
+ cnss_pr_dbg("Device is already powered off, skip\n");
+ return;
+ }
+
+ if (!in_panic) {
+ mutex_lock(&pci_priv->bus_lock);
+ ret = cnss_pci_check_link_status(pci_priv);
+ if (ret) {
+ if (ret != -EACCES) {
+ mutex_unlock(&pci_priv->bus_lock);
+ return;
+ }
+ if (cnss_pci_resume_bus(pci_priv)) {
+ mutex_unlock(&pci_priv->bus_lock);
+ return;
+ }
+ }
+ mutex_unlock(&pci_priv->bus_lock);
+ } else {
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+ }
+
+ cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_dump_misc_reg(pci_priv);
+ cnss_pci_dump_shadow_reg(pci_priv);
+ cnss_pci_dump_qdss_reg(pci_priv);
+
+ ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
+ if (ret) {
+ cnss_fatal_err("Failed to download RDDM image, err = %d\n",
+ ret);
+ cnss_pci_dump_debug_reg(pci_priv);
+ return;
+ }
+
+ fw_image = pci_priv->mhi_ctrl->fbc_image;
+ rddm_image = pci_priv->mhi_ctrl->rddm_image;
+ dump_data->nentries = 0;
+
+ if (!dump_seg) {
+		cnss_pr_warn("FW image dump collection not setup\n");
+ goto skip_dump;
+ }
+
+ cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
+ fw_image->entries);
+
+ for (i = 0; i < fw_image->entries; i++) {
+ cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
+ fw_image->mhi_buf[i].buf,
+ fw_image->mhi_buf[i].dma_addr,
+ fw_image->mhi_buf[i].len);
+ dump_seg++;
+ }
+
+ dump_data->nentries += fw_image->entries;
+
+ cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
+ rddm_image->entries);
+
+ for (i = 0; i < rddm_image->entries; i++) {
+ cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
+ rddm_image->mhi_buf[i].buf,
+ rddm_image->mhi_buf[i].dma_addr,
+ rddm_image->mhi_buf[i].len);
+ dump_seg++;
+ }
+
+ dump_data->nentries += rddm_image->entries;
+
+ cnss_mhi_dump_sfr(pci_priv);
+
+ cnss_pr_dbg("Collect remote heap dump segment\n");
+
+ for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
+ cnss_pci_add_dump_seg(pci_priv, dump_seg,
+ CNSS_FW_REMOTE_HEAP, j,
+ fw_mem[i].va, fw_mem[i].pa,
+ fw_mem[i].size);
+ dump_seg++;
+ dump_data->nentries++;
+ j++;
+ }
+ }
+
+ if (dump_data->nentries > 0)
+ plat_priv->ramdump_info_v2.dump_data_valid = true;
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
+
+skip_dump:
+ complete(&plat_priv->rddm_complete);
+}
+
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_seg *dump_seg =
+ plat_priv->ramdump_info_v2.dump_data_vaddr;
+ struct image_info *fw_image, *rddm_image;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int i, j;
+
+ if (!dump_seg)
+ return;
+
+ fw_image = pci_priv->mhi_ctrl->fbc_image;
+ rddm_image = pci_priv->mhi_ctrl->rddm_image;
+
+ for (i = 0; i < fw_image->entries; i++) {
+ cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
+ fw_image->mhi_buf[i].buf,
+ fw_image->mhi_buf[i].dma_addr,
+ fw_image->mhi_buf[i].len);
+ dump_seg++;
+ }
+
+ for (i = 0; i < rddm_image->entries; i++) {
+ cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
+ rddm_image->mhi_buf[i].buf,
+ rddm_image->mhi_buf[i].dma_addr,
+ rddm_image->mhi_buf[i].len);
+ dump_seg++;
+ }
+
+ for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
+ cnss_pci_remove_dump_seg(pci_priv, dump_seg,
+ CNSS_FW_REMOTE_HEAP, j,
+ fw_mem[i].va, fw_mem[i].pa,
+ fw_mem[i].size);
+ dump_seg++;
+ j++;
+ }
+ }
+
+ plat_priv->ramdump_info_v2.dump_data.nentries = 0;
+ plat_priv->ramdump_info_v2.dump_data_valid = false;
+}
+
+void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
+{
+ if (!pci_priv)
+ return;
+
+ cnss_device_crashed(&pci_priv->pci_dev->dev);
+}
+
+static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
+
+ return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
+}
+
+static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
+
+ cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
+}
+
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+ char *prefix_name, char *name)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv)
+ return;
+
+ plat_priv = pci_priv->plat_priv;
+
+ if (!plat_priv->use_fw_path_with_prefix) {
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+ return;
+ }
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+ QCA6390_PATH_PREFIX "%s", name);
+ break;
+ case QCA6490_DEVICE_ID:
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+ QCA6490_PATH_PREFIX "%s", name);
+ break;
+ case WCN7850_DEVICE_ID:
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+ WCN7850_PATH_PREFIX "%s", name);
+ break;
+ default:
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+ break;
+ }
+
+ cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
+}
+
+static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
+
+ plat_priv->device_version.family_number = mhi_ctrl->family_number;
+ plat_priv->device_version.device_number = mhi_ctrl->device_number;
+ plat_priv->device_version.major_version = mhi_ctrl->major_version;
+ plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
+
+ cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
+ plat_priv->device_version.family_number,
+ plat_priv->device_version.device_number,
+ plat_priv->device_version.major_version,
+ plat_priv->device_version.minor_version);
+
+ /* Only keep lower 4 bits as real device major version */
+ plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
+
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
+ cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
+ pci_priv->device_id,
+ plat_priv->device_version.major_version);
+ return -EINVAL;
+ }
+ cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+ FW_V2_FILE_NAME);
+ snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+ FW_V2_FILE_NAME);
+ break;
+ case QCA6490_DEVICE_ID:
+ switch (plat_priv->device_version.major_version) {
+ case FW_V2_NUMBER:
+ cnss_pci_add_fw_prefix_name(pci_priv,
+ plat_priv->firmware_name,
+ FW_V2_FILE_NAME);
+ snprintf(plat_priv->fw_fallback_name,
+ MAX_FIRMWARE_NAME_LEN,
+ FW_V2_FILE_NAME);
+ break;
+ default:
+ cnss_pci_add_fw_prefix_name(pci_priv,
+ plat_priv->firmware_name,
+ DEFAULT_FW_FILE_NAME);
+ snprintf(plat_priv->fw_fallback_name,
+ MAX_FIRMWARE_NAME_LEN,
+ DEFAULT_FW_FILE_NAME);
+ break;
+ }
+ break;
+ default:
+ cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+ DEFAULT_FW_FILE_NAME);
+ snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+ DEFAULT_FW_FILE_NAME);
+ break;
+ }
+
+ cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
+ plat_priv->firmware_name, plat_priv->fw_fallback_name);
+
+ return 0;
+}
+
+static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
+{
+ switch (status) {
+ case MHI_CB_IDLE:
+ return "IDLE";
+ case MHI_CB_EE_RDDM:
+ return "RDDM";
+ case MHI_CB_SYS_ERROR:
+ return "SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "FATAL_ERROR";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MISSION_MODE";
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+ case MHI_CB_FALLBACK_IMG:
+ return "FW_FALLBACK";
+#endif
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
+{
+ struct cnss_pci_data *pci_priv =
+ from_timer(pci_priv, t, dev_rddm_timer);
+
+ if (!pci_priv)
+ return;
+
+ cnss_fatal_err("Timeout waiting for RDDM notification\n");
+
+ if (mhi_get_exec_env(pci_priv->mhi_ctrl) == MHI_EE_PBL)
+ cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");
+
+ cnss_mhi_debug_reg_dump(pci_priv);
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev, CNSS_REASON_TIMEOUT);
+}
+
+static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
+{
+ struct cnss_pci_data *pci_priv =
+ from_timer(pci_priv, t, boot_debug_timer);
+
+ if (!pci_priv)
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
+ return;
+
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+ return;
+
+ if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
+ return;
+
+ cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
+ BOOT_DEBUG_TIMEOUT_MS / 1000);
+ cnss_mhi_debug_reg_dump(pci_priv);
+ cnss_pci_dump_bl_sram_mem(pci_priv);
+
+ mod_timer(&pci_priv->boot_debug_timer,
+ jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
+}
+
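+/* MHI status callback: on error notifications mark the firmware down, arm
+ * the RDDM timer or schedule recovery with the matching reason, and switch
+ * to the fallback firmware image when MHI requests it.
+ */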
+static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
+ enum mhi_callback reason)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
+ struct cnss_plat_data *plat_priv;
+ enum cnss_recovery_reason cnss_reason;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL");
+ return;
+ }
+
+ plat_priv = pci_priv->plat_priv;
+
+ if (reason != MHI_CB_IDLE)
+ cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
+ cnss_mhi_notify_status_to_str(reason), reason);
+
+ switch (reason) {
+ case MHI_CB_IDLE:
+ case MHI_CB_EE_MISSION_MODE:
+ return;
+ case MHI_CB_FATAL_ERROR:
+ cnss_ignore_qmi_failure(true);
+ set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+ cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
+ cnss_reason = CNSS_REASON_DEFAULT;
+ break;
+ case MHI_CB_SYS_ERROR:
+ cnss_ignore_qmi_failure(true);
+ set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+ mod_timer(&pci_priv->dev_rddm_timer,
+ jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
+ cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
+ return;
+ case MHI_CB_EE_RDDM:
+ cnss_ignore_qmi_failure(true);
+ set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+ del_timer(&pci_priv->dev_rddm_timer);
+ cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
+ cnss_reason = CNSS_REASON_RDDM;
+ break;
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+ case MHI_CB_FALLBACK_IMG:
+ plat_priv->use_fw_path_with_prefix = false;
+ cnss_pci_update_fw_name(pci_priv);
+ return;
+#endif
+ default:
+ cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
+ return;
+ }
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
+}
+
+static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
+{
+ int ret, num_vectors, i;
+ u32 user_base_data, base_vector;
+ int *irq;
+
+ ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+ MHI_MSI_NAME, &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vectors; i++)
+ irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev,
+ base_vector + i);
+
+ pci_priv->mhi_ctrl->irq = irq;
+ pci_priv->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
+ struct mhi_link_info *link_info)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ int ret = 0;
+
+ cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
+ link_info->target_link_speed,
+ link_info->target_link_width);
+
+	/* The target link speed has to be set before setting the link
+	 * bandwidth when the device requests a link speed change. This
+	 * avoids the bandwidth request being rejected if the requested
+	 * link speed is higher than the current one.
+	 */
+ ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
+ link_info->target_link_speed);
+ if (ret)
+ cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
+ link_info->target_link_speed, ret);
+
+ ret = cnss_pci_set_link_bandwidth(pci_priv,
+ link_info->target_link_speed,
+ link_info->target_link_width);
+
+ if (ret) {
+ cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
+ return ret;
+ }
+
+ pci_priv->def_link_speed = link_info->target_link_speed;
+ pci_priv->def_link_width = link_info->target_link_width;
+
+ return 0;
+}
+
+static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
+ void __iomem *addr, u32 *out)
+{
+	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
+	u32 tmp = readl_relaxed(addr);
+
+ /* Unexpected value, query the link status */
+ if (PCI_INVALID_READ(tmp) &&
+ cnss_pci_check_link_status(pci_priv))
+ return -EIO;
+
+ *out = tmp;
+
+ return 0;
+}
+
+static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
+ void __iomem *addr, u32 val)
+{
+ writel_relaxed(val, addr);
+}
+
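+/* Register the MHI controller for the device: wire up register accessors,
+ * runtime PM hooks, MSI vectors and IOVA ranges, then resolve the firmware
+ * names based on the reported device version.
+ */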
+static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct mhi_controller *mhi_ctrl;
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ mhi_ctrl = mhi_alloc_controller();
+ if (!mhi_ctrl) {
+ cnss_pr_err("Failed to allocate MHI controller\n");
+ return -ENOMEM;
+ }
+
+ pci_priv->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = &pci_dev->dev;
+
+ mhi_ctrl->fw_image = plat_priv->firmware_name;
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+ mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
+#endif
+
+ mhi_ctrl->regs = pci_priv->bar;
+ mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
+ cnss_pr_dbg("BAR starts at %pa, length is %x\n",
+ &pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM),
+ mhi_ctrl->reg_len);
+
+ ret = cnss_pci_get_mhi_msi(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
+ goto free_mhi_ctrl;
+ }
+
+ if (pci_priv->smmu_s1_enable) {
+ mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
+ mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
+ pci_priv->smmu_iova_len;
+ } else {
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
+ }
+
+ mhi_ctrl->status_cb = cnss_mhi_notify_status;
+ mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
+ mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
+ mhi_ctrl->read_reg = cnss_mhi_read_reg;
+ mhi_ctrl->write_reg = cnss_mhi_write_reg;
+
+ mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
+ if (!mhi_ctrl->rddm_size)
+ mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+
+ ret = mhi_register_controller(mhi_ctrl, &cnss_mhi_config);
+ if (ret) {
+ cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
+ goto free_mhi_irq;
+ }
+
+ /* BW scale CB needs to be set after registering MHI per requirement */
+ cnss_mhi_controller_set_bw_scale_cb(pci_priv, cnss_mhi_bw_scale);
+
+ ret = cnss_pci_update_fw_name(pci_priv);
+ if (ret)
+ goto unreg_mhi;
+
+ return 0;
+
+unreg_mhi:
+ mhi_unregister_controller(mhi_ctrl);
+free_mhi_irq:
+ kfree(mhi_ctrl->irq);
+free_mhi_ctrl:
+ mhi_free_controller(mhi_ctrl);
+
+ return ret;
+}
+
+static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
+{
+ struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+ mhi_free_controller(mhi_ctrl);
+}
+
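+/**
+ * cnss_pci_config_regs() - Set up per-chip register access tables
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * Selects the miscellaneous register access sequences for the detected
+ * device ID and, for QCA6390, pre-programs the WDOG register so that WDOG
+ * reset recovery can be detected when registers are read later.
+ */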
+static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
+{
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
+ pci_priv->wcss_reg = wcss_reg_access_seq;
+ pci_priv->pcie_reg = pcie_reg_access_seq;
+ pci_priv->wlaon_reg = wlaon_reg_access_seq;
+ pci_priv->syspm_reg = syspm_reg_access_seq;
+
+ /* Configure the WDOG register with a specific value so that, when
+ * reading registers, we can tell whether the HW is in the middle of
+ * WDOG reset recovery.
+ */
+ cnss_pci_reg_write
+ (pci_priv,
+ QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
+ QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
+ break;
+ case QCA6490_DEVICE_ID:
+ pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
+ pci_priv->wlaon_reg = wlaon_reg_access_seq;
+ break;
+ default:
+ return;
+ }
+}
+
+#ifdef CONFIG_WCN_GOOGLE
+static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
+{
+ struct cnss_pci_data *pci_priv = data;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ pci_priv->wake_counter++;
+ cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
+ pci_priv->wake_irq, pci_priv->wake_counter);
+
+ /* Make sure any ongoing system suspend gets aborted */
+ cnss_pm_stay_awake(plat_priv);
+ cnss_pm_relax(plat_priv);
+ /* The two pm* API calls above abort system suspend only when
+ * plat_dev->dev->ws has been initialized via device_init_wakeup().
+ * pm_system_wakeup() is called in addition to guarantee that system
+ * suspend can still be aborted even if it has not been initialized.
+ */
+ pm_system_wakeup();
+
+ if (cnss_pci_get_monitor_wake_intr(pci_priv) &&
+ cnss_pci_get_auto_suspended(pci_priv)) {
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_pm_request_resume(pci_priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function initializes the WLAN PCI wake GPIO and its corresponding
+ * interrupt. It should be used on non-MSM platforms whose PCIe root
+ * complex driver doesn't handle the GPIO.
+ *
+ * Return: 0 for success or skip, negative value for error
+ */
+static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ int ret = 0;
+
+ pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
+ "wlan-pci-wake-gpio", 0);
+ if (pci_priv->wake_gpio < 0)
+ goto out;
+
+ cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
+ pci_priv->wake_gpio);
+
+ ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
+ if (ret) {
+ cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ gpio_direction_input(pci_priv->wake_gpio);
+ pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
+
+ ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
+ IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
+ goto free_gpio;
+ }
+
+ ret = enable_irq_wake(pci_priv->wake_irq);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(pci_priv->wake_irq, pci_priv);
+free_gpio:
+ gpio_free(pci_priv->wake_gpio);
+out:
+ return ret;
+}
+
+static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
+{
+ if (pci_priv->wake_gpio < 0)
+ return;
+
+ disable_irq_wake(pci_priv->wake_irq);
+ free_irq(pci_priv->wake_irq, pci_priv);
+ gpio_free(pci_priv->wake_gpio);
+}
+#else
+static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
+{
+}
+#endif
+
+#ifndef CONFIG_WCN_GOOGLE
+#if IS_ENABLED(CONFIG_ARCH_QCOM)
+/**
+ * cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
+ * to given PCI device
+ * @pci_priv: driver PCI bus context pointer
+ *
+ * This function shall call corresponding of_reserved_mem_device* API to
+ * assign reserved memory region to PCI device based on where the memory is
+ * defined and attached to (platform device of_node or PCI device of_node)
+ * in device tree.
+ *
+ * Return: 0 for success, negative value for error
+ */
+static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
+{
+ struct device *dev_pci = &pci_priv->pci_dev->dev;
+ int ret;
+
+ /* Use of_reserved_mem_device_init_by_idx() if reserved memory is
+ * attached to platform device of_node.
+ */
+ ret = of_reserved_mem_device_init(dev_pci);
+ if (ret)
+ cnss_pr_err("Failed to init reserved mem device, err = %d\n",
+ ret);
+ if (dev_pci->cma_area)
+ cnss_pr_dbg("CMA area is %s\n",
+ cma_get_name(dev_pci->cma_area));
+
+ return ret;
+}
+#else
+static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+#endif
+#endif
+
+/* Assigning cnss_pm_domain to a device lets the PM framework override the
+ * ops from dev->bus->pm, which is pci_dev_pm_ops from pci-driver.c. These
+ * ops therefore have to take care of everything the device driver needs
+ * that is currently done by pci_dev_pm_ops.
+ */
+static struct dev_pm_domain cnss_pm_domain = {
+ .ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
+ cnss_pci_resume_noirq)
+ SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
+ cnss_pci_runtime_resume,
+ cnss_pci_runtime_idle)
+ }
+};
+
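+/* PCI probe: allocates the bus-private context, assigns reserved memory,
+ * registers subsystem and ramdump helpers, initializes SMMU, PCI link event
+ * notification, the PCI bus, MSI and MHI, sets up per-chip timers and the
+ * wake GPIO, then suspends the PCI link and powers the device off.
+ */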
+static int cnss_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct device *dev = &pci_dev->dev;
+
+ cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
+ id->vendor, pci_dev->device);
+#ifdef CONFIG_WCN_GOOGLE
+ exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, GOOGLE_RC_ID);
+#endif
+ pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
+ if (!pci_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+ pci_priv->plat_priv = plat_priv;
+ pci_priv->pci_dev = pci_dev;
+ pci_priv->pci_device_id = id;
+ pci_priv->device_id = pci_dev->device;
+ cnss_set_pci_priv(pci_dev, pci_priv);
+ plat_priv->device_id = pci_dev->device;
+ plat_priv->bus_priv = pci_priv;
+ mutex_init(&pci_priv->bus_lock);
+ if (plat_priv->use_pm_domain)
+ dev->pm_domain = &cnss_pm_domain;
+
+#ifndef CONFIG_WCN_GOOGLE
+ cnss_pci_of_reserved_mem_device_init(pci_priv);
+#else
+ ret = of_reserved_mem_device_init_by_idx(dev,
+ plat_priv->plat_dev->dev.of_node, 0);
+ if (ret)
+ cnss_pr_err("Failed to init reserved mem device, err = %d\n",
+ ret);
+ if (dev->cma_area)
+ cnss_pr_dbg("CMA area\n");
+#endif
+
+ ret = cnss_register_subsys(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_register_ramdump(plat_priv);
+ if (ret)
+ goto unregister_subsys;
+
+ ret = cnss_pci_init_smmu(pci_priv);
+ if (ret)
+ goto unregister_ramdump;
+
+ ret = cnss_reg_pci_event(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
+ goto deinit_smmu;
+ }
+
+ ret = cnss_pci_enable_bus(pci_priv);
+ if (ret)
+ goto dereg_pci_event;
+
+ ret = cnss_pci_enable_msi(pci_priv);
+ if (ret)
+ goto disable_bus;
+
+ ret = cnss_pci_register_mhi(pci_priv);
+ if (ret)
+ goto disable_msi;
+
+ switch (pci_dev->device) {
+ case QCA6174_DEVICE_ID:
+ pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
+ &pci_priv->revision_id);
+ break;
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
+ timer_setup(&pci_priv->dev_rddm_timer,
+ cnss_dev_rddm_timeout_hdlr, 0);
+ timer_setup(&pci_priv->boot_debug_timer,
+ cnss_boot_debug_timeout_hdlr, 0);
+ INIT_DELAYED_WORK(&pci_priv->time_sync_work,
+ cnss_pci_time_sync_work_hdlr);
+ cnss_pci_get_link_status(pci_priv);
+ cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
+ cnss_pci_wake_gpio_init(pci_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -ENODEV;
+ goto unreg_mhi;
+ }
+
+ cnss_pci_config_regs(pci_priv);
+ if (EMULATION_HW)
+ goto out;
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+ cnss_power_off_device(plat_priv);
+
+ return 0;
+
+unreg_mhi:
+ cnss_pci_unregister_mhi(pci_priv);
+disable_msi:
+ cnss_pci_disable_msi(pci_priv);
+disable_bus:
+ cnss_pci_disable_bus(pci_priv);
+dereg_pci_event:
+ cnss_dereg_pci_event(pci_priv);
+deinit_smmu:
+ cnss_pci_deinit_smmu(pci_priv);
+unregister_ramdump:
+ cnss_unregister_ramdump(plat_priv);
+unregister_subsys:
+ cnss_unregister_subsys(plat_priv);
+reset_ctx:
+ plat_priv->bus_priv = NULL;
+out:
+ return ret;
+}
+
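+/* PCI remove: releases M3/firmware/QDSS memory, tears down the wake GPIO and
+ * timers for MHI-based chips, and unwinds the MHI, MSI, bus, PCI event,
+ * SMMU, ramdump and subsystem registration done in probe.
+ */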
+static void cnss_pci_remove(struct pci_dev *pci_dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv =
+ cnss_bus_dev_to_plat_priv(&pci_dev->dev);
+
+ cnss_pci_free_m3_mem(pci_priv);
+ cnss_pci_free_fw_mem(pci_priv);
+ cnss_pci_free_qdss_mem(pci_priv);
+
+ switch (pci_dev->device) {
+ case QCA6290_DEVICE_ID:
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ case WCN7850_DEVICE_ID:
+ cnss_pci_wake_gpio_deinit(pci_priv);
+ del_timer(&pci_priv->boot_debug_timer);
+ del_timer(&pci_priv->dev_rddm_timer);
+ break;
+ default:
+ break;
+ }
+
+ cnss_pci_unregister_mhi(pci_priv);
+ cnss_pci_disable_msi(pci_priv);
+ cnss_pci_disable_bus(pci_priv);
+ cnss_dereg_pci_event(pci_priv);
+ cnss_pci_deinit_smmu(pci_priv);
+ if (plat_priv) {
+ cnss_unregister_ramdump(plat_priv);
+ cnss_unregister_subsys(plat_priv);
+ plat_priv->bus_priv = NULL;
+ } else {
+ cnss_pr_err("plat_priv is NULL, unable to unregister ramdump and subsys\n");
+ }
+}
+
+static const struct pci_device_id cnss_pci_id_table[] = {
+ { QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { WCN7850_VENDOR_ID, WCN7850_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
+
+static const struct dev_pm_ops cnss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
+ cnss_pci_resume_noirq)
+ SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
+ cnss_pci_runtime_idle)
+};
+
+struct pci_driver cnss_pci_driver = {
+ .name = "cnss_pci",
+ .id_table = cnss_pci_id_table,
+ .probe = cnss_pci_probe,
+ .remove = cnss_pci_remove,
+ .driver = {
+ .pm = &cnss_pm_ops,
+ },
+};
+#ifndef CONFIG_WCN_GOOGLE
+static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
+{
+ int ret, retry = 0;
+
+ /* Always set the initial target PCIe link speed to Gen2 for the QCA6490
+ * device since there may be link issues if it boots up with a Gen3 link
+ * speed. The device is able to change it later at any time. The request
+ * will be rejected if the requested speed is higher than the one
+ * specified in the PCIe DT.
+ */
+ if (plat_priv->device_id == QCA6490_DEVICE_ID) {
+ ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+ if (ret && ret != -EPROBE_DEFER)
+ cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
+ rc_num, ret);
+ }
+
+ cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
+retry:
+ ret = _cnss_pci_enumerate(plat_priv, rc_num);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
+ goto out;
+ }
+ cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
+ rc_num, ret);
+ if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
+ cnss_pr_dbg("Retry PCI link training #%d\n", retry);
+ goto retry;
+ } else {
+ goto out;
+ }
+ }
+
+ plat_priv->rc_num = rc_num;
+
+out:
+ return ret;
+}
+#endif
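+
+/**
+ * cnss_pci_init() - Enumerate the PCIe RC and register the PCI driver
+ * @plat_priv: platform driver context pointer
+ *
+ * On Qualcomm platforms the RC numbers are read from the "qcom,wlan-rc-num"
+ * DT property and enumerated one by one; on the Google/Exynos build the RC
+ * is resumed via exynos_pcie_pm_resume() instead. Once the link is up,
+ * cnss_pci_driver is registered and the probe is expected to populate
+ * plat_priv->bus_priv.
+ *
+ * Return: 0 for success, negative value for error
+ */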
+int cnss_pci_init(struct cnss_plat_data *plat_priv)
+{
+#ifndef CONFIG_WCN_GOOGLE
+ struct device *dev = &plat_priv->plat_dev->dev;
+ const __be32 *prop;
+ int ret = 0, prop_len = 0, rc_count, i;
+
+ prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
+ if (!prop || !prop_len) {
+ cnss_pr_err("Failed to get PCIe RC number from DT\n");
+ goto out;
+ }
+
+ rc_count = prop_len / sizeof(__be32);
+ for (i = 0; i < rc_count; i++) {
+ ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
+ if (!ret)
+ break;
+ else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
+ goto out;
+ }
+#else
+ int ret = 0;
+
+ cnss_pr_dbg("%s: Enter\n", __func__);
+ ret = exynos_pcie_pm_resume(GOOGLE_RC_ID);
+ cnss_pr_dbg("ret of exynos_pcie_pm_resume: %d\n", ret);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
+ else
+ cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
+ GOOGLE_RC_ID, ret);
+ goto out;
+ }
+#endif
+ ret = pci_register_driver(&cnss_pci_driver);
+ cnss_pr_dbg("ret of pci_register_driver: %d\n", ret);
+ if (ret) {
+ cnss_pr_err("Failed to register to PCI framework, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (!plat_priv->bus_priv) {
+ cnss_pr_err("Failed to probe PCI driver\n");
+ ret = -ENODEV;
+ goto unreg_pci;
+ }
+
+ return 0;
+
+unreg_pci:
+ pci_unregister_driver(&cnss_pci_driver);
+out:
+ return ret;
+}
+
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
+{
+ pci_unregister_driver(&cnss_pci_driver);
+}
diff --git a/cnss2/pci.h b/cnss2/pci.h
new file mode 100644
index 0000000..99cda6d
--- /dev/null
+++ b/cnss2/pci.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_PCI_H
+#define _CNSS_PCI_H
+
+#include <linux/iommu.h>
+#include <linux/mhi.h>
+#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
+#include <linux/mhi_misc.h>
+#endif
+#if IS_ENABLED(CONFIG_PCI_MSM)
+#include <linux/msm_pcie.h>
+#endif
+#include <linux/pci.h>
+
+#include "main.h"
+
+enum cnss_mhi_state {
+ CNSS_MHI_INIT,
+ CNSS_MHI_DEINIT,
+ CNSS_MHI_POWER_ON,
+ CNSS_MHI_POWERING_OFF,
+ CNSS_MHI_POWER_OFF,
+ CNSS_MHI_FORCE_POWER_OFF,
+ CNSS_MHI_SUSPEND,
+ CNSS_MHI_RESUME,
+ CNSS_MHI_TRIGGER_RDDM,
+ CNSS_MHI_RDDM,
+ CNSS_MHI_RDDM_DONE,
+};
+
+enum pci_link_status {
+ PCI_GEN1,
+ PCI_GEN2,
+ PCI_DEF,
+};
+
+enum cnss_rtpm_id {
+ RTPM_ID_CNSS,
+ RTPM_ID_MHI,
+ RTPM_ID_MAX,
+};
+
+enum cnss_pci_reg_dev_mask {
+ REG_MASK_QCA6390,
+ REG_MASK_QCA6490,
+ REG_MASK_WCN7850,
+};
+
+struct cnss_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct cnss_msi_config {
+ int total_vectors;
+ int total_users;
+ struct cnss_msi_user *users;
+};
+
+struct cnss_pci_reg {
+ char *name;
+ u32 offset;
+};
+
+struct cnss_pci_debug_reg {
+ u32 offset;
+ u32 val;
+};
+
+struct cnss_misc_reg {
+ unsigned long dev_mask;
+ u8 wr;
+ u32 offset;
+ u32 val;
+};
+
+struct cnss_pm_stats {
+ atomic_t runtime_get;
+ atomic_t runtime_put;
+ atomic_t runtime_get_id[RTPM_ID_MAX];
+ atomic_t runtime_put_id[RTPM_ID_MAX];
+ u64 runtime_get_timestamp_id[RTPM_ID_MAX];
+ u64 runtime_put_timestamp_id[RTPM_ID_MAX];
+};
+
+struct cnss_pci_data {
+ struct pci_dev *pci_dev;
+ struct cnss_plat_data *plat_priv;
+ const struct pci_device_id *pci_device_id;
+ u32 device_id;
+ u16 revision_id;
+ u64 dma_bit_mask;
+ struct cnss_wlan_driver *driver_ops;
+ u8 pci_link_state;
+ u8 pci_link_down_ind;
+ struct pci_saved_state *saved_state;
+ struct pci_saved_state *default_state;
+#if IS_ENABLED(CONFIG_PCI_MSM)
+ struct msm_pcie_register_event msm_pci_event;
+#endif
+#if defined(CONFIG_WCN_GOOGLE)
+ struct exynos_pcie_register_event exynos_pci_event;
+#endif
+ struct cnss_pm_stats pm_stats;
+ atomic_t auto_suspended;
+ atomic_t drv_connected;
+ u8 drv_connected_last;
+ u32 qmi_send_usage_count;
+ u16 def_link_speed;
+ u16 def_link_width;
+ u16 cur_link_speed;
+ int wake_gpio;
+ int wake_irq;
+ u32 wake_counter;
+ u8 monitor_wake_intr;
+ struct iommu_domain *iommu_domain;
+ u8 smmu_s1_enable;
+ dma_addr_t smmu_iova_start;
+ size_t smmu_iova_len;
+ dma_addr_t smmu_iova_ipa_start;
+ dma_addr_t smmu_iova_ipa_current;
+ size_t smmu_iova_ipa_len;
+ void __iomem *bar;
+ struct cnss_msi_config *msi_config;
+ u32 msi_ep_base_data;
+ struct mhi_controller *mhi_ctrl;
+ unsigned long mhi_state;
+ u32 remap_window;
+ struct timer_list dev_rddm_timer;
+ struct timer_list boot_debug_timer;
+ struct delayed_work time_sync_work;
+ u8 disable_pc;
+ struct mutex bus_lock; /* mutex for suspend and resume bus */
+ struct cnss_pci_debug_reg *debug_reg;
+ struct cnss_misc_reg *wcss_reg;
+ struct cnss_misc_reg *pcie_reg;
+ struct cnss_misc_reg *wlaon_reg;
+ struct cnss_misc_reg *syspm_reg;
+ unsigned long misc_reg_dev_mask;
+ u8 iommu_geometry;
+ bool drv_supported;
+};
+
+static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
+{
+ pci_set_drvdata(pci_dev, data);
+}
+
+static inline struct cnss_pci_data *cnss_get_pci_priv(struct pci_dev *pci_dev)
+{
+ return pci_get_drvdata(pci_dev);
+}
+
+static inline struct cnss_plat_data *cnss_pci_priv_to_plat_priv(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->plat_priv;
+}
+
+static inline void cnss_pci_set_monitor_wake_intr(void *bus_priv, bool val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ pci_priv->monitor_wake_intr = val;
+}
+
+static inline bool cnss_pci_get_monitor_wake_intr(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->monitor_wake_intr;
+}
+
+static inline void cnss_pci_set_auto_suspended(void *bus_priv, int val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ atomic_set(&pci_priv->auto_suspended, val);
+}
+
+static inline int cnss_pci_get_auto_suspended(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return atomic_read(&pci_priv->auto_suspended);
+}
+
+static inline void cnss_pci_set_drv_connected(void *bus_priv, int val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ atomic_set(&pci_priv->drv_connected, val);
+}
+
+static inline int cnss_pci_get_drv_connected(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return atomic_read(&pci_priv->drv_connected);
+}
+
+int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv);
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv);
+int cnss_pci_init(struct cnss_plat_data *plat_priv);
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+ char *prefix_name, char *name);
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
+void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv);
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv);
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv);
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv, void *data);
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+ int modem_current_status);
+void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv);
+int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv);
+int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv);
+int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id);
+int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id);
+void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id);
+int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id);
+void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
+ enum cnss_rtpm_id id);
+void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv);
+int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
+ enum cnss_driver_status status);
+int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
+ enum cnss_driver_status status, void *data);
+int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv);
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv);
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv);
+int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
+ u32 *val, bool raw_access);
+int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
+ u32 val, bool raw_access);
+int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size);
+int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr,
+ u64 *size);
+
+#endif /* _CNSS_PCI_H */
diff --git a/cnss2/power.c b/cnss2/power.c
new file mode 100644
index 0000000..8d5ce9f
--- /dev/null
+++ b/cnss2/power.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#if IS_ENABLED(CONFIG_MSM_QMP)
+#include <linux/mailbox/qmp.h>
+#endif
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#if IS_ENABLED(CONFIG_QCOM_COMMAND_DB)
+#include <soc/qcom/cmd-db.h>
+#endif
+
+#include "main.h"
+#include "debug.h"
+#include "bus.h"
+
+#if IS_ENABLED(CONFIG_ARCH_QCOM)
+static struct cnss_vreg_cfg cnss_vreg_list[] = {
+ {"vdd-wlan-core", 1300000, 1300000, 0, 0, 0},
+ {"vdd-wlan-io", 1800000, 1800000, 0, 0, 0},
+ {"vdd-wlan-xtal-aon", 0, 0, 0, 0, 0},
+ {"vdd-wlan-xtal", 1800000, 1800000, 0, 2, 0},
+ {"vdd-wlan", 0, 0, 0, 0, 0},
+ {"vdd-wlan-ctrl1", 0, 0, 0, 0, 0},
+ {"vdd-wlan-ctrl2", 0, 0, 0, 0, 0},
+ {"vdd-wlan-sp2t", 2700000, 2700000, 0, 0, 0},
+ {"wlan-ant-switch", 1800000, 1800000, 0, 0, 0},
+ {"wlan-soc-swreg", 1200000, 1200000, 0, 0, 0},
+ {"vdd-wlan-aon", 950000, 950000, 0, 0, 0},
+ {"vdd-wlan-dig", 950000, 952000, 0, 0, 0},
+ {"vdd-wlan-rfa1", 1900000, 1900000, 0, 0, 0},
+ {"vdd-wlan-rfa2", 1350000, 1350000, 0, 0, 0},
+ {"vdd-wlan-en", 0, 0, 0, 10, 0},
+};
+
+static struct cnss_clk_cfg cnss_clk_list[] = {
+ {"rf_clk", 0, 0},
+};
+#else
+static struct cnss_vreg_cfg cnss_vreg_list[] = {
+};
+
+static struct cnss_clk_cfg cnss_clk_list[] = {
+};
+#endif
+
+#define CNSS_VREG_INFO_SIZE ARRAY_SIZE(cnss_vreg_list)
+#define CNSS_CLK_INFO_SIZE ARRAY_SIZE(cnss_clk_list)
+#define MAX_PROP_SIZE 32
+
+#define BOOTSTRAP_GPIO "qcom,enable-bootstrap-gpio"
+#define BOOTSTRAP_ACTIVE "bootstrap_active"
+#define WLAN_EN_GPIO "wlan-en-gpio"
+#define BT_EN_GPIO "qcom,bt-en-gpio"
+#define XO_CLK_GPIO "qcom,xo-clk-gpio"
+#define WLAN_EN_ACTIVE "wlan_en_active"
+#define WLAN_EN_SLEEP "wlan_en_sleep"
+
+#define BOOTSTRAP_DELAY 1000
+#define WLAN_ENABLE_DELAY 1000
+
+#define TCS_CMD_DATA_ADDR_OFFSET 0x4
+#define TCS_OFFSET 0xC8
+#define TCS_CMD_OFFSET 0x10
+#define MAX_TCS_NUM 8
+#define MAX_TCS_CMD_NUM 5
+#define BT_CXMX_VOLTAGE_MV 950
+#define CNSS_MBOX_MSG_MAX_LEN 64
+#define CNSS_MBOX_TIMEOUT_MS 1000
+
+/**
+ * enum cnss_vreg_param - Voltage regulator TCS param
+ * @CNSS_VREG_VOLTAGE: Voltage level to be configured in TCS
+ * @CNSS_VREG_MODE: Regulator mode
+ * @CNSS_VREG_ENABLE: Set voltage regulator enable config in TCS
+ */
+enum cnss_vreg_param {
+ CNSS_VREG_VOLTAGE,
+ CNSS_VREG_MODE,
+ CNSS_VREG_ENABLE,
+};
+
+/**
+ * enum cnss_tcs_seq - TCS sequence ID for trigger
+ * @CNSS_TCS_UP_SEQ: TCS sequence based on up trigger / wake TCS
+ * @CNSS_TCS_DOWN_SEQ: TCS sequence based on down trigger / sleep TCS
+ * @CNSS_TCS_ALL_SEQ: Update for both up and down triggers
+ */
+enum cnss_tcs_seq {
+ CNSS_TCS_UP_SEQ,
+ CNSS_TCS_DOWN_SEQ,
+ CNSS_TCS_ALL_SEQ,
+};
+
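+/**
+ * cnss_get_vreg_single() - Look up one voltage regulator and its DT config
+ * @plat_priv: platform driver context pointer
+ * @vreg: regulator info entry to fill in
+ *
+ * Gets the optional regulator by name and, if a "qcom,<name>-config" DT
+ * property with five cells is present, overrides the default min/max
+ * voltage, load, delay and unvote settings with the DT values.
+ *
+ * Return: 0 for success, negative value for error (-ENODEV if the
+ * regulator is not specified)
+ */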
+static int cnss_get_vreg_single(struct cnss_plat_data *plat_priv,
+ struct cnss_vreg_info *vreg)
+{
+ int ret = 0;
+ struct device *dev;
+ struct regulator *reg;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE] = {0};
+ int len;
+
+ dev = &plat_priv->plat_dev->dev;
+ reg = devm_regulator_get_optional(dev, vreg->cfg.name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret == -ENODEV)
+ return ret;
+ else if (ret == -EPROBE_DEFER)
+ cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
+ vreg->cfg.name);
+ else
+ cnss_pr_err("Failed to get regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ return ret;
+ }
+
+ vreg->reg = reg;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-config",
+ vreg->cfg.name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+ if (!prop || len != (5 * sizeof(__be32))) {
+ cnss_pr_dbg("Property %s %s, use default\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ } else {
+ vreg->cfg.min_uv = be32_to_cpup(&prop[0]);
+ vreg->cfg.max_uv = be32_to_cpup(&prop[1]);
+ vreg->cfg.load_ua = be32_to_cpup(&prop[2]);
+ vreg->cfg.delay_us = be32_to_cpup(&prop[3]);
+ vreg->cfg.need_unvote = be32_to_cpup(&prop[4]);
+ }
+
+ cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u, need_unvote: %u\n",
+ vreg->cfg.name, vreg->cfg.min_uv,
+ vreg->cfg.max_uv, vreg->cfg.load_ua,
+ vreg->cfg.delay_us, vreg->cfg.need_unvote);
+
+ return 0;
+}
+
+static void cnss_put_vreg_single(struct cnss_plat_data *plat_priv,
+ struct cnss_vreg_info *vreg)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ cnss_pr_dbg("Put regulator: %s\n", vreg->cfg.name);
+ devm_regulator_put(vreg->reg);
+ devm_kfree(dev, vreg);
+}
+
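+/* Enable a single regulator: program the voltage and load if configured,
+ * honor the optional pre-enable delay, then enable it and mark it enabled.
+ */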
+static int cnss_vreg_on_single(struct cnss_vreg_info *vreg)
+{
+ int ret = 0;
+
+ if (vreg->enabled) {
+ cnss_pr_dbg("Regulator %s is already enabled\n",
+ vreg->cfg.name);
+ return 0;
+ }
+
+ cnss_pr_dbg("Regulator %s is being enabled\n", vreg->cfg.name);
+
+ if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+ ret = regulator_set_voltage(vreg->reg,
+ vreg->cfg.min_uv,
+ vreg->cfg.max_uv);
+
+ if (ret) {
+ cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
+ vreg->cfg.name, vreg->cfg.min_uv,
+ vreg->cfg.max_uv, ret);
+ goto out;
+ }
+ }
+
+ if (vreg->cfg.load_ua) {
+ ret = regulator_set_load(vreg->reg,
+ vreg->cfg.load_ua);
+
+ if (ret < 0) {
+ cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
+ vreg->cfg.name, vreg->cfg.load_ua,
+ ret);
+ goto out;
+ }
+ }
+
+ if (vreg->cfg.delay_us)
+ udelay(vreg->cfg.delay_us);
+
+ ret = regulator_enable(vreg->reg);
+ if (ret) {
+ cnss_pr_err("Failed to enable regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ goto out;
+ }
+ vreg->enabled = true;
+
+out:
+ return ret;
+}
+
+static int cnss_vreg_unvote_single(struct cnss_vreg_info *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled) {
+ cnss_pr_dbg("Regulator %s is already disabled\n",
+ vreg->cfg.name);
+ return 0;
+ }
+
+ cnss_pr_dbg("Removing vote for Regulator %s\n", vreg->cfg.name);
+
+ if (vreg->cfg.load_ua) {
+ ret = regulator_set_load(vreg->reg, 0);
+ if (ret < 0)
+ cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ }
+
+ if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+ ret = regulator_set_voltage(vreg->reg, 0,
+ vreg->cfg.max_uv);
+ if (ret)
+ cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ }
+
+ return ret;
+}
+
+static int cnss_vreg_off_single(struct cnss_vreg_info *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled) {
+ cnss_pr_dbg("Regulator %s is already disabled\n",
+ vreg->cfg.name);
+ return 0;
+ }
+
+ cnss_pr_dbg("Regulator %s is being disabled\n",
+ vreg->cfg.name);
+
+ ret = regulator_disable(vreg->reg);
+ if (ret)
+ cnss_pr_err("Failed to disable regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+
+ if (vreg->cfg.load_ua) {
+ ret = regulator_set_load(vreg->reg, 0);
+ if (ret < 0)
+ cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ }
+
+ if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+ ret = regulator_set_voltage(vreg->reg, 0,
+ vreg->cfg.max_uv);
+ if (ret)
+ cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+ vreg->cfg.name, ret);
+ }
+ vreg->enabled = false;
+
+ return ret;
+}
+
+static struct cnss_vreg_cfg *get_vreg_list(u32 *vreg_list_size,
+ enum cnss_vreg_type type)
+{
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ *vreg_list_size = CNSS_VREG_INFO_SIZE;
+ return cnss_vreg_list;
+ default:
+ cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+ *vreg_list_size = 0;
+ return NULL;
+ }
+}
+
+static int cnss_get_vreg(struct cnss_plat_data *plat_priv,
+ struct list_head *vreg_list,
+ struct cnss_vreg_cfg *vreg_cfg,
+ u32 vreg_list_size)
+{
+ int ret = 0;
+ int i;
+ struct cnss_vreg_info *vreg;
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ if (!list_empty(vreg_list)) {
+ cnss_pr_dbg("Vregs have already been updated\n");
+ return 0;
+ }
+
+ for (i = 0; i < vreg_list_size; i++) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ memcpy(&vreg->cfg, &vreg_cfg[i], sizeof(vreg->cfg));
+ ret = cnss_get_vreg_single(plat_priv, vreg);
+ if (ret != 0) {
+ if (ret == -ENODEV) {
+ devm_kfree(dev, vreg);
+ continue;
+ } else {
+ devm_kfree(dev, vreg);
+ return ret;
+ }
+ }
+ list_add_tail(&vreg->list, vreg_list);
+ }
+
+ return 0;
+}
+
+static void cnss_put_vreg(struct cnss_plat_data *plat_priv,
+ struct list_head *vreg_list)
+{
+ struct cnss_vreg_info *vreg;
+
+ while (!list_empty(vreg_list)) {
+ vreg = list_first_entry(vreg_list,
+ struct cnss_vreg_info, list);
+ list_del(&vreg->list);
+ if (IS_ERR_OR_NULL(vreg->reg))
+ continue;
+ cnss_put_vreg_single(plat_priv, vreg);
+ }
+}
+
+static int cnss_vreg_on(struct cnss_plat_data *plat_priv,
+ struct list_head *vreg_list)
+{
+ struct cnss_vreg_info *vreg;
+ int ret = 0;
+
+ list_for_each_entry(vreg, vreg_list, list) {
+ if (IS_ERR_OR_NULL(vreg->reg))
+ continue;
+ ret = cnss_vreg_on_single(vreg);
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ return 0;
+
+ list_for_each_entry_continue_reverse(vreg, vreg_list, list) {
+ if (IS_ERR_OR_NULL(vreg->reg) || !vreg->enabled)
+ continue;
+
+ cnss_vreg_off_single(vreg);
+ }
+
+ return ret;
+}
+
+static int cnss_vreg_off(struct cnss_plat_data *plat_priv,
+ struct list_head *vreg_list)
+{
+ struct cnss_vreg_info *vreg;
+
+ list_for_each_entry_reverse(vreg, vreg_list, list) {
+ if (IS_ERR_OR_NULL(vreg->reg))
+ continue;
+
+ cnss_vreg_off_single(vreg);
+ }
+
+ return 0;
+}
+
+static int cnss_vreg_unvote(struct cnss_plat_data *plat_priv,
+ struct list_head *vreg_list)
+{
+ struct cnss_vreg_info *vreg;
+
+ list_for_each_entry_reverse(vreg, vreg_list, list) {
+ if (IS_ERR_OR_NULL(vreg->reg))
+ continue;
+
+ if (vreg->cfg.need_unvote)
+ cnss_vreg_unvote_single(vreg);
+ }
+
+ return 0;
+}
+
+int cnss_get_vreg_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type)
+{
+ struct cnss_vreg_cfg *vreg_cfg;
+ u32 vreg_list_size = 0;
+ int ret = 0;
+
+ vreg_cfg = get_vreg_list(&vreg_list_size, type);
+ if (!vreg_cfg)
+ return -EINVAL;
+
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ ret = cnss_get_vreg(plat_priv, &plat_priv->vreg_list,
+ vreg_cfg, vreg_list_size);
+ break;
+ default:
+ cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void cnss_put_vreg_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type)
+{
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ cnss_put_vreg(plat_priv, &plat_priv->vreg_list);
+ break;
+ default:
+ return;
+ }
+}
+
+int cnss_vreg_on_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ ret = cnss_vreg_on(plat_priv, &plat_priv->vreg_list);
+ break;
+ default:
+ cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ ret = cnss_vreg_off(plat_priv, &plat_priv->vreg_list);
+ break;
+ default:
+ cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int cnss_vreg_unvote_type(struct cnss_plat_data *plat_priv,
+ enum cnss_vreg_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case CNSS_VREG_PRIM:
+ ret = cnss_vreg_unvote(plat_priv, &plat_priv->vreg_list);
+ break;
+ default:
+ cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cnss_get_clk_single(struct cnss_plat_data *plat_priv,
+ struct cnss_clk_info *clk_info)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, clk_info->cfg.name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ if (clk_info->cfg.required)
+ cnss_pr_err("Failed to get clock %s, err = %d\n",
+ clk_info->cfg.name, ret);
+ else
+ cnss_pr_dbg("Failed to get optional clock %s, err = %d\n",
+ clk_info->cfg.name, ret);
+ return ret;
+ }
+
+ clk_info->clk = clk;
+ cnss_pr_dbg("Got clock: %s, freq: %u\n",
+ clk_info->cfg.name, clk_info->cfg.freq);
+
+ return 0;
+}
+
+static void cnss_put_clk_single(struct cnss_plat_data *plat_priv,
+ struct cnss_clk_info *clk_info)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+
+ cnss_pr_dbg("Put clock: %s\n", clk_info->cfg.name);
+ devm_clk_put(dev, clk_info->clk);
+}
+
+static int cnss_clk_on_single(struct cnss_clk_info *clk_info)
+{
+ int ret;
+
+ if (clk_info->enabled) {
+ cnss_pr_dbg("Clock %s is already enabled\n",
+ clk_info->cfg.name);
+ return 0;
+ }
+
+ cnss_pr_dbg("Clock %s is being enabled\n", clk_info->cfg.name);
+
+ if (clk_info->cfg.freq) {
+ ret = clk_set_rate(clk_info->clk, clk_info->cfg.freq);
+ if (ret) {
+ cnss_pr_err("Failed to set frequency %u for clock %s, err = %d\n",
+ clk_info->cfg.freq, clk_info->cfg.name,
+ ret);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->clk);
+ if (ret) {
+ cnss_pr_err("Failed to enable clock %s, err = %d\n",
+ clk_info->cfg.name, ret);
+ return ret;
+ }
+
+ clk_info->enabled = true;
+
+ return 0;
+}
+
+static int cnss_clk_off_single(struct cnss_clk_info *clk_info)
+{
+ if (!clk_info->enabled) {
+ cnss_pr_dbg("Clock %s is already disabled\n",
+ clk_info->cfg.name);
+ return 0;
+ }
+
+ cnss_pr_dbg("Clock %s is being disabled\n", clk_info->cfg.name);
+
+ clk_disable_unprepare(clk_info->clk);
+ clk_info->enabled = false;
+
+ return 0;
+}
+
+int cnss_get_clk(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct list_head *clk_list;
+ struct cnss_clk_info *clk_info;
+ int ret, i;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ dev = &plat_priv->plat_dev->dev;
+ clk_list = &plat_priv->clk_list;
+
+ if (!list_empty(clk_list)) {
+ cnss_pr_dbg("Clocks have already been updated\n");
+ return 0;
+ }
+
+ for (i = 0; i < CNSS_CLK_INFO_SIZE; i++) {
+ clk_info = devm_kzalloc(dev, sizeof(*clk_info), GFP_KERNEL);
+ if (!clk_info) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ memcpy(&clk_info->cfg, &cnss_clk_list[i],
+ sizeof(clk_info->cfg));
+ ret = cnss_get_clk_single(plat_priv, clk_info);
+ if (ret != 0) {
+ if (clk_info->cfg.required) {
+ devm_kfree(dev, clk_info);
+ goto cleanup;
+ } else {
+ devm_kfree(dev, clk_info);
+ continue;
+ }
+ }
+ list_add_tail(&clk_info->list, clk_list);
+ }
+
+ return 0;
+
+cleanup:
+ while (!list_empty(clk_list)) {
+ clk_info = list_first_entry(clk_list, struct cnss_clk_info,
+ list);
+ list_del(&clk_info->list);
+ if (IS_ERR_OR_NULL(clk_info->clk))
+ continue;
+ cnss_put_clk_single(plat_priv, clk_info);
+ devm_kfree(dev, clk_info);
+ }
+
+ return ret;
+}
+
+void cnss_put_clk(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct list_head *clk_list;
+ struct cnss_clk_info *clk_info;
+
+ if (!plat_priv)
+ return;
+
+ dev = &plat_priv->plat_dev->dev;
+ clk_list = &plat_priv->clk_list;
+
+ while (!list_empty(clk_list)) {
+ clk_info = list_first_entry(clk_list, struct cnss_clk_info,
+ list);
+ list_del(&clk_info->list);
+ if (IS_ERR_OR_NULL(clk_info->clk))
+ continue;
+ cnss_put_clk_single(plat_priv, clk_info);
+ devm_kfree(dev, clk_info);
+ }
+}
+
+static int cnss_clk_on(struct cnss_plat_data *plat_priv,
+ struct list_head *clk_list)
+{
+ struct cnss_clk_info *clk_info;
+ int ret = 0;
+
+ list_for_each_entry(clk_info, clk_list, list) {
+ if (IS_ERR_OR_NULL(clk_info->clk))
+ continue;
+ ret = cnss_clk_on_single(clk_info);
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ return 0;
+
+ list_for_each_entry_continue_reverse(clk_info, clk_list, list) {
+ if (IS_ERR_OR_NULL(clk_info->clk))
+ continue;
+
+ cnss_clk_off_single(clk_info);
+ }
+
+ return ret;
+}
+
+static int cnss_clk_off(struct cnss_plat_data *plat_priv,
+ struct list_head *clk_list)
+{
+ struct cnss_clk_info *clk_info;
+
+ list_for_each_entry_reverse(clk_info, clk_list, list) {
+ if (IS_ERR_OR_NULL(clk_info->clk))
+ continue;
+
+ cnss_clk_off_single(clk_info);
+ }
+
+ return 0;
+}
+
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ pinctrl_info->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+ ret = PTR_ERR(pinctrl_info->pinctrl);
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto out;
+ }
+
+ if (of_find_property(dev->of_node, BOOTSTRAP_GPIO, NULL)) {
+ pinctrl_info->bootstrap_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ BOOTSTRAP_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = PTR_ERR(pinctrl_info->bootstrap_active);
+ cnss_pr_err("Failed to get bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ if (of_find_property(dev->of_node, WLAN_EN_GPIO, NULL)) {
+ pinctrl_info->wlan_en_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_active);
+ cnss_pr_err("Failed to get wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ pinctrl_info->wlan_en_sleep =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_SLEEP);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_sleep);
+ cnss_pr_err("Failed to get wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Added for QCA6490 PMU delayed WLAN_EN_GPIO */
+ if (of_find_property(dev->of_node, BT_EN_GPIO, NULL)) {
+ pinctrl_info->bt_en_gpio = of_get_named_gpio(dev->of_node,
+ BT_EN_GPIO, 0);
+ cnss_pr_dbg("BT GPIO: %d\n", pinctrl_info->bt_en_gpio);
+ } else {
+ pinctrl_info->bt_en_gpio = -EINVAL;
+ }
+
+ /* Added for QCA6490 to prevent XO CLK selection leakage */
+ if (of_find_property(dev->of_node, XO_CLK_GPIO, NULL)) {
+ pinctrl_info->xo_clk_gpio = of_get_named_gpio(dev->of_node,
+ XO_CLK_GPIO, 0);
+ cnss_pr_dbg("QCA6490 XO_CLK GPIO: %d\n",
+ pinctrl_info->xo_clk_gpio);
+ cnss_set_feature_list(plat_priv, BOOTSTRAP_CLOCK_SELECT_V01);
+ } else {
+ pinctrl_info->xo_clk_gpio = -EINVAL;
+ }
+ return 0;
+out:
+ return ret;
+}
+
+#define CNSS_XO_CLK_RETRY_COUNT_MAX 5
+static void cnss_set_xo_clk_gpio_state(struct cnss_plat_data *plat_priv,
+ bool enable)
+{
+ int xo_clk_gpio = plat_priv->pinctrl_info.xo_clk_gpio, retry = 0, ret;
+
+ if (xo_clk_gpio < 0 || plat_priv->device_id != QCA6490_DEVICE_ID)
+ return;
+
+retry_gpio_req:
+ ret = gpio_request(xo_clk_gpio, "XO_CLK_GPIO");
+ if (ret) {
+ if (retry++ < CNSS_XO_CLK_RETRY_COUNT_MAX) {
+ /* wait for ~(10 - 20) ms */
+ usleep_range(10000, 20000);
+ goto retry_gpio_req;
+ }
+ }
+
+ if (ret) {
+ cnss_pr_err("QCA6490 XO CLK Gpio request failed\n");
+ return;
+ }
+
+ if (enable) {
+ gpio_direction_output(xo_clk_gpio, 1);
+ /* XO CLK must be asserted for some time before WLAN_EN */
+ usleep_range(100, 200);
+ } else {
+ /* Keep XO CLK asserted for ~2-5 ms before turning it off for a
+ * valid latch in HW
+ */
+ usleep_range(2000, 5000);
+ gpio_direction_output(xo_clk_gpio, 0);
+ }
+
+ gpio_free(xo_clk_gpio);
+}
+
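+/**
+ * cnss_select_pinctrl_state() - Assert or de-assert WLAN_EN via pinctrl
+ * @plat_priv: platform driver context pointer
+ * @state: true to assert WLAN_EN, false to de-assert it
+ *
+ * When asserting, the optional bootstrap pins are driven first, the XO
+ * clock GPIO is toggled around WLAN_EN for QCA6490, and the required
+ * settle delays are applied. When de-asserting, the wlan_en sleep pinctrl
+ * state is selected.
+ *
+ * Return: 0 for success, negative value for error
+ */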
+static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv,
+ bool state)
+{
+ int ret = 0;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ if (state) {
+ if (!IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = pinctrl_select_state
+ (pinctrl_info->pinctrl,
+ pinctrl_info->bootstrap_active);
+ if (ret) {
+ cnss_pr_err("Failed to select bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(BOOTSTRAP_DELAY);
+ }
+ cnss_set_xo_clk_gpio_state(plat_priv, true);
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = pinctrl_select_state
+ (pinctrl_info->pinctrl,
+ pinctrl_info->wlan_en_active);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(WLAN_ENABLE_DELAY);
+ }
+ cnss_set_xo_clk_gpio_state(plat_priv, false);
+ } else {
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->wlan_en_sleep);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ cnss_pr_dbg("%s WLAN_EN GPIO successfully\n",
+ state ? "Asserted" : "De-asserted");
+
+ return 0;
+out:
+ return ret;
+}
+
+/**
+ * cnss_select_pinctrl_enable - Select the WLAN_EN_GPIO active pinctrl state
+ * @plat_priv: Platform private data structure pointer
+ *
+ * For QCA6490, the PMU requires a minimum 100 ms delay between BT_EN_GPIO
+ * going off and WLAN_EN_GPIO going on. This is done to avoid power up issues.
+ *
+ * Return: Status of pinctrl select operation. 0 - Success.
+ */
+static int cnss_select_pinctrl_enable(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
+ u8 wlan_en_state = 0;
+
+ if (bt_en_gpio < 0 || plat_priv->device_id != QCA6490_DEVICE_ID)
+ goto set_wlan_en;
+
+ if (gpio_get_value(bt_en_gpio)) {
+ cnss_pr_dbg("BT_EN_GPIO State: On\n");
+ ret = cnss_select_pinctrl_state(plat_priv, true);
+ if (!ret)
+ return ret;
+ wlan_en_state = 1;
+ }
+ if (!gpio_get_value(bt_en_gpio)) {
+ cnss_pr_dbg("BT_EN_GPIO State: Off. Delay WLAN_GPIO enable\n");
+ /* check for BT_EN_GPIO down race during above operation */
+ if (wlan_en_state) {
+ cnss_pr_dbg("Reset WLAN_EN as BT got turned off during enable\n");
+ cnss_select_pinctrl_state(plat_priv, false);
+ wlan_en_state = 0;
+ }
+ /* 100 ms delay for BT_EN and WLAN_EN QCA6490 PMU sequencing */
+ msleep(100);
+ }
+set_wlan_en:
+ if (!wlan_en_state)
+ ret = cnss_select_pinctrl_state(plat_priv, true);
+ return ret;
+}
+
+int cnss_power_on_device(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (plat_priv->powered_on) {
+ cnss_pr_dbg("Already powered up\n");
+ return 0;
+ }
+
+ ret = cnss_vreg_on_type(plat_priv, CNSS_VREG_PRIM);
+ if (ret) {
+ cnss_pr_err("Failed to turn on vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_clk_on(plat_priv, &plat_priv->clk_list);
+ if (ret) {
+ cnss_pr_err("Failed to turn on clocks, err = %d\n", ret);
+ goto vreg_off;
+ }
+
+ ret = cnss_select_pinctrl_enable(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
+ goto clk_off;
+ }
+
+ plat_priv->powered_on = true;
+
+ return 0;
+
+clk_off:
+ cnss_clk_off(plat_priv, &plat_priv->clk_list);
+vreg_off:
+ cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
+out:
+ return ret;
+}
+
+void cnss_power_off_device(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv->powered_on) {
+ cnss_pr_dbg("Already powered down\n");
+ return;
+ }
+
+ cnss_select_pinctrl_state(plat_priv, false);
+ cnss_clk_off(plat_priv, &plat_priv->clk_list);
+ cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
+ plat_priv->powered_on = false;
+}
+
+bool cnss_is_device_powered_on(struct cnss_plat_data *plat_priv)
+{
+ return plat_priv->powered_on;
+}
+
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
+{
+ unsigned long pin_status = 0;
+
+ set_bit(CNSS_WLAN_EN, &pin_status);
+ set_bit(CNSS_PCIE_TXN, &pin_status);
+ set_bit(CNSS_PCIE_TXP, &pin_status);
+ set_bit(CNSS_PCIE_RXN, &pin_status);
+ set_bit(CNSS_PCIE_RXP, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKN, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKP, &pin_status);
+ set_bit(CNSS_PCIE_RST, &pin_status);
+
+ plat_priv->pin_result.host_pin_result = pin_status;
+}
+
+#if IS_ENABLED(CONFIG_QCOM_COMMAND_DB)
+static int cnss_cmd_db_ready(struct cnss_plat_data *plat_priv)
+{
+ return cmd_db_ready();
+}
+
+static u32 cnss_cmd_db_read_addr(struct cnss_plat_data *plat_priv,
+ const char *res_id)
+{
+ return cmd_db_read_addr(res_id);
+}
+#else
+static int cnss_cmd_db_ready(struct cnss_plat_data *plat_priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 cnss_cmd_db_read_addr(struct cnss_plat_data *plat_priv,
+ const char *res_id)
+{
+ return 0;
+}
+#endif
+
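+/**
+ * cnss_get_tcs_info() - Map the TCS command block used for CPR updates
+ * @plat_priv: platform driver context pointer
+ *
+ * Looks up the "tcs_cmd" memory resource on the platform device and maps
+ * it so that CPR/IPA values can later be written directly into the TCS
+ * command registers.
+ *
+ * Return: 0 for success (or when the resource is absent), negative value
+ * for error
+ */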
+int cnss_get_tcs_info(struct cnss_plat_data *plat_priv)
+{
+ struct platform_device *plat_dev = plat_priv->plat_dev;
+ struct resource *res;
+ resource_size_t addr_len;
+ void __iomem *tcs_cmd_base_addr;
+ int ret = 0;
+
+ res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "tcs_cmd");
+ if (!res) {
+ cnss_pr_dbg("TCS CMD address is not present for CPR\n");
+ goto out;
+ }
+
+ plat_priv->tcs_info.cmd_base_addr = res->start;
+ addr_len = resource_size(res);
+ cnss_pr_dbg("TCS CMD base address is %pa with length %pa\n",
+ &plat_priv->tcs_info.cmd_base_addr, &addr_len);
+
+ tcs_cmd_base_addr = devm_ioremap(&plat_dev->dev, res->start, addr_len);
+ if (!tcs_cmd_base_addr) {
+ ret = -EINVAL;
+ cnss_pr_err("Failed to map TCS CMD address, err = %d\n",
+ ret);
+ goto out;
+ }
+ plat_priv->tcs_info.cmd_base_addr_io = tcs_cmd_base_addr;
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_get_cpr_info(struct cnss_plat_data *plat_priv)
+{
+ struct platform_device *plat_dev = plat_priv->plat_dev;
+ struct cnss_cpr_info *cpr_info = &plat_priv->cpr_info;
+ const char *cmd_db_name;
+ u32 cpr_pmic_addr = 0;
+ int ret = 0;
+
+ if (plat_priv->tcs_info.cmd_base_addr == 0) {
+ cnss_pr_dbg("TCS CMD not configured\n");
+ return 0;
+ }
+
+ ret = of_property_read_string(plat_dev->dev.of_node,
+ "qcom,cmd_db_name", &cmd_db_name);
+ if (ret) {
+ cnss_pr_dbg("CommandDB name is not present for CPR\n");
+ goto out;
+ }
+
+ ret = cnss_cmd_db_ready(plat_priv);
+ if (ret) {
+ cnss_pr_err("CommandDB is not ready, err = %d\n", ret);
+ goto out;
+ }
+
+ cpr_pmic_addr = cnss_cmd_db_read_addr(plat_priv, cmd_db_name);
+ if (cpr_pmic_addr > 0) {
+ cpr_info->cpr_pmic_addr = cpr_pmic_addr;
+ cnss_pr_dbg("Get CPR PMIC address 0x%x from %s\n",
+ cpr_info->cpr_pmic_addr, cmd_db_name);
+ } else {
+ cnss_pr_err("CPR PMIC address is not available for %s\n",
+ cmd_db_name);
+ ret = -EINVAL;
+ goto out;
+ }
+ return 0;
+out:
+ return ret;
+}
+
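+/**
+ * cnss_aop_mbox_init() - Set up the AOP QMP mailbox channel
+ * @plat_priv: platform driver context pointer
+ *
+ * Requests mailbox channel 0 for sending TCS/PDC messages to AOP and reads
+ * the optional "qcom,vreg_ol_cpr" and "qcom,vreg_ipa" DT strings used to
+ * address the regulators in those messages.
+ *
+ * Return: 0 for success, negative value for error
+ */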
+int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
+{
+ struct mbox_client *mbox = &plat_priv->mbox_client_data;
+ struct mbox_chan *chan;
+ int ret = 0;
+
+ mbox->dev = &plat_priv->plat_dev->dev;
+ mbox->tx_block = true;
+ mbox->tx_tout = CNSS_MBOX_TIMEOUT_MS;
+ mbox->knows_txdone = false;
+
+ plat_priv->mbox_chan = NULL;
+ chan = mbox_request_channel(mbox, 0);
+ if (IS_ERR(chan)) {
+ cnss_pr_err("Failed to get mbox channel\n");
+ return PTR_ERR(chan);
+ }
+ plat_priv->mbox_chan = chan;
+
+ ret = of_property_read_string(plat_priv->plat_dev->dev.of_node,
+ "qcom,vreg_ol_cpr",
+ &plat_priv->vreg_ol_cpr);
+ if (ret)
+ cnss_pr_dbg("Vreg for OL CPR not configured\n");
+
+ ret = of_property_read_string(plat_priv->plat_dev->dev.of_node,
+ "qcom,vreg_ipa",
+ &plat_priv->vreg_ipa);
+ if (ret)
+ cnss_pr_dbg("Volt regulator for Int Power Amp not configured\n");
+
+ cnss_pr_dbg("Mbox channel initialized\n");
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_MSM_QMP)
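+/**
+ * cnss_aop_set_vreg_param() - Send a regulator TCS update to AOP over QMP
+ * @plat_priv: platform driver context pointer
+ * @vreg_name: PDC resource name of the regulator
+ * @param: which parameter to set (voltage, mode or enable)
+ * @seq: which TCS sequence (up, down or both) the value applies to
+ * @val: value to program
+ *
+ * Formats a "{class: wlan_pdc, ...}" message and sends it on the AOP
+ * mailbox channel. Compiled out (returns 0) when CONFIG_MSM_QMP is not
+ * enabled.
+ *
+ * Return: 0 for success, negative value for error
+ */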
+static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
+ const char *vreg_name,
+ enum cnss_vreg_param param,
+ enum cnss_tcs_seq seq, int val)
+{
+ struct qmp_pkt pkt;
+ char mbox_msg[CNSS_MBOX_MSG_MAX_LEN];
+ static const char * const vreg_param_str[] = {"v", "m", "e"};
+ static const char *const tcs_seq_str[] = {"upval", "dwnval", "enable"};
+ int ret = 0;
+
+ if (param > CNSS_VREG_ENABLE || seq > CNSS_TCS_ALL_SEQ || !vreg_name)
+ return -EINVAL;
+
+ snprintf(mbox_msg, CNSS_MBOX_MSG_MAX_LEN,
+ "{class: wlan_pdc, res: %s.%s, %s: %d}", vreg_name,
+ vreg_param_str[param], tcs_seq_str[seq], val);
+
+ cnss_pr_dbg("Sending AOP Mbox msg: %s\n", mbox_msg);
+ pkt.size = CNSS_MBOX_MSG_MAX_LEN;
+ pkt.data = mbox_msg;
+
+ ret = mbox_send_message(plat_priv->mbox_chan, &pkt);
+ if (ret < 0)
+ cnss_pr_err("Failed to send AOP mbox msg: %s\n", mbox_msg);
+ else
+ ret = 0;
+
+ return ret;
+}
+#else
+static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
+ const char *vreg_name,
+ enum cnss_vreg_param param,
+ enum cnss_tcs_seq seq, int val)
+{
+ return 0;
+}
+#endif
+
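+/**
+ * cnss_update_cpr_info() - Push the OL CPR voltage to AOP or TCS registers
+ * @plat_priv: platform driver context pointer
+ *
+ * Prefers sending the voltage through the AOP mailbox when both the mailbox
+ * channel and vreg_ol_cpr are configured. Otherwise it scans the TCS command
+ * registers for the entry matching the CPR PMIC address, caches the data
+ * register holding the highest programmed voltage, and writes the new
+ * voltage (at least BT_CXMX_VOLTAGE_MV) directly to it.
+ *
+ * Return: 0 for success, negative value for error
+ */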
+int cnss_update_cpr_info(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_cpr_info *cpr_info = &plat_priv->cpr_info;
+ u32 pmic_addr, voltage = 0, voltage_tmp, offset;
+ void __iomem *tcs_cmd_addr, *tcs_cmd_data_addr;
+ int i, j;
+
+ if (cpr_info->voltage == 0) {
+ cnss_pr_err("OL CPR voltage %dmV is not valid\n",
+ cpr_info->voltage);
+ return -EINVAL;
+ }
+
+ if (!plat_priv->vreg_ol_cpr || !plat_priv->mbox_chan) {
+ cnss_pr_dbg("Mbox channel / OL CPR Vreg not configured\n");
+ } else {
+ return cnss_aop_set_vreg_param(plat_priv,
+ plat_priv->vreg_ol_cpr,
+ CNSS_VREG_VOLTAGE,
+ CNSS_TCS_UP_SEQ,
+ cpr_info->voltage);
+ }
+
+ if (plat_priv->tcs_info.cmd_base_addr == 0) {
+ cnss_pr_dbg("TCS CMD not configured for OL CPR update\n");
+ return 0;
+ }
+
+ if (cpr_info->cpr_pmic_addr == 0) {
+ cnss_pr_err("PMIC address 0x%x is not valid\n",
+ cpr_info->cpr_pmic_addr);
+ return -EINVAL;
+ }
+
+ if (cpr_info->tcs_cmd_data_addr_io)
+ goto update_cpr;
+
+ for (i = 0; i < MAX_TCS_NUM; i++) {
+ for (j = 0; j < MAX_TCS_CMD_NUM; j++) {
+ offset = i * TCS_OFFSET + j * TCS_CMD_OFFSET;
+ tcs_cmd_addr = plat_priv->tcs_info.cmd_base_addr_io +
+ offset;
+ pmic_addr = readl_relaxed(tcs_cmd_addr);
+ if (pmic_addr == cpr_info->cpr_pmic_addr) {
+ tcs_cmd_data_addr = tcs_cmd_addr +
+ TCS_CMD_DATA_ADDR_OFFSET;
+ voltage_tmp = readl_relaxed(tcs_cmd_data_addr);
+ cnss_pr_dbg("Got voltage %dmV from i: %d, j: %d\n",
+ voltage_tmp, i, j);
+
+ if (voltage_tmp > voltage) {
+ voltage = voltage_tmp;
+ cpr_info->tcs_cmd_data_addr =
+ plat_priv->tcs_info.cmd_base_addr +
+ offset + TCS_CMD_DATA_ADDR_OFFSET;
+ cpr_info->tcs_cmd_data_addr_io =
+ tcs_cmd_data_addr;
+ }
+ }
+ }
+ }
+
+ if (!cpr_info->tcs_cmd_data_addr_io) {
+ cnss_pr_err("Failed to find proper TCS CMD data address\n");
+ return -EINVAL;
+ }
+
+update_cpr:
+ cpr_info->voltage = cpr_info->voltage > BT_CXMX_VOLTAGE_MV ?
+ cpr_info->voltage : BT_CXMX_VOLTAGE_MV;
+ cnss_pr_dbg("Update TCS CMD data address %pa with voltage %dmV\n",
+ &cpr_info->tcs_cmd_data_addr, cpr_info->voltage);
+ writel_relaxed(cpr_info->voltage, cpr_info->tcs_cmd_data_addr_io);
+
+ return 0;
+}
+
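+/**
+ * cnss_enable_int_pow_amp_vreg() - Enable the internal power amplifier vreg
+ * @plat_priv: platform driver context pointer
+ *
+ * QCA6490 only. Uses the AOP mailbox when available, otherwise writes the
+ * enable bit directly to the TCS command data register found through the
+ * "qcom,tcs_offset_int_pow_amp_vreg" DT offset. The configuration is done
+ * at most once per boot.
+ *
+ * Return: 0 for success, negative value for error
+ */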
+int cnss_enable_int_pow_amp_vreg(struct cnss_plat_data *plat_priv)
+{
+ struct platform_device *plat_dev = plat_priv->plat_dev;
+ u32 offset, addr_val, data_val;
+ void __iomem *tcs_cmd;
+ int ret;
+ static bool config_done;
+
+ if (plat_priv->device_id != QCA6490_DEVICE_ID)
+ return -EINVAL;
+
+ if (config_done) {
+ cnss_pr_dbg("IPA Vreg already configured\n");
+ return 0;
+ }
+
+ if (!plat_priv->vreg_ipa || !plat_priv->mbox_chan) {
+ cnss_pr_dbg("Mbox channel / IPA Vreg not configured\n");
+ } else {
+ ret = cnss_aop_set_vreg_param(plat_priv,
+ plat_priv->vreg_ipa,
+ CNSS_VREG_ENABLE,
+ CNSS_TCS_UP_SEQ, 1);
+ if (ret == 0)
+ config_done = true;
+ return ret;
+ }
+
+ if (!plat_priv->tcs_info.cmd_base_addr_io) {
+ cnss_pr_err("TCS CMD not configured for IPA Vreg enable\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(plat_dev->dev.of_node,
+ "qcom,tcs_offset_int_pow_amp_vreg",
+ &offset);
+ if (ret) {
+ cnss_pr_dbg("Internal Power Amp Vreg not configured\n");
+ return -EINVAL;
+ }
+ tcs_cmd = plat_priv->tcs_info.cmd_base_addr_io + offset;
+ addr_val = readl_relaxed(tcs_cmd);
+ tcs_cmd += TCS_CMD_DATA_ADDR_OFFSET;
+
+ /* 1 = enable Vreg */
+ writel_relaxed(1, tcs_cmd);
+
+ data_val = readl_relaxed(tcs_cmd);
+ cnss_pr_dbg("Setup S3E TCS Addr: %x Data: %d\n", addr_val, data_val);
+ config_done = true;
+
+ return 0;
+}
diff --git a/cnss2/qmi.c b/cnss2/qmi.c
new file mode 100644
index 0000000..6d43ef2
--- /dev/null
+++ b/cnss2/qmi.c
@@ -0,0 +1,3448 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "bus.h"
+#include "debug.h"
+#include "main.h"
+#include "qmi.h"
+#include "genl.h"
+
+#define WLFW_SERVICE_INS_ID_V01 1
+#define WLFW_CLIENT_ID 0x4b4e454c
+#define BDF_FILE_NAME_PREFIX "bdwlan"
+#define ELF_BDF_FILE_NAME "bdwlan.elf"
+#define ELF_BDF_FILE_NAME_GF "bdwlang.elf"
+#define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e"
+#define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e"
+#define BIN_BDF_FILE_NAME "bdwlan.bin"
+#define BIN_BDF_FILE_NAME_GF "bdwlang.bin"
+#define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b"
+#define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b"
+#define REGDB_FILE_NAME "regdb.bin"
+#define HDS_FILE_NAME "hds.bin"
+#define CHIP_ID_GF_MASK 0x10
+
+#define QDSS_TRACE_CONFIG_FILE "qdss_trace_config"
+#ifdef CONFIG_CNSS2_DEBUG
+#define QDSS_DEBUG_FILE_STR "debug_"
+#else
+#define QDSS_DEBUG_FILE_STR ""
+#endif
+#define HW_V1_NUMBER "v1"
+#define HW_V2_NUMBER "v2"
+
+#define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout)
+#define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
+#define COEX_TIMEOUT QMI_WLFW_TIMEOUT_JF
+#define IMS_TIMEOUT QMI_WLFW_TIMEOUT_JF
+
+#define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K
+#define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K
+#define DMS_QMI_MAX_MSG_LEN SZ_256
+
+#define QMI_WLFW_MAC_READY_TIMEOUT_MS 50
+#define QMI_WLFW_MAC_READY_MAX_RETRY 200
+
+#ifdef CONFIG_CNSS2_DEBUG
+static bool ignore_qmi_failure;
+#define CNSS_QMI_ASSERT() CNSS_ASSERT(ignore_qmi_failure)
+void cnss_ignore_qmi_failure(bool ignore)
+{
+ ignore_qmi_failure = ignore;
+}
+#else
+#define CNSS_QMI_ASSERT() do { } while (0)
+void cnss_ignore_qmi_failure(bool ignore) { }
+#endif
+
+static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode)
+{
+ switch (mode) {
+ case CNSS_MISSION:
+ return "MISSION";
+ case CNSS_FTM:
+ return "FTM";
+ case CNSS_EPPING:
+ return "EPPING";
+ case CNSS_WALTEST:
+ return "WALTEST";
+ case CNSS_OFF:
+ return "OFF";
+ case CNSS_CCPM:
+ return "CCPM";
+ case CNSS_QVIT:
+ return "QVIT";
+ case CNSS_CALIBRATION:
+ return "CALIBRATION";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_ind_register_req_msg_v01 *req;
+ struct wlfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
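+	/* Enable every firmware indication this driver handles; per the QMI
+	 * optional-TLV convention each field is paired with a *_valid flag.
+	 */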
+ req->client_id_valid = 1;
+ req->client_id = WLFW_CLIENT_ID;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ /* fw_ready indication is replaced by fw_init_done in HST/HSP */
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+ req->pin_connect_result_enable_valid = 1;
+ req->pin_connect_result_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->qdss_trace_req_mem_enable_valid = 1;
+ req->qdss_trace_req_mem_enable = 1;
+ req->qdss_trace_save_enable_valid = 1;
+ req->qdss_trace_save_enable = 1;
+ req->qdss_trace_free_enable_valid = 1;
+ req->qdss_trace_free_enable = 1;
+ req->respond_get_info_enable_valid = 1;
+ req->respond_get_info_enable = 1;
+ req->wfc_call_twt_config_enable_valid = 1;
+ req->wfc_call_twt_config_enable = 1;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for indication register request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_IND_REGISTER_REQ_V01,
+ WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send indication register request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of indication register request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ if (resp->fw_status_valid) {
+ if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) {
+ ret = -EALREADY;
+ goto qmi_registered;
+ }
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+
+qmi_registered:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv,
+ struct wlfw_host_cap_req_msg_v01 *req)
+{
+ if (plat_priv->device_id == WCN7850_DEVICE_ID) {
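+		/* Advertise a single-chip MLO topology for WCN7850: one chip
+		 * (id 0) with two local links mapped to hardware link IDs 0
+		 * and 1.
+		 */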
+ req->mlo_capable_valid = 1;
+ req->mlo_capable = 1;
+ req->mlo_chip_id_valid = 1;
+ req->mlo_chip_id = 0;
+ req->mlo_group_id_valid = 1;
+ req->mlo_group_id = 0;
+ req->max_mlo_peer_valid = 1;
+		/* The max peer number generally won't change for a given
+		 * device, but it still needs to be kept in sync with the host
+		 * driver.
+		 */
+ req->max_mlo_peer = 32;
+ req->mlo_num_chips_valid = 1;
+ req->mlo_num_chips = 1;
+ req->mlo_chip_info_valid = 1;
+ req->mlo_chip_info[0].chip_id = 0;
+ req->mlo_chip_info[0].num_local_links = 2;
+ req->mlo_chip_info[0].hw_link_id[0] = 0;
+ req->mlo_chip_info[0].hw_link_id[1] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
+ }
+}
+
+static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_host_cap_req_msg_v01 *req;
+ struct wlfw_host_cap_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+ u64 iova_start = 0, iova_size = 0,
+ iova_ipa_start = 0, iova_ipa_size = 0;
+ u64 feature_list = 0;
+
+ cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->num_clients_valid = 1;
+ req->num_clients = 1;
+ cnss_pr_dbg("Number of clients is %d\n", req->num_clients);
+
+ req->wake_msi = cnss_bus_get_wake_irq(plat_priv);
+ if (req->wake_msi) {
+ cnss_pr_dbg("WAKE MSI base data is %d\n", req->wake_msi);
+ req->wake_msi_valid = 1;
+ }
+
+ req->bdf_support_valid = 1;
+ req->bdf_support = 1;
+
+ req->m3_support_valid = 1;
+ req->m3_support = 1;
+
+ req->m3_cache_support_valid = 1;
+ req->m3_cache_support = 1;
+
+ req->cal_done_valid = 1;
+ req->cal_done = plat_priv->cal_done;
+ cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);
+
+ if (!cnss_bus_get_iova(plat_priv, &iova_start, &iova_size) &&
+ !cnss_bus_get_iova_ipa(plat_priv, &iova_ipa_start,
+ &iova_ipa_size)) {
+ req->ddr_range_valid = 1;
+ req->ddr_range[0].start = iova_start;
+ req->ddr_range[0].size = iova_size + iova_ipa_size;
+ cnss_pr_dbg("Sending iova starting 0x%llx with size 0x%llx\n",
+ req->ddr_range[0].start, req->ddr_range[0].size);
+ }
+
+ req->host_build_type_valid = 1;
+ req->host_build_type = cnss_get_host_build_type();
+
+ cnss_wlfw_host_cap_parse_mlo(plat_priv, req);
+
+ ret = cnss_get_feature_list(plat_priv, &feature_list);
+ if (!ret) {
+ req->feature_list_valid = 1;
+ req->feature_list = feature_list;
+ cnss_pr_dbg("Sending feature list 0x%llx\n",
+ req->feature_list);
+ }
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_host_cap_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for host capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_HOST_CAP_REQ_V01,
+ WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_host_cap_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send host capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of host capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_respond_mem_req_msg_v01 *req;
+ struct wlfw_respond_mem_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int ret = 0, i;
+
+ cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mem_seg_len = plat_priv->fw_mem_seg_len;
+ for (i = 0; i < req->mem_seg_len; i++) {
+ if (!fw_mem[i].pa || !fw_mem[i].size) {
+ if (fw_mem[i].type == 0) {
+ cnss_pr_err("Invalid memory for FW type, segment = %d\n",
+ i);
+ ret = -EINVAL;
+ goto out;
+ }
+ cnss_pr_err("Memory for FW is not available for type: %u\n",
+ fw_mem[i].type);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ fw_mem[i].va, &fw_mem[i].pa,
+ fw_mem[i].size, fw_mem[i].type);
+
+ req->mem_seg[i].addr = fw_mem[i].pa;
+ req->mem_seg[i].size = fw_mem[i].size;
+ req->mem_seg[i].type = fw_mem[i].type;
+ }
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_respond_mem_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for respond memory request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_RESPOND_MEM_REQ_V01,
+ WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send respond memory request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of respond memory request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_cap_req_msg_v01 *req;
+ struct wlfw_cap_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ char *fw_build_timestamp;
+ int ret = 0, i;
+
+ cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_cap_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for target capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_CAP_REQ_V01,
+ WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cap_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send respond target capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of target capability request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ if (resp->chip_info_valid) {
+ plat_priv->chip_info.chip_id = resp->chip_info.chip_id;
+ plat_priv->chip_info.chip_family = resp->chip_info.chip_family;
+ }
+ if (resp->board_info_valid)
+ plat_priv->board_info.board_id = resp->board_info.board_id;
+ else
+ plat_priv->board_info.board_id = 0xFF;
+ if (resp->soc_info_valid)
+ plat_priv->soc_info.soc_id = resp->soc_info.soc_id;
+ if (resp->fw_version_info_valid) {
+ plat_priv->fw_version_info.fw_version =
+ resp->fw_version_info.fw_version;
+ fw_build_timestamp = resp->fw_version_info.fw_build_timestamp;
+ fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN] = '\0';
+ strlcpy(plat_priv->fw_version_info.fw_build_timestamp,
+ resp->fw_version_info.fw_build_timestamp,
+ QMI_WLFW_MAX_TIMESTAMP_LEN + 1);
+ }
+ if (resp->fw_build_id_valid) {
+ resp->fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN] = '\0';
+ strlcpy(plat_priv->fw_build_id, resp->fw_build_id,
+ QMI_WLFW_MAX_BUILD_ID_LEN + 1);
+ }
+ if (resp->voltage_mv_valid) {
+ plat_priv->cpr_info.voltage = resp->voltage_mv;
+ cnss_pr_dbg("Voltage for CPR: %dmV\n",
+ plat_priv->cpr_info.voltage);
+ cnss_update_cpr_info(plat_priv);
+ }
+ if (resp->time_freq_hz_valid) {
+ plat_priv->device_freq_hz = resp->time_freq_hz;
+ cnss_pr_dbg("Device frequency is %d HZ\n",
+ plat_priv->device_freq_hz);
+ }
+ if (resp->otp_version_valid)
+ plat_priv->otp_version = resp->otp_version;
+ if (resp->dev_mem_info_valid) {
+ for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
+ plat_priv->dev_mem_info[i].start =
+ resp->dev_mem_info[i].start;
+ plat_priv->dev_mem_info[i].size =
+ resp->dev_mem_info[i].size;
+ cnss_pr_buf("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n",
+ i, plat_priv->dev_mem_info[i].start,
+ plat_priv->dev_mem_info[i].size);
+ }
+ }
+ if (resp->fw_caps_valid)
+ plat_priv->fw_pcie_gen_switch =
+ !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01);
+
+ cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n",
+ plat_priv->chip_info.chip_id,
+ plat_priv->chip_info.chip_family,
+ plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
+ plat_priv->otp_version);
+ cnss_pr_dbg("fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s\n",
+ plat_priv->fw_version_info.fw_version,
+ plat_priv->fw_version_info.fw_build_timestamp,
+ plat_priv->fw_build_id);
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
+ u32 bdf_type, char *filename,
+ u32 filename_len)
+{
+ char filename_tmp[MAX_FIRMWARE_NAME_LEN];
+ int ret = 0;
+
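+	/* Select the BDF file name from the board ID: the default name when
+	 * the board ID is 0xFF, a two-hex-digit suffix for IDs below 0xFF,
+	 * and a "bdwlan%02x.e%02x" / "bdwlan%02x.b%02x" form for two-byte
+	 * IDs. For IDs up to 0xFF the "bdwlang" variants are used when the
+	 * chip ID has the GF bit set.
+	 */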
+ switch (bdf_type) {
+ case CNSS_BDF_ELF:
+		/* Board ID will be equal to or less than 0xFF in the GF mask case */
+ if (plat_priv->board_info.board_id == 0xFF) {
+ if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
+ snprintf(filename_tmp, filename_len,
+ ELF_BDF_FILE_NAME_GF);
+ else
+ snprintf(filename_tmp, filename_len,
+ ELF_BDF_FILE_NAME);
+ } else if (plat_priv->board_info.board_id < 0xFF) {
+ if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
+ snprintf(filename_tmp, filename_len,
+ ELF_BDF_FILE_NAME_GF_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ else
+ snprintf(filename_tmp, filename_len,
+ ELF_BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ } else {
+ snprintf(filename_tmp, filename_len,
+ BDF_FILE_NAME_PREFIX "%02x.e%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
+ }
+ break;
+ case CNSS_BDF_BIN:
+ if (plat_priv->board_info.board_id == 0xFF) {
+ if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
+ snprintf(filename_tmp, filename_len,
+ BIN_BDF_FILE_NAME_GF);
+ else
+ snprintf(filename_tmp, filename_len,
+ BIN_BDF_FILE_NAME);
+ } else if (plat_priv->board_info.board_id < 0xFF) {
+ if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
+ snprintf(filename_tmp, filename_len,
+ BIN_BDF_FILE_NAME_GF_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ else
+ snprintf(filename_tmp, filename_len,
+ BIN_BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ } else {
+ snprintf(filename_tmp, filename_len,
+ BDF_FILE_NAME_PREFIX "%02x.b%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
+ }
+ break;
+ case CNSS_BDF_REGDB:
+ snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
+ break;
+ case CNSS_BDF_HDS:
+ snprintf(filename_tmp, filename_len, HDS_FILE_NAME);
+ break;
+ default:
+		cnss_pr_err("Invalid BDF type: %d\n", bdf_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
+
+ return ret;
+}
+
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+ u32 bdf_type)
+{
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct wlfw_bdf_download_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ char filename[MAX_FIRMWARE_NAME_LEN];
+ const struct firmware *fw_entry = NULL;
+ const u8 *temp;
+ unsigned int remaining;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending BDF download message, state: 0x%lx, type: %d\n",
+ plat_priv->driver_state, bdf_type);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ ret = cnss_get_bdf_file_name(plat_priv, bdf_type,
+ filename, sizeof(filename));
+ if (ret)
+ goto err_req_fw;
+
+ if (bdf_type == CNSS_BDF_REGDB)
+ ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
+ filename);
+ else
+ ret = firmware_request_nowarn(&fw_entry, filename,
+ &plat_priv->plat_dev->dev);
+
+ if (ret) {
+ cnss_pr_err("Failed to load BDF: %s, ret: %d\n", filename, ret);
+ goto err_req_fw;
+ }
+
+ temp = fw_entry->data;
+ remaining = fw_entry->size;
+
+ cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);
+
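+	/* Send the BDF in segments of up to QMI_WLFW_MAX_DATA_SIZE_V01
+	 * bytes; the final segment sets req->end so firmware knows the
+	 * transfer is complete.
+	 */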
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = plat_priv->board_info.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+ req->bdf_type_valid = 1;
+ req->bdf_type = bdf_type;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_bdf_download_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for BDF download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send respond BDF download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of BDF download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto err_send;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ release_firmware(fw_entry);
+
+ if (resp->host_bdf_data_valid) {
+		/* On QCA6490, enable the S3E regulator for IPA configuration only */
+ if (!(resp->host_bdf_data & QMI_WLFW_HW_XPA_V01))
+ cnss_enable_int_pow_amp_vreg(plat_priv);
+
+ plat_priv->cbc_file_download =
+ resp->host_bdf_data & QMI_WLFW_CBC_FILE_DOWNLOAD_V01;
+ cnss_pr_info("Host BDF config: HW_XPA: %d CalDB: %d\n",
+ resp->host_bdf_data & QMI_WLFW_HW_XPA_V01,
+ plat_priv->cbc_file_download);
+ }
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+err_send:
+ release_firmware(fw_entry);
+err_req_fw:
+ if (!(bdf_type == CNSS_BDF_REGDB ||
+ test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state) ||
+ ret == -EAGAIN))
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_m3_info_req_msg_v01 *req;
+ struct wlfw_m3_info_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ if (!m3_mem->pa || !m3_mem->size) {
+ cnss_pr_err("Memory for M3 is not available\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+
+ req->addr = plat_priv->m3_mem.pa;
+ req->size = plat_priv->m3_mem.size;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_m3_info_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for M3 information request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_M3_INFO_REQ_V01,
+ WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_m3_info_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send M3 information request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of M3 information request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
+ u8 *mac, u32 mac_len)
+{
+ struct wlfw_mac_addr_req_msg_v01 req;
+ struct wlfw_mac_addr_resp_msg_v01 resp = {0};
+ struct qmi_txn txn;
+ int ret;
+
+ if (!plat_priv || !mac || mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01)
+ return -EINVAL;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_mac_addr_resp_msg_v01_ei, &resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for mac req, err: %d\n",
+ ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n",
+ mac, plat_priv->driver_state);
+ memcpy(req.mac_addr, mac, mac_len);
+ req.mac_addr_valid = 1;
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_MAC_ADDR_REQ_V01,
+ WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_mac_addr_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send mac req, err: %d\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for resp of mac req, err: %d\n",
+ ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+ ret = -resp.resp.result;
+ }
+out:
+ return ret;
+}
+
+int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
+ u32 total_size)
+{
+ int ret = 0;
+ struct wlfw_qdss_trace_data_req_msg_v01 *req;
+ struct wlfw_qdss_trace_data_resp_msg_v01 *resp;
+ unsigned char *p_qdss_trace_data_temp, *p_qdss_trace_data = NULL;
+ unsigned int remaining;
+ struct qmi_txn txn;
+
+ cnss_pr_dbg("%s\n", __func__);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ p_qdss_trace_data = kzalloc(total_size, GFP_KERNEL);
+ if (!p_qdss_trace_data) {
+		ret = -ENOMEM;
+ goto end;
+ }
+
+ remaining = total_size;
+ p_qdss_trace_data_temp = p_qdss_trace_data;
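+	/* Firmware returns the QDSS trace in segments; keep requesting until
+	 * the whole total_size has been copied and firmware sets the end
+	 * flag.
+	 */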
+ while (remaining && resp->end == 0) {
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_qdss_trace_data_resp_msg_v01_ei, resp);
+
+ if (ret < 0) {
+ cnss_pr_err("Fail to init txn for QDSS trace resp %d\n",
+ ret);
+ goto fail;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_QDSS_TRACE_DATA_REQ_V01,
+ WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_qdss_trace_data_req_msg_v01_ei, req);
+
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send QDSS trace data req %d\n",
+ ret);
+ goto fail;
+ }
+
+ ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
+
+ if (ret < 0) {
+ cnss_pr_err("QDSS trace resp wait failed with rc %d\n",
+ ret);
+ goto fail;
+ } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QMI QDSS trace request rejected, result:%d error:%d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto fail;
+ } else {
+ ret = 0;
+ }
+
+		cnss_pr_dbg("%s: response total size %d data len %d\n",
+			    __func__, resp->total_size, resp->data_len);
+
+ if ((resp->total_size_valid == 1 &&
+ resp->total_size == total_size) &&
+ (resp->seg_id_valid == 1 && resp->seg_id == req->seg_id) &&
+ (resp->data_valid == 1 &&
+ resp->data_len <= QMI_WLFW_MAX_DATA_SIZE_V01)) {
+ memcpy(p_qdss_trace_data_temp,
+ resp->data, resp->data_len);
+ } else {
+			cnss_pr_err("%s: Unmatched qdss trace data, Expect total_size %u, seg_id %u, Recv total_size_valid %u, total_size %u, seg_id_valid %u, seg_id %u, data_len_valid %u, data_len %u\n",
+ __func__,
+ total_size, req->seg_id,
+ resp->total_size_valid,
+ resp->total_size,
+ resp->seg_id_valid,
+ resp->seg_id,
+ resp->data_valid,
+ resp->data_len);
+ ret = -1;
+ goto fail;
+ }
+
+ remaining -= resp->data_len;
+ p_qdss_trace_data_temp += resp->data_len;
+ req->seg_id++;
+ }
+
+ if (remaining == 0 && (resp->end_valid && resp->end)) {
+ ret = cnss_genl_send_msg(p_qdss_trace_data,
+ CNSS_GENL_MSG_TYPE_QDSS, file_name,
+ total_size);
+ if (ret < 0) {
+ cnss_pr_err("Fail to save QDSS trace data: %d\n",
+ ret);
+ ret = -1;
+ goto fail;
+ }
+ } else {
+		cnss_pr_err("%s: QDSS trace file corrupted: remaining %u, end_valid %u, end %u\n",
+ __func__,
+ remaining, resp->end_valid, resp->end);
+ ret = -1;
+ goto fail;
+ }
+
+fail:
+ kfree(p_qdss_trace_data);
+
+end:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv,
+ char *filename, u32 filename_len)
+{
+ char filename_tmp[MAX_FIRMWARE_NAME_LEN];
+ char *debug_str = QDSS_DEBUG_FILE_STR;
+
+ if (plat_priv->device_id == WCN7850_DEVICE_ID)
+ debug_str = "";
+
+ if (plat_priv->device_version.major_version == FW_V2_NUMBER)
+ snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
+ "_%s%s.cfg", debug_str, HW_V2_NUMBER);
+ else
+ snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
+ "_%s%s.cfg", debug_str, HW_V1_NUMBER);
+
+ cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
+}
+
+int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_qdss_trace_config_download_req_msg_v01 *req;
+ struct wlfw_qdss_trace_config_download_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ const struct firmware *fw_entry = NULL;
+ const u8 *temp;
+ char qdss_cfg_filename[MAX_FIRMWARE_NAME_LEN];
+ unsigned int remaining;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending QDSS config download message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename, sizeof(qdss_cfg_filename));
+ ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
+ qdss_cfg_filename);
+ if (ret) {
+ cnss_pr_err("Failed to load QDSS: %s\n",
+ qdss_cfg_filename);
+ goto err_req_fw;
+ }
+
+ temp = fw_entry->data;
+ remaining = fw_entry->size;
+
+ cnss_pr_dbg("Downloading QDSS: %s, size: %u\n",
+ qdss_cfg_filename, remaining);
+
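+	/* Same segmented download scheme as the BDF: chunks of up to
+	 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes, with req->end marking the last
+	 * segment.
+	 */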
+ while (remaining) {
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init
+ (&plat_priv->qmi_wlfw, &txn,
+ wlfw_qdss_trace_config_download_resp_msg_v01_ei,
+ resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for QDSS download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01,
+ WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_qdss_trace_config_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send respond QDSS download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of QDSS download request, err: %d\n",
+ ret);
+ goto err_send;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QDSS download request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto err_send;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ release_firmware(fw_entry);
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+err_send:
+ release_firmware(fw_entry);
+err_req_fw:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static int wlfw_send_qdss_trace_mode_req
+ (struct cnss_plat_data *plat_priv,
+ enum wlfw_qdss_trace_mode_enum_v01 mode,
+ unsigned long long option)
+{
+ int rc = 0;
+ int tmp = 0;
+ struct wlfw_qdss_trace_mode_req_msg_v01 *req;
+ struct wlfw_qdss_trace_mode_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mode_valid = 1;
+ req->mode = mode;
+ req->option_valid = 1;
+ req->option = option;
+
+ tmp = plat_priv->hw_trc_override;
+
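+	/* Clamp the hw_trc_override setting into the valid QMI range:
+	 * negative values map to QMI_PARAM_INVALID_V01 and anything above
+	 * QMI_PARAM_DISABLE_V01 is capped at QMI_PARAM_DISABLE_V01.
+	 */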
+ req->hw_trc_disable_override_valid = 1;
+ req->hw_trc_disable_override =
+ (tmp > QMI_PARAM_DISABLE_V01 ? QMI_PARAM_DISABLE_V01 :
+ (tmp < 0 ? QMI_PARAM_INVALID_V01 : tmp));
+
+	cnss_pr_dbg("%s: mode %u, option %llu, hw_trc_disable_override: %u\n",
+ __func__, mode, option, req->hw_trc_disable_override);
+
+ rc = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_qdss_trace_mode_resp_msg_v01_ei, resp);
+ if (rc < 0) {
+ cnss_pr_err("Fail to init txn for QDSS Mode resp %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_QDSS_TRACE_MODE_REQ_V01,
+ WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_qdss_trace_mode_req_msg_v01_ei, req);
+ if (rc < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send QDSS Mode req %d\n", rc);
+ goto out;
+ }
+
+ rc = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
+ if (rc < 0) {
+ cnss_pr_err("QDSS Mode resp wait failed with rc %d\n",
+ rc);
+ goto out;
+ } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QMI QDSS Mode request rejected, result:%d error:%d\n",
+ resp->resp.result, resp->resp.error);
+ rc = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(resp);
+ kfree(req);
+ return rc;
+out:
+ kfree(resp);
+ kfree(req);
+ CNSS_QMI_ASSERT();
+ return rc;
+}
+
+int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv)
+{
+ return wlfw_send_qdss_trace_mode_req(plat_priv,
+ QMI_WLFW_QDSS_TRACE_ON_V01, 0);
+}
+
+int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option)
+{
+ return wlfw_send_qdss_trace_mode_req(plat_priv, QMI_WLFW_QDSS_TRACE_OFF_V01,
+ option);
+}
+
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_mode mode)
+{
+ struct wlfw_wlan_mode_req_msg_v01 *req;
+ struct wlfw_wlan_mode_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
+ cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
+
+ if (mode == CNSS_OFF &&
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Recovery is in progress, ignore mode off request\n");
+ return 0;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mode = (enum wlfw_driver_mode_enum_v01)mode;
+ req->hw_debug_valid = 1;
+ req->hw_debug = 0;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_wlan_mode_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for mode request, mode: %s(%d), err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_WLAN_MODE_REQ_V01,
+ WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_mode_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of mode request, mode: %s(%d), err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, resp->resp.result,
+ resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ if (mode == CNSS_OFF) {
+ cnss_pr_dbg("WLFW service is disconnected while sending mode off request\n");
+ ret = 0;
+ } else {
+ CNSS_QMI_ASSERT();
+ }
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct cnss_wlan_enable_cfg *config,
+ const char *host_version)
+{
+ struct wlfw_wlan_cfg_req_msg_v01 *req;
+ struct wlfw_wlan_cfg_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ u32 i;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->host_version_valid = 1;
+ strlcpy(req->host_version, host_version,
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+ req->tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req->tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req->tgt_cfg_len; i++) {
+ req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req->svc_cfg_len; i++) {
+ req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req->shadow_reg_v2_valid = 1;
+ if (config->num_shadow_reg_v2_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
+ req->shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
+ else
+ req->shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
+
+ memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
+ sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
+ * req->shadow_reg_v2_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_wlan_cfg_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for WLAN config request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_WLAN_CFG_REQ_V01,
+ WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send WLAN config request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of WLAN config request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_read_req_msg_v01 *req;
+ struct wlfw_athdiag_read_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag read: data %pK, data_len %u\n",
+ data, data_len);
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+ plat_priv->driver_state, offset, mem_type, data_len);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->offset = offset;
+ req->mem_type = mem_type;
+ req->data_len = data_len;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_athdiag_read_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for athdiag read request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_ATHDIAG_READ_REQ_V01,
+ WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_athdiag_read_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send athdiag read request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of athdiag read request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Athdiag read request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ if (!resp->data_valid || resp->data_len != data_len) {
+ cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+ resp->data_valid, resp->data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, resp->data, resp->data_len);
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_write_req_msg_v01 *req;
+ struct wlfw_athdiag_write_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag write: data %pK, data_len %u\n",
+ data, data_len);
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n",
+ plat_priv->driver_state, offset, mem_type, data_len, data);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->offset = offset;
+ req->mem_type = mem_type;
+ req->data_len = data_len;
+ memcpy(req->data, data, data_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_athdiag_write_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for athdiag write request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_ATHDIAG_WRITE_REQ_V01,
+ WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_athdiag_write_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send athdiag write request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of athdiag write request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Athdiag write request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode)
+{
+ struct wlfw_ini_req_msg_v01 *req;
+ struct wlfw_ini_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+ plat_priv->driver_state, fw_log_mode);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->enablefwlog_valid = 1;
+ req->enablefwlog = fw_log_mode;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_ini_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for ini request, fw_log_mode: %d, err: %d\n",
+ fw_log_mode, ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_INI_REQ_V01,
+ WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ini_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send ini request, fw_log_mode: %d, err: %d\n",
+ fw_log_mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of ini request, fw_log_mode: %d, err: %d\n",
+ fw_log_mode, ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Ini request failed, fw_log_mode: %d, result: %d, err: %d\n",
+ fw_log_mode, resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_pcie_gen_switch_req_msg_v01 req;
+ struct wlfw_pcie_gen_switch_resp_msg_v01 resp = {0};
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 ||
+ !plat_priv->fw_pcie_gen_switch) {
+ cnss_pr_dbg("PCIE Gen speed not setup\n");
+ return 0;
+ }
+
+ cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n",
+ plat_priv->pcie_gen_speed, plat_priv->driver_state);
+ req.pcie_speed = (enum wlfw_pcie_gen_speed_v01)
+ plat_priv->pcie_gen_speed;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01,
+ WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_pcie_gen_switch_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n",
+ plat_priv->pcie_gen_speed, resp.resp.result,
+ resp.resp.error);
+ ret = -resp.resp.result;
+ }
+out:
+	/* Reset PCIE Gen speed after one-time use */
+ plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01;
+ return ret;
+}
+
+int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_antenna_switch_req_msg_v01 *req;
+ struct wlfw_antenna_switch_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending antenna switch sync request, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_antenna_switch_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for antenna switch request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_ANTENNA_SWITCH_REQ_V01,
+ WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_antenna_switch_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send antenna switch request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of antenna switch request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Antenna switch request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ if (resp->antenna_valid)
+ plat_priv->antenna = resp->antenna;
+
+ cnss_pr_dbg("Antenna valid: %u, antenna 0x%llx\n",
+ resp->antenna_valid, resp->antenna);
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_antenna_grant_req_msg_v01 *req;
+ struct wlfw_antenna_grant_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending antenna grant sync request, state: 0x%lx, grant 0x%llx\n",
+ plat_priv->driver_state, plat_priv->grant);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->grant_valid = 1;
+ req->grant = plat_priv->grant;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_antenna_grant_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for antenna grant request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_ANTENNA_GRANT_REQ_V01,
+ WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_antenna_grant_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send antenna grant request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of antenna grant request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Antenna grant request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
+ struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int ret = 0;
+ int i;
+
+ cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mem_seg_len = plat_priv->qdss_mem_seg_len;
+ for (i = 0; i < req->mem_seg_len; i++) {
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ qdss_mem[i].va, &qdss_mem[i].pa,
+ qdss_mem[i].size, qdss_mem[i].type);
+
+ req->mem_seg[i].addr = qdss_mem[i].pa;
+ req->mem_seg[i].size = qdss_mem[i].size;
+ req->mem_seg[i].type = qdss_mem[i].type;
+ }
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
+ WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static int cnss_wlfw_wfc_call_status_send_sync
+ (struct cnss_plat_data *plat_priv,
+ const struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg)
+{
+ struct wlfw_wfc_call_status_req_msg_v01 *req;
+ struct wlfw_wfc_call_status_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Drop IMS WFC indication as FW not initialized\n");
+ return -EINVAL;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+	/* The WFC call r1 design used CNSS as a pass-through with an opaque
+	 * hex buffer. In the r2 update the QMI structure is expanded and, as
+	 * a side effect, the QMI-decoded structures contain padding, so the
+	 * opaque buffer design can no longer be used. For backward
+	 * compatibility with r1, copy only the wfc_call_active value into
+	 * the hex buffer.
+	 */
+ req->wfc_call_status_len = sizeof(ind_msg->wfc_call_active);
+ req->wfc_call_status[0] = ind_msg->wfc_call_active;
+
+ /* wfc_call_active is mandatory in IMS indication */
+ req->wfc_call_active_valid = 1;
+ req->wfc_call_active = ind_msg->wfc_call_active;
+ req->all_wfc_calls_held_valid = ind_msg->all_wfc_calls_held_valid;
+ req->all_wfc_calls_held = ind_msg->all_wfc_calls_held;
+ req->is_wfc_emergency_valid = ind_msg->is_wfc_emergency_valid;
+ req->is_wfc_emergency = ind_msg->is_wfc_emergency;
+ req->twt_ims_start_valid = ind_msg->twt_ims_start_valid;
+ req->twt_ims_start = ind_msg->twt_ims_start;
+ req->twt_ims_int_valid = ind_msg->twt_ims_int_valid;
+ req->twt_ims_int = ind_msg->twt_ims_int;
+ req->media_quality_valid = ind_msg->media_quality_valid;
+ req->media_quality =
+ (enum wlfw_wfc_media_quality_v01)ind_msg->media_quality;
+
+ cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_wfc_call_status_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
+ WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wfc_call_status_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
+ struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
+ plat_priv->dynamic_feature,
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mask_valid = 1;
+ req->mask = plat_priv->dynamic_feature;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
+ WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+ void *cmd, int cmd_len)
+{
+ struct wlfw_get_info_req_msg_v01 *req;
+ struct wlfw_get_info_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ cnss_pr_buf("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
+ type, cmd_len, plat_priv->driver_state);
+
+ if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->type = type;
+ req->data_len = cmd_len;
+ memcpy(req->data, cmd, req->data_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_get_info_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_GET_INFO_REQ_V01,
+ WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_get_info_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Get info request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
+{
+ return QMI_WLFW_TIMEOUT_MS;
+}
+
+static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_request_mem_ind_msg_v01 *ind_msg = data;
+ int i;
+
+ cnss_pr_dbg("Received QMI WLFW request memory indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ cnss_pr_dbg("FW requests for memory, size: 0x%x, type: %u\n",
+ ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
+ plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
+ plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
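+		/* DDR-type segments are allocated physically contiguous
+		 * (DMA_ATTR_FORCE_CONTIGUOUS); the calibration segment is
+		 * also remembered in cal_mem for later use.
+		 */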
+ if (plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
+ plat_priv->fw_mem[i].attrs |=
+ DMA_ATTR_FORCE_CONTIGUOUS;
+ if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
+ plat_priv->cal_mem = &plat_priv->fw_mem[i];
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
+ 0, NULL);
+}
+
+static void cnss_wlfw_fw_mem_ready_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+ cnss_pr_dbg("Received QMI WLFW FW memory ready indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_READY,
+ 0, NULL);
+}
+
+/**
+ * cnss_wlfw_fw_ready_ind_cb() - FW ready indication handler (Helium arch)
+ *
+ * This indication is not required for HST/HSP targets, as firmware
+ * calibration completion is reported via QMI_WLFW_CAL_DONE_IND_V01.
+ */
+static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ struct cnss_cal_info *cal_info;
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ if (plat_priv->device_id == QCA6390_DEVICE_ID ||
+ plat_priv->device_id == QCA6490_DEVICE_ID) {
+		cnss_pr_dbg("Ignore FW Ready Indication for HST/HSP\n");
+ return;
+ }
+
+ cnss_pr_dbg("Received QMI WLFW FW ready indication.\n");
+ cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+ if (!cal_info)
+ return;
+
+ cal_info->cal_status = CNSS_CAL_DONE;
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ 0, cal_info);
+}
+
+static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+ cnss_pr_dbg("Received QMI WLFW FW initialization done indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL);
+}
+
+static void cnss_wlfw_pin_result_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_pin_connect_result_ind_msg_v01 *ind_msg = data;
+
+ cnss_pr_dbg("Received QMI WLFW pin connect result indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ if (ind_msg->pwr_pin_result_valid)
+ plat_priv->pin_result.fw_pwr_pin_result =
+ ind_msg->pwr_pin_result;
+ if (ind_msg->phy_io_pin_result_valid)
+ plat_priv->pin_result.fw_phy_io_pin_result =
+ ind_msg->phy_io_pin_result;
+ if (ind_msg->rf_pin_result_valid)
+ plat_priv->pin_result.fw_rf_pin_result = ind_msg->rf_pin_result;
+
+ cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
+ ind_msg->pwr_pin_result, ind_msg->phy_io_pin_result,
+ ind_msg->rf_pin_result);
+}
+
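+/*
+ * Report the size of the downloaded calibration file to firmware via a
+ * synchronous QMI_WLFW_CAL_REPORT request.
+ */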
+int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
+ u32 cal_file_download_size)
+{
+ struct wlfw_cal_report_req_msg_v01 req = {0};
+ struct wlfw_cal_report_resp_msg_v01 resp = {0};
+ struct qmi_txn txn;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending cal file report request. File size: %d, state: 0x%lx\n",
+ cal_file_download_size, plat_priv->driver_state);
+ req.cal_file_download_size_valid = 1;
+ req.cal_file_download_size = cal_file_download_size;
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_cal_report_resp_msg_v01_ei, &resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for Cal Report request, err: %d\n",
+ ret);
+ goto out;
+ }
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_CAL_REPORT_REQ_V01,
+ WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cal_report_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send Cal Report request, err: %d\n",
+ ret);
+ goto out;
+ }
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of Cal Report request, err: %d\n",
+ ret);
+ goto out;
+ }
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Cal Report request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -resp.resp.result;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_cal_done_ind_msg_v01 *ind = data;
+ struct cnss_cal_info *cal_info;
+
+ cnss_pr_dbg("Received Cal done indication. File size: %d\n",
+ ind->cal_file_upload_size);
+ cnss_pr_info("Calibration took %d ms\n",
+ jiffies_to_msecs(jiffies - plat_priv->cal_time));
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+ if (ind->cal_file_upload_size_valid)
+ plat_priv->cal_file_size = ind->cal_file_upload_size;
+ cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+ if (!cal_info)
+ return;
+
+ cal_info->cal_status = CNSS_CAL_DONE;
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ 0, cal_info);
+}
+
+static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
+ int i;
+
+ cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ if (plat_priv->qdss_mem_seg_len) {
+ cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
+ plat_priv->qdss_mem_seg_len);
+ return;
+ }
+
+ plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ cnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n",
+ ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
+ plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
+ plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+ 0, NULL);
+}
+
+/**
+ * cnss_wlfw_fw_mem_file_save_ind_cb: Save given FW mem to filesystem
+ *
+ * The QDSS_TRACE_SAVE_IND message is overloaded to request that any
+ * host-allocated FW memory segment be dumped to the file system. Only one
+ * memory type can be saved per indication; it is provided in mem_seg index 0.
+ *
+ * Return: None
+ */
+static void cnss_wlfw_fw_mem_file_save_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
+ struct cnss_qmi_event_fw_mem_file_save_data *event_data;
+ int i = 0;
+
+ if (!txn || !data) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+ cnss_pr_dbg("QMI fw_mem_file_save: source: %d mem_seg: %d type: %u len: %u\n",
+ ind_msg->source, ind_msg->mem_seg_valid,
+ ind_msg->mem_seg[0].type, ind_msg->mem_seg_len);
+
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+ if (!event_data)
+ return;
+
+ event_data->mem_type = ind_msg->mem_seg[0].type;
+ event_data->mem_seg_len = ind_msg->mem_seg_len;
+ event_data->total_size = ind_msg->total_size;
+
+ if (ind_msg->mem_seg_valid) {
+		if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
+ cnss_pr_err("Invalid seg len indication\n");
+ goto free_event_data;
+ }
+ for (i = 0; i < ind_msg->mem_seg_len; i++) {
+ event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
+ event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
+ if (event_data->mem_type != ind_msg->mem_seg[i].type) {
+ cnss_pr_err("FW Mem file save ind cannot have multiple mem types\n");
+ goto free_event_data;
+ }
+ cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
+ i, ind_msg->mem_seg[i].addr,
+ ind_msg->mem_seg[i].size);
+ }
+ }
+
+ if (ind_msg->file_name_valid)
+ strlcpy(event_data->file_name, ind_msg->file_name,
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+ if (ind_msg->source == 1) {
+ if (!ind_msg->file_name_valid)
+ strlcpy(event_data->file_name, "qdss_trace_wcss_etb",
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
+ 0, event_data);
+ } else {
+ if (event_data->mem_type == QMI_WLFW_MEM_QDSS_V01) {
+ if (!ind_msg->file_name_valid)
+ strlcpy(event_data->file_name, "qdss_trace_ddr",
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+ } else {
+ if (!ind_msg->file_name_valid)
+ strlcpy(event_data->file_name, "fw_mem_dump",
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
+ 0, event_data);
+ }
+
+ return;
+
+free_event_data:
+ kfree(event_data);
+}
+
+static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
+ 0, NULL);
+}
+
+static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
+
+ cnss_pr_buf("Received QMI WLFW respond get info indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ cnss_pr_buf("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
+ ind_msg->data_len, ind_msg->type,
+ ind_msg->is_last, ind_msg->seq_no);
+
+ if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
+ plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
+ (void *)ind_msg->data,
+ ind_msg->data_len);
+}
+
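+/*
+ * Relay the TWT configuration reported by firmware to the IMS private
+ * service as a synchronous WFC_CALL_TWT_CONFIG request; requires the IMS
+ * QMI connection to be up.
+ */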
+static int cnss_ims_wfc_call_twt_cfg_send_sync
+ (struct cnss_plat_data *plat_priv,
+ const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg)
+{
+ struct ims_private_service_wfc_call_twt_config_req_msg_v01 *req;
+ struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
+ cnss_pr_err("Drop FW WFC indication as IMS QMI not connected\n");
+ return -EINVAL;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->twt_sta_start_valid = ind_msg->twt_sta_start_valid;
+ req->twt_sta_start = ind_msg->twt_sta_start;
+ req->twt_sta_int_valid = ind_msg->twt_sta_int_valid;
+ req->twt_sta_int = ind_msg->twt_sta_int;
+ req->twt_sta_upo_valid = ind_msg->twt_sta_upo_valid;
+ req->twt_sta_upo = ind_msg->twt_sta_upo;
+ req->twt_sta_sp_valid = ind_msg->twt_sta_sp_valid;
+ req->twt_sta_sp = ind_msg->twt_sta_sp;
+	req->twt_sta_dl_valid = ind_msg->twt_sta_dl_valid;
+	req->twt_sta_dl = ind_msg->twt_sta_dl;
+ req->twt_sta_config_changed_valid =
+ ind_msg->twt_sta_config_changed_valid;
+ req->twt_sta_config_changed = ind_msg->twt_sta_config_changed;
+
+ cnss_pr_dbg("CNSS->IMS: TWT_CFG_REQ: state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ ret =
+ qmi_txn_init(&plat_priv->ims_qmi, &txn,
+ ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei,
+ resp);
+ if (ret < 0) {
+ cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Txn Init Err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret =
+ qmi_send_request(&plat_priv->ims_qmi, NULL, &txn,
+ QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01,
+ IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN,
+ ims_private_service_wfc_call_twt_config_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Send Err: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: QMI Wait Err: %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: Result: %d Err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ int ret;
+ struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
+
+ ret = cnss_ims_wfc_call_twt_cfg_send_sync(plat_priv, ind_msg);
+ kfree(data);
+ return ret;
+}
+
+static void cnss_wlfw_process_twt_cfg_ind(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
+ struct wlfw_wfc_call_twt_config_ind_msg_v01 *event_data;
+
+ if (!txn) {
+ cnss_pr_err("FW->CNSS: TWT_CFG_IND: Spurious indication\n");
+ return;
+ }
+
+ if (!ind_msg) {
+ cnss_pr_err("FW->CNSS: TWT_CFG_IND: Invalid indication\n");
+ return;
+ }
+ cnss_pr_dbg("FW->CNSS: TWT_CFG_IND: %x %llx, %x %x, %x %x, %x %x, %x %x, %x %x\n",
+ ind_msg->twt_sta_start_valid, ind_msg->twt_sta_start,
+ ind_msg->twt_sta_int_valid, ind_msg->twt_sta_int,
+ ind_msg->twt_sta_upo_valid, ind_msg->twt_sta_upo,
+ ind_msg->twt_sta_sp_valid, ind_msg->twt_sta_sp,
+ ind_msg->twt_sta_dl_valid, ind_msg->twt_sta_dl,
+ ind_msg->twt_sta_config_changed_valid,
+ ind_msg->twt_sta_config_changed);
+
+ event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
+ if (!event_data)
+ return;
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND, 0,
+ event_data);
+}
+
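+/*
+ * Dispatch table for unsolicited WLFW messages: each entry maps a QMI
+ * indication ID to its decode info (ei/decoded_size) and handler. The
+ * table is registered with the WLFW handle in cnss_qmi_init().
+ */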
+static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = wlfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_request_mem_ind_msg_v01),
+ .fn = cnss_wlfw_request_mem_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = wlfw_fw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_mem_ready_ind_msg_v01),
+ .fn = cnss_wlfw_fw_mem_ready_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = wlfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
+ .fn = cnss_wlfw_fw_ready_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01),
+ .fn = cnss_wlfw_fw_init_done_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01,
+ .ei = wlfw_pin_connect_result_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_pin_connect_result_ind_msg_v01),
+ .fn = cnss_wlfw_pin_result_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_CAL_DONE_IND_V01,
+ .ei = wlfw_cal_done_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
+ .fn = cnss_wlfw_cal_done_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
+ .ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
+ .fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
+ .ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
+ .fn = cnss_wlfw_fw_mem_file_save_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
+ .ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
+ .fn = cnss_wlfw_qdss_trace_free_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
+ .ei = wlfw_respond_get_info_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_respond_get_info_ind_msg_v01),
+ .fn = cnss_wlfw_respond_get_info_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01,
+ .ei = wlfw_wfc_call_twt_config_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_wfc_call_twt_config_ind_msg_v01),
+ .fn = cnss_wlfw_process_twt_cfg_ind
+ },
+ {}
+};
+
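+/* Connect the WLFW QMI socket to the node/port announced by the server */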
+static int cnss_wlfw_connect_to_server(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_qmi_event_server_arrive_data *event_data = data;
+ struct qmi_handle *qmi_wlfw = &plat_priv->qmi_wlfw;
+ struct sockaddr_qrtr sq = { 0 };
+ int ret = 0;
+
+ if (!event_data)
+ return -EINVAL;
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = event_data->node;
+ sq.sq_port = event_data->port;
+
+ ret = kernel_connect(qmi_wlfw->sock, (struct sockaddr *)&sq,
+ sizeof(sq), 0);
+ if (ret < 0) {
+ cnss_pr_err("Failed to connect to QMI WLFW remote service port\n");
+ goto out;
+ }
+
+ set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ kfree(data);
+ return 0;
+
+out:
+ CNSS_QMI_ASSERT();
+ kfree(data);
+ return ret;
+}
+
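+/*
+ * Handle WLFW service arrival: connect to the server, register with
+ * firmware for the WLFW indications and send host capabilities.
+ */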
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) {
+ cnss_pr_err("Unexpected WLFW server arrive\n");
+ CNSS_ASSERT(0);
+ return -EINVAL;
+ }
+
+ cnss_ignore_qmi_failure(false);
+
+ ret = cnss_wlfw_connect_to_server(plat_priv, data);
+ if (ret < 0)
+ goto out;
+
+ ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+ if (ret < 0) {
+ if (ret == -EALREADY)
+ ret = 0;
+ goto out;
+ }
+
+ ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ cnss_qmi_deinit(plat_priv);
+
+ clear_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
+
+ ret = cnss_qmi_init(plat_priv);
+ if (ret < 0) {
+		cnss_pr_err("QMI WLFW service registration failed, ret: %d\n",
+			    ret);
+ CNSS_ASSERT(0);
+ }
+ return 0;
+}
+
+static int wlfw_new_server(struct qmi_handle *qmi_wlfw,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ struct cnss_qmi_event_server_arrive_data *event_data;
+
+ if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
+ cnss_pr_info("WLFW server delete in progress, Ignore server arrive, state: 0x%lx\n",
+ plat_priv->driver_state);
+ return 0;
+ }
+
+ cnss_pr_dbg("WLFW server arriving: node %u port %u\n",
+ service->node, service->port);
+
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+ if (!event_data)
+ return -ENOMEM;
+
+ event_data->node = service->node;
+ event_data->port = service->port;
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ 0, event_data);
+
+ return 0;
+}
+
+static void wlfw_del_server(struct qmi_handle *qmi_wlfw,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+ if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
+ cnss_pr_info("WLFW server delete in progress, Ignore server delete, state: 0x%lx\n",
+ plat_priv->driver_state);
+ return;
+ }
+
+ cnss_pr_dbg("WLFW server exiting\n");
+
+ if (plat_priv) {
+ cnss_ignore_qmi_failure(true);
+ set_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_EXIT,
+ 0, NULL);
+}
+
+static struct qmi_ops qmi_wlfw_ops = {
+ .new_server = wlfw_new_server,
+ .del_server = wlfw_del_server,
+};
+
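+/* Create the WLFW QMI handle and start the lookup for the WLFW service */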
+int cnss_qmi_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = qmi_handle_init(&plat_priv->qmi_wlfw,
+ QMI_WLFW_MAX_RECV_BUF_SIZE,
+ &qmi_wlfw_ops, qmi_wlfw_msg_handlers);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize WLFW QMI handle, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01, WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0)
+ cnss_pr_err("Failed to add WLFW QMI lookup, err: %d\n", ret);
+
+out:
+ return ret;
+}
+
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
+{
+ qmi_handle_release(&plat_priv->qmi_wlfw);
+}
+
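+/*
+ * Query the DMS (Device Management Service) for the provisioned WLAN MAC
+ * address and cache it in plat_priv->dms on success.
+ */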
+int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
+{
+ struct dms_get_mac_address_req_msg_v01 req;
+ struct dms_get_mac_address_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ if (!test_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state)) {
+ cnss_pr_err("DMS QMI connection not established\n");
+ return -EINVAL;
+ }
+	cnss_pr_dbg("Requesting DMS MAC address\n");
+
+ memset(&resp, 0, sizeof(resp));
+ ret = qmi_txn_init(&plat_priv->qmi_dms, &txn,
+ dms_get_mac_address_resp_msg_v01_ei, &resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for dms, err: %d\n",
+ ret);
+ goto out;
+ }
+ req.device = DMS_DEVICE_MAC_WLAN_V01;
+ ret = qmi_send_request(&plat_priv->qmi_dms, NULL, &txn,
+ QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
+ DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
+ dms_get_mac_address_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send QMI_DMS_GET_MAC_ADDRESS_REQ_V01, err: %d\n",
+ ret);
+ goto out;
+ }
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for QMI_DMS_GET_MAC_ADDRESS_RESP_V01, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QMI_DMS_GET_MAC_ADDRESS_REQ_V01 failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -resp.resp.result;
+ goto out;
+ }
+ if (!resp.mac_address_valid ||
+ resp.mac_address_len != QMI_WLFW_MAC_ADDR_SIZE_V01) {
+ cnss_pr_err("Invalid MAC address received from DMS\n");
+ plat_priv->dms.mac_valid = false;
+ goto out;
+ }
+ plat_priv->dms.mac_valid = true;
+ memcpy(plat_priv->dms.mac, resp.mac_address, QMI_WLFW_MAC_ADDR_SIZE_V01);
+ cnss_pr_info("Received DMS MAC: [%pM]\n", plat_priv->dms.mac);
+out:
+ return ret;
+}
+
+static int cnss_dms_connect_to_server(struct cnss_plat_data *plat_priv,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_handle *qmi_dms = &plat_priv->qmi_dms;
+ struct sockaddr_qrtr sq = {0};
+ int ret = 0;
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = node;
+ sq.sq_port = port;
+
+ ret = kernel_connect(qmi_dms->sock, (struct sockaddr *)&sq,
+ sizeof(sq), 0);
+ if (ret < 0) {
+ cnss_pr_err("Failed to connect to QMI DMS remote service Node: %d Port: %d\n",
+ node, port);
+ goto out;
+ }
+
+ set_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
+ cnss_pr_info("QMI DMS service connected, state: 0x%lx\n",
+ plat_priv->driver_state);
+out:
+ return ret;
+}
+
+static int dms_new_server(struct qmi_handle *qmi_dms,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
+
+ if (!service)
+ return -EINVAL;
+
+ return cnss_dms_connect_to_server(plat_priv, service->node,
+ service->port);
+}
+
+static void dms_del_server(struct qmi_handle *qmi_dms,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
+
+ clear_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
+ cnss_pr_info("QMI DMS service disconnected, state: 0x%lx\n",
+ plat_priv->driver_state);
+}
+
+static struct qmi_ops qmi_dms_ops = {
+ .new_server = dms_new_server,
+ .del_server = dms_del_server,
+};
+
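+/* Create the DMS QMI handle and start the lookup for the DMS service */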
+int cnss_dms_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = qmi_handle_init(&plat_priv->qmi_dms, DMS_QMI_MAX_MSG_LEN,
+ &qmi_dms_ops, NULL);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize DMS handle, err: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_add_lookup(&plat_priv->qmi_dms, DMS_SERVICE_ID_V01,
+ DMS_SERVICE_VERS_V01, 0);
+ if (ret < 0)
+ cnss_pr_err("Failed to add DMS lookup, err: %d\n", ret);
+out:
+ return ret;
+}
+
+void cnss_dms_deinit(struct cnss_plat_data *plat_priv)
+{
+ qmi_handle_release(&plat_priv->qmi_dms);
+}
+
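+/*
+ * COEX service: request switching the shared antenna to WLAN use. If the
+ * response carries a grant mask, it is cached in plat_priv->grant.
+ */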
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+ struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
+ struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->antenna = plat_priv->antenna;
+
+ ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
+ coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->coex_qmi, NULL, &txn,
+ QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
+ COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
+ coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
+ if (ret < 0) {
+ cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
+ ret);
+ goto out;
+ } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ if (resp->grant_valid)
+ plat_priv->grant = resp->grant;
+
+ cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);
+
+ kfree(resp);
+ kfree(req);
+ return 0;
+
+out:
+ kfree(resp);
+ kfree(req);
+ return ret;
+}
+
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+ struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
+ struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->antenna = plat_priv->antenna;
+
+ ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
+ coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->coex_qmi, NULL, &txn,
+ QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
+ COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
+ coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
+ if (ret < 0) {
+ cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
+ ret);
+ goto out;
+ } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(resp);
+ kfree(req);
+ return 0;
+
+out:
+ kfree(resp);
+ kfree(req);
+ return ret;
+}
+
+static int coex_new_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi, struct cnss_plat_data, coex_qmi);
+ struct sockaddr_qrtr sq = { 0 };
+ int ret = 0;
+
+ cnss_pr_dbg("COEX server arrive: node %u port %u\n",
+ service->node, service->port);
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = service->node;
+ sq.sq_port = service->port;
+ ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
+ if (ret < 0) {
+ cnss_pr_err("Fail to connect to remote service port\n");
+ return ret;
+ }
+
+ set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
+ cnss_pr_dbg("COEX Server Connected: 0x%lx\n",
+ plat_priv->driver_state);
+ return 0;
+}
+
+static void coex_del_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi, struct cnss_plat_data, coex_qmi);
+
+ cnss_pr_dbg("COEX server exit\n");
+
+ clear_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
+}
+
+static struct qmi_ops coex_qmi_ops = {
+ .new_server = coex_new_server,
+ .del_server = coex_del_server,
+};
+
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+ ret = qmi_handle_init(&plat_priv->coex_qmi,
+ COEX_SERVICE_MAX_MSG_LEN,
+ &coex_qmi_ops, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
+ COEX_SERVICE_VERS_V01, 0);
+ return ret;
+}
+
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
+{
+ qmi_handle_release(&plat_priv->coex_qmi);
+}
+
+/* IMS Service */
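+/*
+ * Subscribe to WFC call status indications from the IMS private service.
+ * The request is sent asynchronously; the response is handled by
+ * ims_subscribe_for_indication_resp_cb().
+ */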
+int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+ struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
+ struct qmi_txn *txn;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->wfc_call_status_valid = 1;
+ req->wfc_call_status = 1;
+
+ txn = &plat_priv->txn;
+ ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
+ if (ret < 0) {
+ cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->ims_qmi, NULL, txn,
+ QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
+ IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
+ ims_private_service_subscribe_for_indications_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(txn);
+ cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
+ ret);
+ goto out;
+ }
+
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static void ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ const
+ struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
+ data;
+
+ cnss_pr_dbg("Received IMS subscribe indication response\n");
+
+ if (!txn) {
+ cnss_pr_err("spurious response\n");
+ return;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
+ resp->resp.result, resp->resp.error);
+ txn->result = -resp->resp.result;
+ }
+}
+
+int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ int ret;
+ struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
+
+ ret = cnss_wlfw_wfc_call_status_send_sync(plat_priv, ind_msg);
+ kfree(data);
+ return ret;
+}
+
+static void
+cnss_ims_process_wfc_call_ind_cb(struct qmi_handle *ims_qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
+ const
+ struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
+ struct ims_private_service_wfc_call_status_ind_msg_v01 *event_data;
+
+ if (!txn) {
+ cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Spurious indication\n");
+ return;
+ }
+
+ if (!ind_msg) {
+ cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Invalid indication\n");
+ return;
+ }
+ cnss_pr_dbg("IMS->CNSS: WFC_CALL_IND: %x, %x %x, %x %x, %x %llx, %x %x, %x %x\n",
+ ind_msg->wfc_call_active, ind_msg->all_wfc_calls_held_valid,
+ ind_msg->all_wfc_calls_held,
+ ind_msg->is_wfc_emergency_valid, ind_msg->is_wfc_emergency,
+ ind_msg->twt_ims_start_valid, ind_msg->twt_ims_start,
+ ind_msg->twt_ims_int_valid, ind_msg->twt_ims_int,
+ ind_msg->media_quality_valid, ind_msg->media_quality);
+
+ event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
+ if (!event_data)
+ return;
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
+ 0, event_data);
+}
+
+static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id =
+ QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
+ .ei =
+ ims_private_service_subscribe_for_indications_rsp_msg_v01_ei,
+ .decoded_size = sizeof(struct
+ ims_private_service_subscribe_for_indications_rsp_msg_v01),
+ .fn = ims_subscribe_for_indication_resp_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
+ .ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
+ .fn = cnss_ims_process_wfc_call_ind_cb
+ },
+ {}
+};
+
+static int ims_new_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi, struct cnss_plat_data, ims_qmi);
+ struct sockaddr_qrtr sq = { 0 };
+ int ret = 0;
+
+ cnss_pr_dbg("IMS server arrive: node %u port %u\n",
+ service->node, service->port);
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = service->node;
+ sq.sq_port = service->port;
+ ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
+ if (ret < 0) {
+ cnss_pr_err("Fail to connect to remote service port\n");
+ return ret;
+ }
+
+ set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
+ cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
+ plat_priv->driver_state);
+
+ ret = ims_subscribe_for_indication_send_async(plat_priv);
+ return ret;
+}
+
+static void ims_del_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi, struct cnss_plat_data, ims_qmi);
+
+ cnss_pr_dbg("IMS server exit\n");
+
+ clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
+}
+
+static struct qmi_ops ims_qmi_ops = {
+ .new_server = ims_new_server,
+ .del_server = ims_del_server,
+};
+
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+ ret = qmi_handle_init(&plat_priv->ims_qmi,
+ IMSPRIVATE_SERVICE_MAX_MSG_LEN,
+ &ims_qmi_ops, qmi_ims_msg_handlers);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
+ IMSPRIVATE_SERVICE_VERS_V01, 0);
+ return ret;
+}
+
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
+{
+ qmi_handle_release(&plat_priv->ims_qmi);
+}
diff --git a/cnss2/qmi.h b/cnss2/qmi.h
new file mode 100644
index 0000000..dc07dca
--- /dev/null
+++ b/cnss2/qmi.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_QMI_H
+#define _CNSS_QMI_H
+
+#include "wlan_firmware_service_v01.h"
+
+struct cnss_plat_data;
+
+struct cnss_qmi_event_server_arrive_data {
+ unsigned int node;
+ unsigned int port;
+};
+
+struct cnss_mem_seg {
+ u64 addr;
+ u32 size;
+};
+
+struct cnss_qmi_event_fw_mem_file_save_data {
+ u32 total_size;
+ u32 mem_seg_len;
+ enum wlfw_mem_type_enum_v01 mem_type;
+ struct cnss_mem_seg mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+ char file_name[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+};
+
+#ifdef CONFIG_CNSS2_QMI
+#include "coexistence_service_v01.h"
+#include "ip_multimedia_subsystem_private_service_v01.h"
+#include "device_management_service_v01.h"
+
+int cnss_qmi_init(struct cnss_plat_data *plat_priv);
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
+unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data);
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+ u32 bdf_type);
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_mode mode);
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct cnss_wlan_enable_cfg *config,
+ const char *host_version);
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode);
+int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+ void *cmd, int cmd_len);
+int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
+ void *data);
+int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
+ void *data);
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv);
+void cnss_ignore_qmi_failure(bool ignore);
+int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
+ u8 *mac, u32 mac_len);
+int cnss_dms_init(struct cnss_plat_data *plat_priv);
+void cnss_dms_deinit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
+ u32 total_size);
+int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv);
+int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option);
+int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
+ u32 cal_file_download_size);
+#else
+#define QMI_WLFW_TIMEOUT_MS 10000
+
+static inline int cnss_qmi_init(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
+{
+}
+
+static inline
+unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
+{
+ return QMI_WLFW_TIMEOUT_MS;
+}
+
+static inline int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ return 0;
+}
+
+static inline int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+ u32 bdf_type)
+{
+ return 0;
+}
+
+static inline int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_mode mode)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct cnss_wlan_enable_cfg *config,
+ const char *host_version)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+ void *cmd, int cmd_len)
+{
+ return 0;
+}
+
+static inline
+int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ return 0;
+}
+
+static inline
+int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ return 0;
+}
+
+static inline
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv) {}
+
+static inline
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv) {}
+
+static inline
+int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline void cnss_ignore_qmi_failure(bool ignore) {}
+
+static inline int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
+ u8 *mac, u32 mac_len)
+{
+ return 0;
+}
+
+static inline int cnss_dms_init(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
+ u32 total_size)
+{
+ return 0;
+}
+
+static inline void cnss_dms_deinit(struct cnss_plat_data *plat_priv) {}
+
+static inline int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline
+int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option)
+{
+ return 0;
+}
+
+static inline
+int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
+ u32 cal_file_download_size)
+{
+ return 0;
+}
+#endif /* CONFIG_CNSS2_QMI */
+
+#ifdef CONFIG_CNSS2_DEBUG
+static inline u32 cnss_get_host_build_type(void)
+{
+ return QMI_HOST_BUILD_TYPE_PRIMARY_V01;
+}
+#else
+static inline u32 cnss_get_host_build_type(void)
+{
+ return QMI_HOST_BUILD_TYPE_SECONDARY_V01;
+}
+#endif
+
+#endif /* _CNSS_QMI_H */
diff --git a/cnss2/reg.h b/cnss2/reg.h
new file mode 100644
index 0000000..3d02c74
--- /dev/null
+++ b/cnss2/reg.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_REG_H
+#define _CNSS_REG_H
+
+#define QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET 0x310C
+
+#define QCA6390_CE_SRC_RING_REG_BASE 0xA00000
+#define QCA6390_CE_DST_RING_REG_BASE 0xA01000
+#define QCA6390_CE_COMMON_REG_BASE 0xA18000
+
+#define QCA6490_CE_SRC_RING_REG_BASE 0x1B80000
+#define QCA6490_CE_DST_RING_REG_BASE 0x1B81000
+#define QCA6490_CE_COMMON_REG_BASE 0x1B98000
+
+#define CE_SRC_RING_BASE_LSB_OFFSET 0x0
+#define CE_SRC_RING_BASE_MSB_OFFSET 0x4
+#define CE_SRC_RING_ID_OFFSET 0x8
+#define CE_SRC_RING_MISC_OFFSET 0x10
+#define CE_SRC_CTRL_OFFSET 0x58
+#define CE_SRC_R0_CE_CH_SRC_IS_OFFSET 0x5C
+#define CE_SRC_RING_HP_OFFSET 0x400
+#define CE_SRC_RING_TP_OFFSET 0x404
+
+#define CE_DEST_RING_BASE_LSB_OFFSET 0x0
+#define CE_DEST_RING_BASE_MSB_OFFSET 0x4
+#define CE_DEST_RING_ID_OFFSET 0x8
+#define CE_DEST_RING_MISC_OFFSET 0x10
+#define CE_DEST_CTRL_OFFSET 0xB0
+#define CE_CH_DST_IS_OFFSET 0xB4
+#define CE_CH_DEST_CTRL2_OFFSET 0xB8
+#define CE_DEST_RING_HP_OFFSET 0x400
+#define CE_DEST_RING_TP_OFFSET 0x404
+
+#define CE_STATUS_RING_BASE_LSB_OFFSET 0x58
+#define CE_STATUS_RING_BASE_MSB_OFFSET 0x5C
+#define CE_STATUS_RING_ID_OFFSET 0x60
+#define CE_STATUS_RING_MISC_OFFSET 0x68
+#define CE_STATUS_RING_HP_OFFSET 0x408
+#define CE_STATUS_RING_TP_OFFSET 0x40C
+
+#define CE_COMMON_GXI_ERR_INTS 0x14
+#define CE_COMMON_GXI_ERR_STATS 0x18
+#define CE_COMMON_GXI_WDOG_STATUS 0x2C
+#define CE_COMMON_TARGET_IE_0 0x48
+#define CE_COMMON_TARGET_IE_1 0x4C
+
+#define CE_REG_INTERVAL 0x2000
+
+#define SHADOW_REG_COUNT 36
+#define PCIE_SHADOW_REG_VALUE_0 0x8FC
+#define PCIE_SHADOW_REG_VALUE_34 0x984
+#define PCIE_SHADOW_REG_VALUE_35 0x988
+
+#define SHADOW_REG_INTER_COUNT 43
+#define PCIE_SHADOW_REG_INTER_0 0x1E05000
+
+#define QDSS_APB_DEC_CSR_BASE 0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET 0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET 0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET 0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET 0x78
+
+#define MAX_UNWINDOWED_ADDRESS 0x80000
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_SHIFT 19
+#define WINDOW_VALUE_MASK 0x3F
+#define WINDOW_START MAX_UNWINDOWED_ADDRESS
+#define WINDOW_RANGE_MASK 0x7FFFF
+
+#define TIME_SYNC_ENABLE 0x80000000
+#define TIME_SYNC_CLEAR 0x0
+
+#define DEBUG_PBL_LOG_SRAM_START 0x01403D58
+
+#define QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE 80
+#define QCA6390_V2_SBL_DATA_START 0x016C8580
+#define QCA6390_V2_SBL_DATA_END (0x016C8580 + 0x00011000)
+#define QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE 44
+
+#define QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40
+#define QCA6490_V1_SBL_DATA_START 0x0143B000
+#define QCA6490_V1_SBL_DATA_END (0x0143B000 + 0x00011000)
+#define QCA6490_V2_SBL_DATA_START 0x01435000
+#define QCA6490_V2_SBL_DATA_END (0x01435000 + 0x00011000)
+#define QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
+
+#define WCN7850_DEBUG_PBL_LOG_SRAM_START 0x01403D98
+#define WCN7850_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40
+#define WCN7850_SBL_DATA_START 0x01790000
+#define WCN7850_SBL_DATA_END (0x01790000 + 0x00011000)
+#define WCN7850_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
+#define WCN7850_PBL_BOOTSTRAP_STATUS 0x01A10008
+
+#define TCSR_PBL_LOGGING_REG 0x01B000F8
+#define PCIE_BHI_ERRDBG2_REG 0x01E0E238
+#define PCIE_BHI_ERRDBG3_REG 0x01E0E23C
+#define PBL_WLAN_BOOT_CFG 0x01E22B34
+#define PBL_BOOTSTRAP_STATUS 0x01910008
+
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG 0x01E04234
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL 0xDEAD1234
+#define QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG 0x01E03140
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG 0x1E04054
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG 0x1E04058
+#define QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG 0x1E05090
+#define QCA6390_PCIE_PCIE_PARF_LTSSM 0x01E081B0
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS 0x01E08024
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS_1 0x01E08028
+#define QCA6390_PCIE_PCIE_PARF_INT_STATUS 0x01E08220
+#define QCA6390_PCIE_PCIE_INT_ALL_STATUS 0x01E08224
+#define QCA6390_PCIE_PCIE_INT_ALL_MASK 0x01E0822C
+#define QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG 0x01E0AC00
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4 0x01E08530
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3 0x01E0852c
+#define QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL 0x01E08174
+#define QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER 0x01E08178
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS 0x01E084D0
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG 0x01E084d4
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0x01E0ec88
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB 0x01E0ec08
+#define QCA6390_PCIE_PCIE_CORE_CONFIG 0x01E08640
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2 0x01E0EC04
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1 0x01E0EC0C
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0x01E0EC84
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH 0x01E030C8
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW 0x01E030CC
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH 0x01E0313C
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW 0x01E03140
+#define QCA6390_PCIE_PCIE_BHI_EXECENV_REG 0x01E0E228
+
+#define QCA6390_GCC_DEBUG_CLK_CTL 0x001E4025C
+
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE 0x00D00200
+#define QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL 0x00B60164
+#define QCA6390_WCSS_PMM_TOP_PMU_CX_CSR 0x00B70080
+#define QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT 0x00B700E0
+#define QCA6390_WCSS_PMM_TOP_AON_INT_EN 0x00B700D0
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS 0x00B70020
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL 0x00B7001C
+#define QCA6390_WCSS_PMM_TOP_TESTBUS_STS 0x00B70028
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG 0x00DB0008
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK 0x20
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL 0x00D02000
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE 0x00D02004
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS 0x00DB000C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL 0x00DB0030
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0 0x00DB0400
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9 0x00DB0424
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0 0x00D90380
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1 0x00D90384
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2 0x00D90388
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3 0x00D9038C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4 0x00D90390
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5 0x00D90394
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6 0x00D90398
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0 0x00D90100
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1 0x00D90104
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2 0x00D90108
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3 0x00D9010C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4 0x00D90110
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5 0x00D90114
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6 0x00D90118
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0 0x00D90500
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1 0x00D90504
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2 0x00D90508
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3 0x00D9050C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4 0x00D90510
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5 0x00D90514
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6 0x00D90518
+#define QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR 0x00C3029C
+#define QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR 0x00C302BC
+#define QCA6390_WCSS_CC_WCSS_UMAC_GDSCR 0x00C30298
+#define QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR 0x00C300C4
+#define QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR 0x00C30138
+#define QCA6390_WCSS_PMM_TOP_PMM_INT_CLR 0x00B70168
+#define QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN 0x00B700D8
+
+#define QCA6390_TLMM_GPIO_IN_OUT57 0x01839004
+#define QCA6390_TLMM_GPIO_INTR_CFG57 0x01839008
+#define QCA6390_TLMM_GPIO_INTR_STATUS57 0x0183900C
+#define QCA6390_TLMM_GPIO_IN_OUT59 0x0183b004
+#define QCA6390_TLMM_GPIO_INTR_CFG59 0x0183b008
+#define QCA6390_TLMM_GPIO_INTR_STATUS59 0x0183b00C
+
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2 0x00B6017C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2 0x00B60190
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1 0x00B6018C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1 0x00B60178
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1 0x00B600B0
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1 0x00B60044
+
+#define WLAON_SOC_POWER_CTRL 0x01F80000
+#define WLAON_SOC_PWR_WDG_BARK_THRSHD 0x1F80004
+#define WLAON_SOC_PWR_WDG_BITE_THRSHD 0x1F80008
+#define WLAON_SW_COLD_RESET 0x1F8000C
+#define WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE 0x1F8001C
+#define WLAON_GDSC_DELAY_SETTING 0x1F80024
+#define WLAON_GDSC_DELAY_SETTING2 0x1F80028
+#define WLAON_WL_PWR_STATUS_REG 0x1F8002C
+#define WLAON_WL_AON_DBG_CFG_REG 0x1F80030
+#define WLAON_WL_AON_DBG_ENABLE_GRP0_REG 0x1F80034
+#define WLAON_WL_AON_DBG_ENABLE_GRP1_REG 0x1F80038
+#define WLAON_WL_AON_APM_CFG_CTRL0 0x1F80040
+#define WLAON_WL_AON_APM_CFG_CTRL1 0x1F80044
+#define WLAON_WL_AON_APM_CFG_CTRL2 0x1F80048
+#define WLAON_WL_AON_APM_CFG_CTRL3 0x1F8004C
+#define WLAON_WL_AON_APM_CFG_CTRL4 0x1F80050
+#define WLAON_WL_AON_APM_CFG_CTRL5 0x1F80054
+#define WLAON_WL_AON_APM_CFG_CTRL5_1 0x1F80058
+#define WLAON_WL_AON_APM_CFG_CTRL6 0x1F8005C
+#define WLAON_WL_AON_APM_CFG_CTRL6_1 0x1F80060
+#define WLAON_WL_AON_APM_CFG_CTRL7 0x1F80064
+#define WLAON_WL_AON_APM_CFG_CTRL8 0x1F80068
+#define WLAON_WL_AON_APM_CFG_CTRL8_1 0x1F8006C
+#define WLAON_WL_AON_APM_CFG_CTRL9 0x1F80070
+#define WLAON_WL_AON_APM_CFG_CTRL9_1 0x1F80074
+#define WLAON_WL_AON_APM_CFG_CTRL10 0x1F80078
+#define WLAON_WL_AON_APM_CFG_CTRL11 0x1F8007C
+#define WLAON_WL_AON_APM_CFG_CTRL12 0x1F80080
+#define WLAON_WL_AON_APM_OVERRIDE_REG 0x1F800B0
+#define WLAON_WL_AON_CXPC_REG 0x1F800B4
+#define WLAON_WL_AON_APM_STATUS0 0x1F800C0
+#define WLAON_WL_AON_APM_STATUS1 0x1F800C4
+#define WLAON_WL_AON_APM_STATUS2 0x1F800C8
+#define WLAON_WL_AON_APM_STATUS3 0x1F800CC
+#define WLAON_WL_AON_APM_STATUS4 0x1F800D0
+#define WLAON_WL_AON_APM_STATUS5 0x1F800D4
+#define WLAON_WL_AON_APM_STATUS6 0x1F800D8
+#define WLAON_GLOBAL_COUNTER_CTRL1 0x1F80100
+#define WLAON_GLOBAL_COUNTER_CTRL6 0x1F80108
+#define WLAON_GLOBAL_COUNTER_CTRL7 0x1F8010C
+#define WLAON_GLOBAL_COUNTER_CTRL3 0x1F80118
+#define WLAON_GLOBAL_COUNTER_CTRL4 0x1F8011C
+#define WLAON_GLOBAL_COUNTER_CTRL5 0x1F80120
+#define WLAON_GLOBAL_COUNTER_CTRL8 0x1F801F0
+#define WLAON_GLOBAL_COUNTER_CTRL2 0x1F801F4
+#define WLAON_GLOBAL_COUNTER_CTRL9 0x1F801F8
+#define WLAON_RTC_CLK_CAL_CTRL1 0x1F80200
+#define WLAON_RTC_CLK_CAL_CTRL2 0x1F80204
+#define WLAON_RTC_CLK_CAL_CTRL3 0x1F80208
+#define WLAON_RTC_CLK_CAL_CTRL4 0x1F8020C
+#define WLAON_RTC_CLK_CAL_CTRL5 0x1F80210
+#define WLAON_RTC_CLK_CAL_CTRL6 0x1F80214
+#define WLAON_RTC_CLK_CAL_CTRL7 0x1F80218
+#define WLAON_RTC_CLK_CAL_CTRL8 0x1F8021C
+#define WLAON_RTC_CLK_CAL_CTRL9 0x1F80220
+#define WLAON_WCSSAON_CONFIG_REG 0x1F80300
+#define WLAON_WLAN_OEM_DEBUG_REG 0x1F80304
+#define WLAON_WLAN_RAM_DUMP_REG 0x1F80308
+#define WLAON_QDSS_WCSS_REG 0x1F8030C
+#define WLAON_QDSS_WCSS_ACK 0x1F80310
+#define WLAON_WL_CLK_CNTL_KDF_REG 0x1F80314
+#define WLAON_WL_CLK_CNTL_PMU_HFRC_REG 0x1F80318
+#define WLAON_QFPROM_PWR_CTRL_REG 0x1F8031C
+#define QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK 0x4
+#define QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK 0x1
+#define WLAON_DLY_CONFIG 0x1F80400
+#define WLAON_WLAON_Q6_IRQ_REG 0x1F80404
+#define WLAON_PCIE_INTF_SW_CFG_REG 0x1F80408
+#define WLAON_PCIE_INTF_STICKY_SW_CFG_REG 0x1F8040C
+#define WLAON_PCIE_INTF_PHY_SW_CFG_REG 0x1F80410
+#define WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG 0x1F80414
+#define WLAON_Q6_COOKIE_BIT 0x1F80500
+#define WLAON_WARM_SW_ENTRY 0x1F80504
+#define WLAON_RESET_DBG_SW_ENTRY 0x1F80508
+#define WLAON_WL_PMUNOC_CFG_REG 0x1F8050C
+#define WLAON_RESET_CAUSE_CFG_REG 0x1F80510
+#define WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG 0x1F80514
+#define WLAON_DEBUG 0x1F80600
+#define WLAON_SOC_PARAMETERS 0x1F80604
+#define WLAON_WLPM_SIGNAL 0x1F80608
+#define WLAON_SOC_RESET_CAUSE_REG 0x1F8060C
+#define WLAON_WAKEUP_PCIE_SOC_REG 0x1F80610
+#define WLAON_PBL_STACK_CANARY 0x1F80614
+#define WLAON_MEM_TOT_NUM_GRP_REG 0x1F80618
+#define WLAON_MEM_TOT_BANKS_IN_GRP0_REG 0x1F8061C
+#define WLAON_MEM_TOT_BANKS_IN_GRP1_REG 0x1F80620
+#define WLAON_MEM_TOT_BANKS_IN_GRP2_REG 0x1F80624
+#define WLAON_MEM_TOT_BANKS_IN_GRP3_REG 0x1F80628
+#define WLAON_MEM_TOT_SIZE_IN_GRP0_REG 0x1F8062C
+#define WLAON_MEM_TOT_SIZE_IN_GRP1_REG 0x1F80630
+#define WLAON_MEM_TOT_SIZE_IN_GRP2_REG 0x1F80634
+#define WLAON_MEM_TOT_SIZE_IN_GRP3_REG 0x1F80638
+#define WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG 0x1F8063C
+#define WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG 0x1F80640
+#define WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG 0x1F80644
+#define WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG 0x1F80648
+#define WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG 0x1F8064C
+#define WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG 0x1F80650
+#define WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG 0x1F80654
+#define WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG 0x1F80658
+#define WLAON_MEM_CNT_SEL_REG 0x1F8065C
+#define WLAON_MEM_NO_EXTBHS_REG 0x1F80660
+#define WLAON_MEM_DEBUG_REG 0x1F80664
+#define WLAON_MEM_DEBUG_BUS_REG 0x1F80668
+#define WLAON_MEM_REDUN_CFG_REG 0x1F8066C
+#define WLAON_WL_AON_SPARE2 0x1F80670
+#define WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG 0x1F80680
+#define WLAON_BTFM_WLAN_IPC_STATUS_REG 0x1F80690
+#define WLAON_MPM_COUNTER_CHICKEN_BITS 0x1F806A0
+#define WLAON_WLPM_CHICKEN_BITS 0x1F806A4
+#define WLAON_PCIE_PHY_PWR_REG 0x1F806A8
+#define WLAON_WL_CLK_CNTL_PMU_LPO2M_REG 0x1F806AC
+#define WLAON_WL_SS_ROOT_CLK_SWITCH_REG 0x1F806B0
+#define WLAON_POWERCTRL_PMU_REG 0x1F806B4
+#define WLAON_POWERCTRL_MEM_REG 0x1F806B8
+#define WLAON_PCIE_PWR_CTRL_REG 0x01F806BC
+#define WLAON_SOC_PWR_PROFILE_REG 0x1F806C0
+#define WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG 0x01F806C4
+#define WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG 0x1F806C8
+#define WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG 0x1F806CC
+#define WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG 0x1F806D0
+#define WLAON_MEM_SVS_CFG_REG 0x1F806D4
+#define WLAON_CMN_AON_MISC_REG 0x1F806D8
+#define WLAON_INTR_STATUS 0x1F80700
+#define WLAON_INTR_ENABLE 0x1F80704
+#define WLAON_NOC_DBG_BUS_SEL_REG 0x1F80708
+#define WLAON_NOC_DBG_BUS_REG 0x1F8070C
+#define WLAON_WL_CTRL_MISC_REG 0x1F80710
+#define WLAON_DBG_STATUS0 0x1F80720
+#define WLAON_DBG_STATUS1 0x1F80724
+#define WLAON_TIMERSYNC_OFFSET_L 0x1F80730
+#define WLAON_TIMERSYNC_OFFSET_H 0x1F80734
+#define WLAON_PMU_LDO_SETTLE_REG 0x1F80740
+
+#define QCA6390_SYSPM_SYSPM_PWR_STATUS 0x1F82000
+#define QCA6390_SYSPM_DBG_BTFM_AON_REG 0x1F82004
+#define QCA6390_SYSPM_DBG_BUS_SEL_REG 0x1F82008
+#define QCA6390_SYSPM_WCSSAON_SR_STATUS 0x1F8200C
+#endif
diff --git a/cnss_genl/Kconfig b/cnss_genl/Kconfig
new file mode 100644
index 0000000..5d87d8e
--- /dev/null
+++ b/cnss_genl/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CNSS_GENL
+ tristate "CNSS Generic Netlink Socket Driver"
+ help
+ This module creates the generic netlink family "cld80211", which the
+ cld driver and userspace utilities can use to communicate over
+ netlink sockets. The module also creates several multicast groups to
+ facilitate that communication.
diff --git a/cnss_genl/Makefile b/cnss_genl/Makefile
new file mode 100644
index 0000000..6ea0dfc
--- /dev/null
+++ b/cnss_genl/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ifeq ($(CONFIG_CNSS_OUT_OF_TREE),y)
+ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc
+endif
+obj-$(CONFIG_CNSS_GENL) := cnss_nl.o
diff --git a/cnss_genl/cnss_nl.c b/cnss_genl/cnss_nl.c
new file mode 100644
index 0000000..0c67e43
--- /dev/null
+++ b/cnss_genl/cnss_nl.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */
+
+#include <net/genetlink.h>
+#include <net/cnss_nl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define CLD80211_GENL_NAME "cld80211"
+
+#define CLD80211_MULTICAST_GROUP_SVC_MSGS "svc_msgs"
+#define CLD80211_MULTICAST_GROUP_HOST_LOGS "host_logs"
+#define CLD80211_MULTICAST_GROUP_FW_LOGS "fw_logs"
+#define CLD80211_MULTICAST_GROUP_PER_PKT_STATS "per_pkt_stats"
+#define CLD80211_MULTICAST_GROUP_DIAG_EVENTS "diag_events"
+#define CLD80211_MULTICAST_GROUP_FATAL_EVENTS "fatal_events"
+#define CLD80211_MULTICAST_GROUP_OEM_MSGS "oem_msgs"
+
+static const struct genl_multicast_group nl_mcgrps[] = {
+ [CLD80211_MCGRP_SVC_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_SVC_MSGS},
+ [CLD80211_MCGRP_HOST_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_HOST_LOGS},
+ [CLD80211_MCGRP_FW_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_FW_LOGS},
+ [CLD80211_MCGRP_PER_PKT_STATS] = { .name =
+ CLD80211_MULTICAST_GROUP_PER_PKT_STATS},
+ [CLD80211_MCGRP_DIAG_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_DIAG_EVENTS},
+ [CLD80211_MCGRP_FATAL_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_FATAL_EVENTS},
+ [CLD80211_MCGRP_OEM_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_OEM_MSGS},
+};
+
+struct cld_ops {
+ cld80211_cb cb;
+ void *cb_ctx;
+};
+
+struct cld80211_nl_data {
+ struct cld_ops cld_ops[CLD80211_MAX_COMMANDS];
+};
+
+static struct cld80211_nl_data nl_data;
+
+static inline struct cld80211_nl_data *get_local_ctx(void)
+{
+ return &nl_data;
+}
+
+static struct genl_ops nl_ops[CLD80211_MAX_COMMANDS];
+
+/* policy for the attributes */
+static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = {
+ [CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
+ [CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
+ [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
+ [CLD80211_ATTR_CMD] = { .type = NLA_U32 },
+ [CLD80211_ATTR_CMD_TAG_DATA] = { .type = NLA_NESTED },
+};
+
+static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ u8 cmd_id = ops->cmd;
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ if (cmd_id < 1 || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_err("CLD80211: Command Not supported: %u\n", cmd_id);
+ return -EOPNOTSUPP;
+ }
+ info->user_ptr[0] = nl->cld_ops[cmd_id - 1].cb;
+ info->user_ptr[1] = nl->cld_ops[cmd_id - 1].cb_ctx;
+
+ return 0;
+}
+
+/* The netlink family */
+static struct genl_family cld80211_fam __ro_after_init = {
+ .name = CLD80211_GENL_NAME,
+ .hdrsize = 0, /* no private header */
+ .version = 1, /* no particular meaning now */
+ .maxattr = CLD80211_ATTR_MAX,
+ .policy = cld80211_policy,
+ .netnsok = true,
+ .pre_doit = cld80211_pre_doit,
+ .post_doit = NULL,
+ .module = THIS_MODULE,
+ .ops = nl_ops,
+ .n_ops = ARRAY_SIZE(nl_ops),
+ .mcgrps = nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(nl_mcgrps),
+};
+
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb func, void *cb_ctx)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: Registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = func;
+ nl->cld_ops[cmd_id - 1].cb_ctx = cb_ctx;
+
+ return 0;
+}
+EXPORT_SYMBOL(register_cld_cmd_cb);
+
+int deregister_cld_cmd_cb(u8 cmd_id)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: De-registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = NULL;
+ nl->cld_ops[cmd_id - 1].cb_ctx = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(deregister_cld_cmd_cb);
+
+struct genl_family *cld80211_get_genl_family(void)
+{
+ return &cld80211_fam;
+}
+EXPORT_SYMBOL(cld80211_get_genl_family);
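+
+/* Illustrative client-side sketch only; the command ID, callback name and
+ * context below are hypothetical, and the callback signature is inferred from
+ * how it is invoked in cld80211_doit() (see cnss_nl.h for the exact
+ * cld80211_cb prototype):
+ *
+ *   static void my_cld_cb(const void *data, int data_len, void *ctx, int pid)
+ *   {
+ *           // parse the CLD80211_ATTR_VENDOR_DATA payload for this command
+ *   }
+ *
+ *   register_cld_cmd_cb(1, my_cld_cb, my_ctx);
+ *   ...
+ *   deregister_cld_cmd_cb(1);
+ */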
+
+static int cld80211_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ cld80211_cb cld_cb;
+ void *cld_ctx;
+
+ cld_cb = info->user_ptr[0];
+
+ if (!cld_cb) {
+ pr_err("CLD80211: Not supported\n");
+ return -EOPNOTSUPP;
+ }
+ cld_ctx = info->user_ptr[1];
+
+ if (info->attrs[CLD80211_ATTR_VENDOR_DATA]) {
+ cld_cb(nla_data(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ nla_len(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ cld_ctx, info->snd_portid);
+ } else {
+ pr_err("CLD80211: No CLD80211_ATTR_VENDOR_DATA\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __cld80211_init(void)
+{
+ int err, i;
+
+ memset(&nl_ops[0], 0, sizeof(nl_ops));
+
+ pr_info("CLD80211: Initializing\n");
+ for (i = 0; i < CLD80211_MAX_COMMANDS; i++) {
+ nl_ops[i].cmd = i + 1;
+ nl_ops[i].doit = cld80211_doit;
+ nl_ops[i].flags = GENL_ADMIN_PERM;
+ }
+
+ err = genl_register_family(&cld80211_fam);
+ if (err) {
+ pr_err("CLD80211: Failed to register cld80211 family: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static void __cld80211_exit(void)
+{
+ genl_unregister_family(&cld80211_fam);
+}
+
+/**
+ * cld80211_is_valid_dt_node_found - Check if valid device tree node present
+ *
+ * Valid device tree node means a node with "qcom,wlan" property present and
+ * "status" property not disabled.
+ *
+ * Return: true if valid device tree node found, false if not found
+ */
+static bool cld80211_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_node_with_property(dn, "qcom,wlan") {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+ if (dn)
+ return true;
+
+ return false;
+}
+
+static int __init cld80211_init(void)
+{
+ if (!cld80211_is_valid_dt_node_found())
+ return -ENODEV;
+
+ return __cld80211_init();
+}
+
+static void __exit cld80211_exit(void)
+{
+ __cld80211_exit();
+}
+
+module_init(cld80211_init);
+module_exit(cld80211_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS generic netlink module");
diff --git a/cnss_prealloc/Kconfig b/cnss_prealloc/Kconfig
new file mode 100644
index 0000000..8c24cd5
--- /dev/null
+++ b/cnss_prealloc/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config WCNSS_MEM_PRE_ALLOC
+ tristate "WCNSS pre-alloc memory support"
+ help
+ Pre-allocate memory for the WLAN driver module.
+ This feature enables the cld wlan driver to use preallocated memory
+ for its internal usage and to release it back to the preallocated
+ pool. This memory is allocated at cold boot time.
diff --git a/cnss_prealloc/Makefile b/cnss_prealloc/Makefile
new file mode 100644
index 0000000..f8d6e52
--- /dev/null
+++ b/cnss_prealloc/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ifeq ($(CONFIG_CNSS_OUT_OF_TREE),y)
+ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc
+endif
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc.o
diff --git a/cnss_prealloc/cnss_prealloc.c b/cnss_prealloc/cnss_prealloc.c
new file mode 100644
index 0000000..f908aed
--- /dev/null
+++ b/cnss_prealloc/cnss_prealloc.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <net/cnss_prealloc.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS prealloc driver");
+
+/* The cnss preallocation scheme maintains a set of memory pools that always
+ * try to keep a reserve of free buffers for use in emergencies. It is built
+ * on top of two kernel features: mempool and kmem_cache.
+ */
+
+struct cnss_pool {
+ size_t size;
+ int min;
+ const char name[50];
+ mempool_t *mp;
+ struct kmem_cache *cache;
+};
+
+/**
+ * Memory pool
+ * -----------
+ *
+ * How to update this table:
+ *
+ * 1. Add a new row with the following elements (an illustrative example
+ *    follows the table below):
+ *    size : Size of one allocation unit in bytes.
+ *    min : Minimum units to be reserved. Used only if a regular
+ *    allocation fails.
+ *    name : Name of the cache/pool. Will be displayed in /proc/slabinfo
+ *    if not merged with another pool.
+ *    mp : A pointer to the memory pool. Updated during init.
+ *    cache : A pointer to the cache. Updated during init.
+ * 2. Always keep the table in increasing order of size.
+ * 3. Keep the reserve counts as small as possible, since the reserve is
+ *    always preallocated.
+ * 4. Always profile with different use cases after updating this table.
+ * 5. A dynamic view of these pools is available in /proc/slabinfo.
+ * 6. Each pool has a sysfs node at /sys/kernel/slab/<name>.
+ *
+ */
+
+/* size, min pool reserve, name, memorypool handler, cache handler*/
+static struct cnss_pool cnss_pools[] = {
+ {8 * 1024, 22, "cnss-pool-8k", NULL, NULL},
+ {16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
+ {32 * 1024, 6, "cnss-pool-32k", NULL, NULL},
+ {64 * 1024, 8, "cnss-pool-64k", NULL, NULL},
+ {128 * 1024, 2, "cnss-pool-128k", NULL, NULL},
+};
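+
+/* Illustrative example only (not enabled): a hypothetical 256 KB pool
+ * reserving two buffers would be added as a new last row, keeping the table
+ * sorted by size:
+ *
+ *   {256 * 1024, 2, "cnss-pool-256k", NULL, NULL},
+ */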
+
+/**
+ * cnss_pool_alloc_threshold() - Allocation threshold
+ *
+ * Minimum memory size to be part of cnss pool.
+ *
+ * Return: Size
+ *
+ */
+static inline size_t cnss_pool_alloc_threshold(void)
+{
+ return cnss_pools[0].size;
+}
+
+/**
+ * cnss_pool_init() - Initialize memory pools.
+ *
+ * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
+ * the caller to invoke cnss_pool_deinit() routine to clean it up. This
+ * function needs to be called at early boot to preallocate minimum buffers in
+ * the pool.
+ *
+ * Return: 0 - success, otherwise error code.
+ *
+ */
+static int cnss_pool_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
+ /* Create the slab cache */
+ cnss_pools[i].cache =
+ kmem_cache_create_usercopy(cnss_pools[i].name,
+ cnss_pools[i].size, 0,
+ SLAB_ACCOUNT, 0,
+ cnss_pools[i].size, NULL);
+ if (!cnss_pools[i].cache) {
+ pr_err("cnss_prealloc: cache %s failed\n",
+ cnss_pools[i].name);
+ continue;
+ }
+
+ /* Create the pool and associate to slab cache */
+ cnss_pools[i].mp =
+ mempool_create(cnss_pools[i].min, mempool_alloc_slab,
+ mempool_free_slab, cnss_pools[i].cache);
+
+ if (!cnss_pools[i].mp) {
+ pr_err("cnss_prealloc: mempool %s failed\n",
+ cnss_pools[i].name);
+ kmem_cache_destroy(cnss_pools[i].cache);
+ cnss_pools[i].cache = NULL;
+ continue;
+ }
+
+ pr_info("cnss_prealloc: created mempool %s of min size %d * %d\n",
+ cnss_pools[i].name, cnss_pools[i].min,
+ cnss_pools[i].size);
+ }
+
+ return 0;
+}
+
+/**
+ * cnss_pool_deinit() - Free memory pools.
+ *
+ * Free the memory pools and return resources back to the system. It warns
+ * if there is any pending element in memory pool or cache.
+ *
+ */
+static void cnss_pool_deinit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
+ pr_info("cnss_prealloc: destroy mempool %s\n",
+ cnss_pools[i].name);
+ mempool_destroy(cnss_pools[i].mp);
+ kmem_cache_destroy(cnss_pools[i].cache);
+ }
+}
+
+/**
+ * cnss_pool_get_index() - Get the index of memory pool
+ * @mem: Allocated memory
+ *
+ * Returns the index of the memory pool which fits the requested memory. The
+ * complexity of this check is O(number of memory pools). Returns a negative
+ * error code in case of failure.
+ *
+ */
+static int cnss_pool_get_index(void *mem)
+{
+ struct page *page;
+ struct kmem_cache *cache;
+ int i;
+
+ if (!virt_addr_valid(mem))
+ return -EINVAL;
+
+ /* mem -> page -> cache */
+ page = virt_to_head_page(mem);
+ if (!page)
+ return -ENOENT;
+
+ cache = page->slab_cache;
+ if (!cache)
+ return -ENOENT;
+
+
+ /* Check if memory belongs to a pool */
+ for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
+ if (cnss_pools[i].cache == cache)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * wcnss_prealloc_get() - Get preallocated memory from a pool
+ * @size: Size to allocate
+ *
+ * The memory pool is chosen based on the size. If memory is not available in
+ * a given pool, the next larger pool is tried until the allocation succeeds.
+ *
+ * Return: A void pointer to the allocated memory, or NULL on failure
+ */
+void *wcnss_prealloc_get(size_t size)
+{
+
+ void *mem = NULL;
+ gfp_t gfp_mask = __GFP_ZERO;
+ int i;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp_mask |= GFP_ATOMIC;
+ else
+ gfp_mask |= GFP_KERNEL;
+
+ if (size >= cnss_pool_alloc_threshold()) {
+
+ for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
+ if (cnss_pools[i].size >= size) {
+ mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
+ if (mem)
+ break;
+ }
+ }
+ }
+
+ if (!mem && size >= cnss_pool_alloc_threshold()) {
+ pr_debug("cnss_prealloc: not available for size %d, flag %x\n",
+ size, gfp_mask);
+ }
+
+ return mem;
+}
+EXPORT_SYMBOL(wcnss_prealloc_get);
+
+/**
+ * wcnss_prealloc_put() - Release allocated memory
+ * @mem: Allocated memory
+ *
+ * Return the memory obtained from wcnss_prealloc_get() to its pool. If the
+ * pool is below its minimum reserve, the buffer is kept in the reserve;
+ * otherwise it is freed back to the slab.
+ *
+ * Return: 1 - success
+ * 0 - fail
+ */
+int wcnss_prealloc_put(void *mem)
+{
+ int i;
+
+ if (!mem)
+ return 0;
+
+ i = cnss_pool_get_index(mem);
+
+ if (i >= 0 && i < ARRAY_SIZE(cnss_pools)) {
+ mempool_free(mem, cnss_pools[i].mp);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wcnss_prealloc_put);
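+
+/* Typical caller pattern, shown as an illustrative sketch only; the fallback
+ * to kmalloc()/kfree() is an assumption about the caller, not something this
+ * module enforces:
+ *
+ *   void *buf = wcnss_prealloc_get(32 * 1024);
+ *
+ *   if (!buf)
+ *           buf = kmalloc(32 * 1024, GFP_KERNEL);
+ *   ...
+ *   if (!wcnss_prealloc_put(buf))
+ *           kfree(buf);
+ */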
+
+/* Not implemented. Make use of Linux SLAB features. */
+void wcnss_prealloc_check_memory_leak(void) {}
+EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
+
+/* Not implemented. Make use of Linux SLAB features. */
+int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
+EXPORT_SYMBOL(wcnss_pre_alloc_reset);
+
+/**
+ * cnss_prealloc_is_valid_dt_node_found - Check if valid device tree node
+ * present
+ *
+ * Valid device tree node means a node with "qcom,wlan" property present
+ * and "status" property not disabled.
+ *
+ * Return: true if valid device tree node found, false if not found
+ */
+static bool cnss_prealloc_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_node_with_property(dn, "qcom,wlan") {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+ if (dn)
+ return true;
+
+ return false;
+}
+
+static int __init cnss_prealloc_init(void)
+{
+ if (!cnss_prealloc_is_valid_dt_node_found())
+ return -ENODEV;
+
+ return cnss_pool_init();
+}
+
+static void __exit cnss_prealloc_exit(void)
+{
+ cnss_pool_deinit();
+}
+
+module_init(cnss_prealloc_init);
+module_exit(cnss_prealloc_exit);
+
diff --git a/cnss_utils/Kconfig b/cnss_utils/Kconfig
new file mode 100644
index 0000000..a3930b1
--- /dev/null
+++ b/cnss_utils/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CNSS_UTILS
+ tristate "CNSS utilities support"
+ help
+ Add CNSS utilities support for the WLAN driver module.
+ This feature enables the wlan driver to use the CNSS utilities APIs
+ to set and get wlan related information.
+
+config CNSS_QMI_SVC
+ tristate "CNSS QMI SVC support"
+ help
+ Add CNSS QMI SVC support for the WLAN driver module.
+ This feature enables the wlan driver to use the CNSS QMI service
+ APIs to set and get wlan related information.
+
+config CNSS_PLAT_IPC_QMI_SVC
+ tristate "CNSS Platform QMI IPC Support"
+ help
+ Add QMI-based IPC between CNSS platform kernel and user space
+ components.
diff --git a/cnss_utils/Makefile b/cnss_utils/Makefile
new file mode 100644
index 0000000..6ee49bd
--- /dev/null
+++ b/cnss_utils/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ifeq ($(CONFIG_CNSS_OUT_OF_TREE),y)
+ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc
+endif
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o
+obj-$(CONFIG_CNSS_QMI_SVC) += wlan_firmware_service.o
+wlan_firmware_service-y := wlan_firmware_service_v01.o device_management_service_v01.o
+
+obj-$(CONFIG_CNSS_PLAT_IPC_QMI_SVC) += cnss_plat_ipc_qmi_svc.o
+cnss_plat_ipc_qmi_svc-y := cnss_plat_ipc_qmi.o cnss_plat_ipc_service_v01.o
diff --git a/cnss_utils/cnss_plat_ipc_qmi.c b/cnss_utils/cnss_plat_ipc_qmi.c
new file mode 100644
index 0000000..aa042f9
--- /dev/null
+++ b/cnss_utils/cnss_plat_ipc_qmi.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/qrtr.h>
+#include <linux/soc/qcom/qmi.h>
+#include <linux/ipc_logging.h>
+#include <linux/limits.h>
+#include <linux/slab.h>
+#include <linux/cnss_plat_ipc_qmi.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include "cnss_plat_ipc_service_v01.h"
+
+#define CNSS_MAX_FILE_SIZE (32 * 1024 * 1024)
+#define CNSS_PLAT_IPC_MAX_CLIENTS 1
+#define CNSS_PLAT_IPC_QMI_FILE_TXN_TIMEOUT 10000
+#define QMI_INIT_RETRY_MAX_TIMES 240
+#define QMI_INIT_RETRY_DELAY_MS 250
+#define NUM_LOG_PAGES 10
+
+/**
+ * struct cnss_plat_ipc_file_data: File transfer context data
+ * @name: File name
+ * @buf: Buffer provided for TX/RX file contents
+ * @id: File ID corresponding to file name
+ * @buf_size: Buffer size
+ * @file_size: File size
+ * @seg_index: Running index for buffer segments
+ * @seg_len: Total number of segments
+ * @end: End of transaction
+ * @complete: Completion variable for file transfer
+ */
+struct cnss_plat_ipc_file_data {
+ char *name;
+ char *buf;
+ u32 id;
+ u32 buf_size;
+ u32 file_size;
+ u32 seg_index;
+ u32 seg_len;
+ u32 end;
+ struct completion complete;
+};
+
+/**
+ * struct cnss_plat_ipc_qmi_svc_ctx: Platform context for QMI IPC service
+ * @svc_hdl: QMI server handle
+ * @client_sq: CNSS Daemon client QRTR socket
+ * @client_connected: Daemon client connection status
+ * @file_idr: File ID generator
+ * @file_idr_lock: File ID generator usage lock
+ * @cfg: CNSS daemon provided user config
+ * @connection_update_cb: Registered user callback for daemon connection status
+ * @cb_ctx: Context for registered user
+ * @num_user: Number of registered users
+ */
+struct cnss_plat_ipc_qmi_svc_ctx {
+ struct qmi_handle *svc_hdl;
+ struct sockaddr_qrtr client_sq;
+ bool client_connected;
+ struct idr file_idr;
+ struct mutex file_idr_lock; /* File ID generator usage lock */
+ struct cnss_plat_ipc_user_config cfg;
+
+ cnss_plat_ipc_connection_update
+ connection_update_cb[CNSS_PLAT_IPC_MAX_CLIENTS];
+ void *cb_ctx[CNSS_PLAT_IPC_MAX_CLIENTS];
+ u32 num_user;
+};
+
+static struct cnss_plat_ipc_qmi_svc_ctx plat_ipc_qmi_svc;
+static void *cnss_plat_ipc_log_context;
+
+#if IS_ENABLED(CONFIG_IPC_LOGGING)
+
+void cnss_plat_ipc_debug_log_print(void *log_ctx, char *process, const char *fn,
+ const char *log_level, char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+
+ if (log_level)
+ printk("%scnss_plat: %pV", log_level, &vaf);
+
+ ipc_log_string(log_ctx, "[%s] %s: %pV", process, fn, &vaf);
+
+ va_end(va_args);
+}
+
+#define cnss_plat_ipc_log_print(_x...) \
+ cnss_plat_ipc_debug_log_print(cnss_plat_ipc_log_context, _x)
+#else
+void cnss_plat_ipc_debug_log_print(void *log_ctx, char *process, const char *fn,
+ const char *log_level, char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+
+ if (log_level)
+ printk("%scnss_plat: %pV", log_level, &vaf);
+
+ va_end(va_args);
+}
+
+#define cnss_plat_ipc_log_print(_x...) \
+ cnss_plat_ipc_debug_log_print((void *)NULL, _x)
+#endif
+
+#define proc_name (in_irq() ? "irq" : \
+ (in_softirq() ? "soft_irq" : current->comm))
+#define cnss_plat_ipc_err(_fmt, ...) \
+ cnss_plat_ipc_log_print(proc_name, __func__, \
+ KERN_ERR, _fmt, ##__VA_ARGS__)
+
+#define cnss_plat_ipc_info(_fmt, ...) \
+ cnss_plat_ipc_log_print(proc_name, __func__, \
+ KERN_INFO, _fmt, ##__VA_ARGS__)
+
+#define cnss_plat_ipc_dbg(_fmt, ...) \
+ cnss_plat_ipc_log_print(proc_name, __func__, \
+ KERN_DEBUG, _fmt, ##__VA_ARGS__)
+/**
+ * cnss_plat_ipc_init_file_data() - Initialize file transfer context data
+ * @name: File name
+ * @buf: Buffer pointer for file contents
+ * @buf_size: Buffer size for download / upload
+ * @file_size: File size for upload
+ *
+ * Return: File data pointer
+ */
+static
+struct cnss_plat_ipc_file_data *cnss_plat_ipc_init_file_data(char *name,
+ char *buf,
+ u32 buf_size,
+ u32 file_size)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_file_data *fd;
+ int id;
+
+ fd = kmalloc(sizeof(*fd), GFP_KERNEL);
+ if (!fd)
+ goto end;
+ fd->name = name;
+ fd->buf = buf;
+ fd->buf_size = buf_size;
+ fd->file_size = file_size;
+ fd->seg_index = 0;
+ fd->end = 0;
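+ /* seg_len = ceil(file_size / CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01) */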
+ if (file_size)
+ fd->seg_len =
+ (file_size / CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01) +
+ !!(file_size % CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01);
+ else
+ fd->seg_len = 0;
+ init_completion(&fd->complete);
+ mutex_lock(&svc->file_idr_lock);
+ id = idr_alloc_cyclic(&svc->file_idr, fd, 0, U32_MAX, GFP_KERNEL);
+ if (id < 0) {
+ kfree(fd);
+ fd = NULL;
+ } else {
+ fd->id = id;
+ }
+ mutex_unlock(&svc->file_idr_lock);
+end:
+ return fd;
+}
+
+/**
+ * cnss_plat_ipc_deinit_file_data() - Release file transfer context data
+ * @fd: File data pointer
+ *
+ * Return: 0 on success, negative error values otherwise
+ */
+static int cnss_plat_ipc_deinit_file_data(struct cnss_plat_ipc_file_data *fd)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret = 0;
+
+ if (unlikely(!fd))
+ return -EINVAL;
+
+ mutex_lock(&svc->file_idr_lock);
+ idr_remove(&svc->file_idr, fd->id);
+ mutex_unlock(&svc->file_idr_lock);
+
+ if (!fd->end)
+ ret = -EINVAL;
+ kfree(fd);
+ return ret;
+}
+
+/**
+ * cnss_plat_ipc_qmi_update_clients() - Inform registered clients for status
+ * update
+ *
+ * Return: None
+ */
+static void cnss_plat_ipc_qmi_update_clients(void)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int i;
+
+ for (i = 0; i < CNSS_PLAT_IPC_MAX_CLIENTS; i++) {
+ if (svc->connection_update_cb[i])
+ svc->connection_update_cb[i](svc->cb_ctx[i],
+ svc->client_connected);
+ }
+}
+
+/**
+ * cnss_plat_ipc_qmi_file_upload() - Upload data as platform accessible file
+ * @file_name: File name to store in platform data location
+ * @file_buf: Pointer to buffer with file contents
+ * @file_size: Provides the size of buffer / file size
+ *
+ * Return: 0 on success, negative error values otherwise
+ */
+int cnss_plat_ipc_qmi_file_upload(char *file_name, u8 *file_buf,
+ u32 file_size)
+{
+ struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01 ind;
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret;
+ struct cnss_plat_ipc_file_data *fd;
+
+ if (!svc->client_connected || !file_name || !file_buf)
+ return -EINVAL;
+
+ cnss_plat_ipc_info("File name: %s Size: %d\n", file_name, file_size);
+
+ if (file_size == 0 || file_size > CNSS_MAX_FILE_SIZE)
+ return -EINVAL;
+
+ fd = cnss_plat_ipc_init_file_data(file_name, file_buf, file_size,
+ file_size);
+ if (!fd) {
+ cnss_plat_ipc_err("Unable to initialize file transfer data\n");
+ return -EINVAL;
+ }
+ scnprintf(ind.file_name, CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01, "%s",
+ fd->name);
+ ind.file_size = fd->file_size;
+ ind.file_id = fd->id;
+
+ ret = qmi_send_indication
+ (svc->svc_hdl, &svc->client_sq,
+ CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_V01,
+ CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei, &ind);
+
+ if (ret < 0) {
+ cnss_plat_ipc_err("QMI failed: %d\n", ret);
+ goto end;
+ }
+ ret = wait_for_completion_timeout(&fd->complete,
+ msecs_to_jiffies
+ (CNSS_PLAT_IPC_QMI_FILE_TXN_TIMEOUT));
+ if (!ret)
+ cnss_plat_ipc_err("Timeout Uploading file: %s\n", fd->name);
+
+end:
+ ret = cnss_plat_ipc_deinit_file_data(fd);
+ cnss_plat_ipc_dbg("Status: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_upload);
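+
+/* Upload flow in brief: the kernel sends a FILE_UPLOAD indication carrying
+ * the file name, size and a file ID; the daemon then pulls the contents with
+ * FILE_UPLOAD requests, one segment per request, and the handler below
+ * answers each request with the next buffer segment until "end" is set.
+ */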
+
+/**
+ * cnss_plat_ipc_qmi_file_upload_req_handler() - QMI Upload data request handler
+ * @handle: Pointer to QMI handle
+ * @sq: QMI socket
+ * @txn: QMI transaction pointer
+ * @decoded_msg: Pointer to decoded QMI message
+ *
+ * Handles the QMI upload sequence from userspace. It uses the file descriptor
+ * ID to upload buffer contents to QMI messages as segments.
+ *
+ * Return: None
+ */
+static void
+cnss_plat_ipc_qmi_file_upload_req_handler(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded_msg)
+{
+ struct cnss_plat_ipc_qmi_file_upload_req_msg_v01 *req_msg;
+ struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01 *resp;
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret = 0;
+ struct cnss_plat_ipc_file_data *fd;
+
+ req_msg = (struct cnss_plat_ipc_qmi_file_upload_req_msg_v01 *)
+ decoded_msg;
+ if (!req_msg)
+ return;
+ cnss_plat_ipc_dbg("File ID: %d Seg Index: %d\n", req_msg->file_id,
+ req_msg->seg_index);
+
+ mutex_lock(&svc->file_idr_lock);
+ fd = idr_find(&svc->file_idr, req_msg->file_id);
+ mutex_unlock(&svc->file_idr_lock);
+ if (!fd) {
+ cnss_plat_ipc_err("Invalid File ID %d\n", req_msg->file_id);
+ return;
+ }
+
+ if (req_msg->seg_index != fd->seg_index) {
+ cnss_plat_ipc_err("File %s transfer segment failure\n", fd->name);
+ complete(&fd->complete);
+ }
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return;
+
+ resp->file_id = fd->id;
+ resp->seg_index = fd->seg_index++;
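+ /* Send at most CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01 bytes per response */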
+ resp->seg_buf_len =
+ (fd->buf_size > CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01 ?
+ CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01 : fd->buf_size);
+ resp->end = (fd->seg_index == fd->seg_len);
+ memcpy(resp->seg_buf, fd->buf, resp->seg_buf_len);
+
+ cnss_plat_ipc_dbg("ID: %d Seg ID: %d Len: %d End: %d\n", resp->file_id,
+ resp->seg_index, resp->seg_buf_len, resp->end);
+
+ ret = qmi_send_response
+ (svc->svc_hdl, sq, txn,
+ CNSS_PLAT_IPC_QMI_FILE_UPLOAD_RESP_V01,
+ CNSS_PLAT_IPC_QMI_FILE_UPLOAD_RESP_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei,
+ resp);
+
+ if (ret < 0) {
+ cnss_plat_ipc_err("QMI failed: %d\n", ret);
+ goto end;
+ }
+
+ fd->buf_size -= resp->seg_buf_len;
+ fd->buf += resp->seg_buf_len;
+ if (resp->end) {
+ fd->end = true;
+ complete(&fd->complete);
+ }
+end:
+ kfree(resp);
+}
+
+/**
+ * cnss_plat_ipc_qmi_file_download() - Download platform accessible file
+ * @file_name: File name to get from platform data location
+ * @buf: Pointer to the buffer to store file contents
+ * @size: Provides the size of the buffer. It is updated to reflect the file
+ * size at the end of the file download.
+ *
+ * Return: 0 on success, negative error values otherwise
+ */
+int cnss_plat_ipc_qmi_file_download(char *file_name, char *buf, u32 *size)
+{
+ struct cnss_plat_ipc_qmi_file_download_ind_msg_v01 ind;
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret;
+ struct cnss_plat_ipc_file_data *fd;
+
+ if (!svc->client_connected || !file_name || !buf)
+ return -EINVAL;
+
+ fd = cnss_plat_ipc_init_file_data(file_name, buf, *size, 0);
+ if (!fd) {
+ cnss_plat_ipc_err("Unable to initialize file transfer data\n");
+ return -EINVAL;
+ }
+
+ scnprintf(ind.file_name, CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01, "%s",
+ file_name);
+ ind.file_id = fd->id;
+
+ ret = qmi_send_indication
+ (svc->svc_hdl, &svc->client_sq,
+ CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_V01,
+ CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei, &ind);
+
+ if (ret < 0) {
+ cnss_plat_ipc_err("QMI failed: %d\n", ret);
+ goto end;
+ }
+ ret = wait_for_completion_timeout(&fd->complete,
+ msecs_to_jiffies
+ (CNSS_PLAT_IPC_QMI_FILE_TXN_TIMEOUT));
+ if (!ret)
+ cnss_plat_ipc_err("Timeout downloading file:%s\n", fd->name);
+
+end:
+ *size = fd->file_size;
+ ret = cnss_plat_ipc_deinit_file_data(fd);
+ cnss_plat_ipc_dbg("Status: %d Size: %d\n", ret, *size);
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_file_download);
+
+/**
+ * cnss_plat_ipc_qmi_file_download_req_handler() - QMI download request handler
+ * @handle: Pointer to QMI handle
+ * @sq: QMI socket
+ * @txn: QMI transaction pointer
+ * @decoded_msg: Pointer to decoded QMI message
+ *
+ * Handles the QMI download request sequence to userspace. It uses the file
+ * descriptor ID to download QMI message buffer segment to file descriptor
+ * buffer.
+ *
+ * Return: None
+ */
+static void
+cnss_plat_ipc_qmi_file_download_req_handler(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded_msg)
+{
+ struct cnss_plat_ipc_qmi_file_download_req_msg_v01 *req_msg;
+ struct cnss_plat_ipc_qmi_file_download_resp_msg_v01 resp = {0};
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret = 0;
+ struct cnss_plat_ipc_file_data *fd;
+
+ req_msg = (struct cnss_plat_ipc_qmi_file_download_req_msg_v01 *)
+ decoded_msg;
+ if (!req_msg)
+ return;
+ cnss_plat_ipc_dbg("File ID: %d Size: %d Seg Len: %d Index: %d End: %d\n",
+ req_msg->file_id, req_msg->file_size,
+ req_msg->seg_buf_len, req_msg->seg_index,
+ req_msg->end);
+
+ mutex_lock(&svc->file_idr_lock);
+ fd = idr_find(&svc->file_idr, req_msg->file_id);
+ mutex_unlock(&svc->file_idr_lock);
+ if (!fd) {
+ cnss_plat_ipc_err("Invalid File ID: %d\n", req_msg->file_id);
+ return;
+ }
+
+ if (req_msg->file_size > fd->buf_size) {
+ cnss_plat_ipc_err("File %s size %d larger than buffer size %d\n",
+ fd->name, req_msg->file_size, fd->buf_size);
+ goto file_error;
+ }
+ if (req_msg->seg_buf_len > CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01 ||
+ ((req_msg->seg_buf_len + fd->file_size) > fd->buf_size)) {
+ cnss_plat_ipc_err("Segment buf ID: %d buffer size %d not allowed\n",
+ req_msg->seg_index, req_msg->seg_buf_len);
+ goto file_error;
+ }
+ if (req_msg->seg_index != fd->seg_index) {
+ cnss_plat_ipc_err("File %s transfer segment failure\n",
+ fd->name);
+ goto file_error;
+ }
+
+ memcpy(fd->buf, req_msg->seg_buf, req_msg->seg_buf_len);
+ fd->seg_index++;
+ fd->buf += req_msg->seg_buf_len;
+ fd->file_size += req_msg->seg_buf_len;
+
+ resp.file_id = fd->id;
+ resp.seg_index = fd->seg_index;
+ ret = qmi_send_response
+ (svc->svc_hdl, sq, txn,
+ CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_RESP_V01,
+ CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei,
+ &resp);
+
+ if (ret < 0)
+ cnss_plat_ipc_err("QMI failed: %d\n", ret);
+
+ if (req_msg->end) {
+ fd->end = true;
+ complete(&fd->complete);
+ }
+
+ return;
+file_error:
+ complete(&fd->complete);
+}
+
+/**
+ * cnss_plat_ipc_qmi_init_setup_req_handler() - Init_Setup QMI message handler
+ * @handle: Pointer to QMI handle
+ * @sq: QMI socket
+ * @txn: QMI transaction pointer
+ * @decoded_msg: Pointer to decoded QMI message
+ *
+ * Handles the QMI init setup handshake message from userspace.
+ *
+ * Return: None
+ */
+static void
+cnss_plat_ipc_qmi_init_setup_req_handler(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded_msg)
+{
+ struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 *req_msg;
+ struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01 resp = {0};
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int ret = 0;
+
+ if (!svc->client_connected) {
+ cnss_plat_ipc_info("CNSS Daemon Connected. QMI Socket Node: %d Port: %d\n",
+ sq->sq_node, sq->sq_port);
+ svc->client_sq = *sq;
+ svc->client_connected = true;
+ cnss_plat_ipc_qmi_update_clients();
+ } else {
+ cnss_plat_ipc_err("CNSS Daemon already connected. Invalid new client\n");
+ return;
+ }
+
+ req_msg =
+ (struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 *)decoded_msg;
+ cnss_plat_ipc_dbg("MAC: %d HW_TRC: %d CAL: %d\n",
+ req_msg->dms_mac_addr_supported,
+ req_msg->qdss_hw_trace_override,
+ req_msg->cal_file_available_bitmask);
+
+ svc->cfg.dms_mac_addr_supported = req_msg->dms_mac_addr_supported;
+ svc->cfg.qdss_hw_trace_override = req_msg->qdss_hw_trace_override;
+ svc->cfg.cal_file_available_bitmask =
+ req_msg->cal_file_available_bitmask;
+
+ ret = qmi_send_response
+ (svc->svc_hdl, sq, txn,
+ CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_V01,
+ CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_MSG_V01_MAX_MSG_LEN,
+ cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ cnss_plat_ipc_err("QMI failed: %d\n", ret);
+}
+
+/**
+ * cnss_plat_ipc_qmi_disconnect_cb() - Handler for QMI node disconnect specific
+ * to node and port
+ * @handle: Pointer to QMI handle
+ * @node: QMI node that is disconnected
+ * @port: QMI port that is disconnected
+ *
+ * Return: None
+ */
+static void cnss_plat_ipc_qmi_disconnect_cb(struct qmi_handle *handle,
+ unsigned int node,
+ unsigned int port)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ struct cnss_plat_ipc_file_data *fd;
+ u32 file_id;
+
+ if (svc->svc_hdl != handle) {
+ cnss_plat_ipc_err("Invalid QMI Handle\n");
+ return;
+ }
+
+ if (svc->client_connected && svc->client_sq.sq_node == node &&
+ svc->client_sq.sq_port == port) {
+ cnss_plat_ipc_err("CNSS Daemon disconnected. QMI Socket Node:%d Port:%d\n",
+ node, port);
+ svc->client_sq.sq_node = 0;
+ svc->client_sq.sq_port = 0;
+ svc->client_sq.sq_family = 0;
+ svc->client_connected = false;
+
+ /* Daemon killed. Fail any download / upload in progress. This
+ * will also free stale fd
+ */
+ mutex_lock(&svc->file_idr_lock);
+ idr_for_each_entry(&svc->file_idr, fd, file_id)
+ complete(&fd->complete);
+ mutex_unlock(&svc->file_idr_lock);
+ cnss_plat_ipc_qmi_update_clients();
+ }
+}
+
+/**
+ * cnss_plat_ipc_qmi_bye_cb() - Handler for QMI node disconnect for all port of
+ * the given node.
+ * @handle: Pointer to QMI handle
+ * @node: QMI node that is disconnected
+ *
+ * Return: None
+ */
+static void cnss_plat_ipc_qmi_bye_cb(struct qmi_handle *handle,
+ unsigned int node)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+
+ cnss_plat_ipc_qmi_disconnect_cb(handle, node, svc->client_sq.sq_port);
+}
+
+static struct qmi_ops cnss_plat_ipc_qmi_ops = {
+ /* inform a client that all clients from a node are gone */
+ .bye = cnss_plat_ipc_qmi_bye_cb,
+ .del_client = cnss_plat_ipc_qmi_disconnect_cb,
+};
+
+static struct qmi_msg_handler cnss_plat_ipc_qmi_req_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_V01,
+ .ei = cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei,
+ .decoded_size =
+ CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_MSG_V01_MAX_MSG_LEN,
+ .fn = cnss_plat_ipc_qmi_init_setup_req_handler,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_V01,
+ .ei = cnss_plat_ipc_qmi_file_download_req_msg_v01_ei,
+ .decoded_size =
+ CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ .fn = cnss_plat_ipc_qmi_file_download_req_handler,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_V01,
+ .ei = cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei,
+ .decoded_size =
+ CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ .fn = cnss_plat_ipc_qmi_file_upload_req_handler,
+ }
+};
+
+/**
+ * cnss_plat_ipc_qmi_user_config() - Get User space config for CNSS platform
+ *
+ * Return: Pointer to user space client config
+ */
+struct cnss_plat_ipc_user_config *cnss_plat_ipc_qmi_user_config(void)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+
+ if (!svc->client_connected)
+ return NULL;
+
+ return &svc->cfg;
+}
+EXPORT_SYMBOL(cnss_plat_ipc_qmi_user_config);
+
+/**
+ * cnss_plat_ipc_register() - Register for QMI IPC client status update
+ * @connection_update_cb: Function pointer for callback
+ * @cb_ctx: Callback context
+ *
+ * Return: 0 on success, negative error value otherwise
+ */
+int cnss_plat_ipc_register(cnss_plat_ipc_connection_update
+ connection_update_cb, void *cb_ctx)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+
+ if (svc->num_user >= CNSS_PLAT_IPC_MAX_CLIENTS) {
+ cnss_plat_ipc_err("Max Service users reached\n");
+ return -EINVAL;
+ }
+
+ svc->connection_update_cb[svc->num_user] = connection_update_cb;
+ svc->cb_ctx[svc->num_user] = cb_ctx;
+ svc->num_user++;
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_plat_ipc_register);
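+
+/* Illustrative registration sketch; the callback and context names are
+ * hypothetical, and the callback signature is inferred from how it is invoked
+ * in cnss_plat_ipc_qmi_update_clients() (see cnss_plat_ipc_qmi.h for the
+ * exact cnss_plat_ipc_connection_update prototype):
+ *
+ *   static void my_conn_update(void *cb_ctx, bool connected)
+ *   {
+ *           // react to the CNSS daemon connecting or disconnecting
+ *   }
+ *
+ *   cnss_plat_ipc_register(my_conn_update, my_ctx);
+ */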
+
+/**
+ * cnss_plat_ipc_unregister() - Unregister QMI IPC client status callback
+ * @cb_ctx: Callback context provided during registration
+ *
+ * Return: None
+ */
+void cnss_plat_ipc_unregister(void *cb_ctx)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+ int i;
+
+ for (i = 0; i < svc->num_user; i++) {
+ if (svc->cb_ctx[i] == cb_ctx) {
+ svc->cb_ctx[i] = NULL;
+ svc->connection_update_cb[i] = NULL;
+ svc->num_user--;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(cnss_plat_ipc_unregister);
+
+/**
+ * cnss_plat_ipc_init_fn() - CNSS platform QMI service init function
+ * @work: Work structure used to schedule this init function
+ *
+ * Initialize a QMI handle and register a new QMI service for the CNSS
+ * platform.
+ *
+ * Return: None
+ */
+static void cnss_plat_ipc_init_fn(struct work_struct *work)
+{
+ int ret = 0, retry = 0;
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+
+ svc->svc_hdl = kzalloc(sizeof(*svc->svc_hdl), GFP_KERNEL);
+ if (!svc->svc_hdl)
+ return;
+
+retry:
+ ret = qmi_handle_init(svc->svc_hdl,
+ CNSS_PLAT_IPC_QMI_MAX_MSG_SIZE_V01,
+ &cnss_plat_ipc_qmi_ops,
+ cnss_plat_ipc_qmi_req_handlers);
+ if (ret < 0) {
+ /* If QMI fails to init, retry for a total of
+ * QMI_INIT_RETRY_DELAY_MS * QMI_INIT_RETRY_MAX_TIMES ms
+ * (250 ms * 240 = 60 seconds).
+ */
+ if (retry++ < QMI_INIT_RETRY_MAX_TIMES) {
+ msleep(QMI_INIT_RETRY_DELAY_MS);
+ goto retry;
+ }
+ cnss_plat_ipc_err("Failed to init QMI handle after %d ms * %d, err = %d\n",
+ ret, QMI_INIT_RETRY_DELAY_MS,
+ QMI_INIT_RETRY_MAX_TIMES);
+ goto free_svc_hdl;
+ }
+
+ ret = qmi_add_server(svc->svc_hdl,
+ CNSS_PLATFORM_SERVICE_ID_V01,
+ CNSS_PLATFORM_SERVICE_VERS_V01, 0);
+ if (ret < 0) {
+ cnss_plat_ipc_err("Server add fail: %d\n", ret);
+ goto release_svc_hdl;
+ }
+
+ cnss_plat_ipc_info("CNSS Platform IPC QMI Service is started\n");
+ idr_init(&svc->file_idr);
+ mutex_init(&svc->file_idr_lock);
+ return;
+
+release_svc_hdl:
+ qmi_handle_release(svc->svc_hdl);
+free_svc_hdl:
+ kfree(svc->svc_hdl);
+}
+
+/**
+ * cnss_plat_ipc_is_valid_dt_node_found - Check if valid device tree node
+ * present
+ *
+ * Valid device tree node means a node with "qcom,wlan" property present
+ * and "status" property not disabled.
+ *
+ * Return: true if valid device tree node found, false if not found
+ */
+static bool cnss_plat_ipc_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_node_with_property(dn, "qcom,wlan") {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+ if (dn)
+ return true;
+
+ return false;
+}
+
+void cnss_plat_ipc_logging_init(void)
+{
+ cnss_plat_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES, "cnss_plat", 0);
+ if (!cnss_plat_ipc_log_context)
+ cnss_plat_ipc_err("Unable to create log context\n");
+}
+
+void cnss_plat_ipc_logging_deinit(void)
+{
+ if (cnss_plat_ipc_log_context) {
+ ipc_log_context_destroy(cnss_plat_ipc_log_context);
+ cnss_plat_ipc_log_context = NULL;
+ }
+}
+
+static DECLARE_WORK(cnss_plat_ipc_init_work, cnss_plat_ipc_init_fn);
+
+static int __init cnss_plat_ipc_qmi_svc_init(void)
+{
+ if (!cnss_plat_ipc_is_valid_dt_node_found())
+ return -ENODEV;
+
+ /* Schedule a work to do real init to avoid blocking here */
+ cnss_plat_ipc_logging_init();
+ schedule_work(&cnss_plat_ipc_init_work);
+ return 0;
+}
+
+/**
+ * cnss_plat_ipc_qmi_svc_exit() - CNSS Platform qmi service exit
+ *
+ * Release all resources during exit
+ *
+ * Return: None
+ */
+static void __exit cnss_plat_ipc_qmi_svc_exit(void)
+{
+ struct cnss_plat_ipc_qmi_svc_ctx *svc = &plat_ipc_qmi_svc;
+
+ cancel_work_sync(&cnss_plat_ipc_init_work);
+
+ if (svc->svc_hdl) {
+ qmi_handle_release(svc->svc_hdl);
+ kfree(svc->svc_hdl);
+ idr_destroy(&svc->file_idr);
+ }
+
+ cnss_plat_ipc_logging_deinit();
+}
+
+module_init(cnss_plat_ipc_qmi_svc_init);
+module_exit(cnss_plat_ipc_qmi_svc_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS Platform IPC QMI Service");
diff --git a/cnss_utils/cnss_plat_ipc_service_v01.c b/cnss_utils/cnss_plat_ipc_service_v01.c
new file mode 100644
index 0000000..c8cb323
--- /dev/null
+++ b/cnss_utils/cnss_plat_ipc_service_v01.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "cnss_plat_ipc_service_v01.h"
+
+struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ dms_mac_addr_supported),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ qdss_hw_trace_override),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_req_msg_v01,
+ cal_file_available_bitmask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01,
+ drv_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_ind_msg_v01,
+ file_name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_ind_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ file_size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x04,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ seg_index),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x05,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ seg_buf_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x05,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_req_msg_v01,
+ seg_buf),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x04,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_download_resp_msg_v01,
+ seg_index),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ file_name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01,
+ file_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_req_msg_v01,
+ seg_index),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x04,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x05,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ seg_index),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x06,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ seg_buf_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x06,
+ .offset = offsetof(struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01,
+ seg_buf),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
diff --git a/cnss_utils/cnss_plat_ipc_service_v01.h b/cnss_utils/cnss_plat_ipc_service_v01.h
new file mode 100644
index 0000000..6d19af1
--- /dev/null
+++ b/cnss_utils/cnss_plat_ipc_service_v01.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CNSS_PLAT_IPC_SERVICE_V01_H
+#define CNSS_PLAT_IPC_SERVICE_V01_H
+
+#define CNSS_PLATFORM_SERVICE_ID_V01 0x42E
+#define CNSS_PLATFORM_SERVICE_VERS_V01 0x01
+
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_V01 0x0003
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_V01 0x0004
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_V01 0x0002
+#define CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_V01 0x0001
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_V01 0x0005
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_RESP_V01 0x0003
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_RESP_V01 0x0005
+#define CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_V01 0x0001
+
+#define CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01 32
+#define CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01 61440
+#define CNSS_PLAT_IPC_QMI_MAX_MSG_SIZE_V01 65535
+
+#define CNSS_PLAT_IPC_QMI_DRIVER_CBC_DONE_V01 ((u64)0x01ULL)
+#define CNSS_PLAT_IPC_QMI_DRIVER_WLAN_ACTIVE_V01 ((u64)0x02ULL)
+
+struct cnss_plat_ipc_qmi_init_setup_req_msg_v01 {
+ u8 dms_mac_addr_supported;
+ u8 qdss_hw_trace_override;
+ u32 cal_file_available_bitmask;
+};
+
+#define CNSS_PLAT_IPC_QMI_INIT_SETUP_REQ_MSG_V01_MAX_MSG_LEN 15
+extern struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_req_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_init_setup_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u64 drv_status;
+};
+
+#define CNSS_PLAT_IPC_QMI_INIT_SETUP_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info cnss_plat_ipc_qmi_init_setup_resp_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_download_ind_msg_v01 {
+ char file_name[CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01 + 1];
+ u32 file_id;
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 42
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_download_ind_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_download_req_msg_v01 {
+ u32 file_id;
+ u32 file_size;
+ u8 end;
+ u32 seg_index;
+ u32 seg_buf_len;
+ u8 seg_buf[CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01];
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 61470
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_download_req_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 file_id;
+ u32 seg_index;
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_download_resp_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_upload_ind_msg_v01 {
+ char file_name[CNSS_PLAT_IPC_QMI_MAX_FILE_NAME_LEN_V01 + 1];
+ u32 file_id;
+ u32 file_size;
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_IND_MSG_V01_MAX_MSG_LEN 49
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_ind_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_upload_req_msg_v01 {
+ u32 file_id;
+ u32 seg_index;
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_req_msg_v01_ei[];
+
+struct cnss_plat_ipc_qmi_file_upload_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 file_id;
+ u8 end;
+ u32 seg_index;
+ u32 seg_buf_len;
+ u8 seg_buf[CNSS_PLAT_IPC_QMI_MAX_DATA_SIZE_V01];
+};
+
+#define CNSS_PLAT_IPC_QMI_FILE_UPLOAD_RESP_MSG_V01_MAX_MSG_LEN 61470
+extern struct qmi_elem_info cnss_plat_ipc_qmi_file_upload_resp_msg_v01_ei[];
+
+#endif
diff --git a/cnss_utils/cnss_utils.c b/cnss_utils/cnss_utils.c
new file mode 100644
index 0000000..6bb7dfc
--- /dev/null
+++ b/cnss_utils/cnss_utils.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017, 2019, 2021 The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "cnss_utils: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <net/cnss_utils.h>
+
+#define CNSS_MAX_CH_NUM 157
+struct cnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+};
+
+struct cnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+#define MAC_PREFIX_LEN 2
+struct cnss_wlan_mac_addr {
+ u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+ u32 no_of_mac_addr_set;
+};
+
+enum mac_type {
+ CNSS_MAC_PROVISIONED,
+ CNSS_MAC_DERIVED,
+};
+
+static struct cnss_utils_priv {
+ struct cnss_unsafe_channel_list unsafe_channel_list;
+ struct cnss_dfs_nol_info dfs_nol_info;
+ /* generic mutex for unsafe channel */
+ struct mutex unsafe_channel_list_lock;
+ /* generic spin-lock for dfs_nol info */
+ spinlock_t dfs_nol_info_lock;
+ int driver_load_cnt;
+ struct cnss_wlan_mac_addr wlan_mac_addr;
+ struct cnss_wlan_mac_addr wlan_der_mac_addr;
+ enum cnss_utils_cc_src cc_source;
+ struct dentry *root_dentry;
+} *cnss_utils_priv;
+
+int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list, u16 ch_count)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if (!unsafe_ch_list || ch_count > CNSS_MAX_CH_NUM) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ priv->unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count == 0)
+ goto end;
+
+ memcpy(priv->unsafe_channel_list.unsafe_ch_list,
+ unsafe_ch_list, ch_count * sizeof(u16));
+
+end:
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel);
+
+int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len <
+ (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = priv->unsafe_channel_list.unsafe_ch_count;
+ memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list,
+ priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel);
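+
+/* Illustrative set/get pairing (sketch only; "dev", "ch" and "cnt" are
+ * hypothetical caller-side variables):
+ *
+ *   u16 ch[CNSS_MAX_CH_NUM] = { 11, 36, 149 };
+ *   u16 cnt;
+ *
+ *   cnss_utils_set_wlan_unsafe_channel(dev, ch, 3);
+ *   cnss_utils_get_wlan_unsafe_channel(dev, ch, &cnt, sizeof(ch));
+ */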
+
+int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len)
+{
+ void *temp;
+ void *old_nol_info;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ temp = kmemdup(info, info_len, GFP_ATOMIC);
+ if (!temp)
+ return -ENOMEM;
+
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+ dfs_info = &priv->dfs_nol_info;
+ old_nol_info = dfs_info->dfs_nol_info;
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ kfree(old_nol_info);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol);
+
+int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len)
+{
+ int len;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+
+ dfs_info = &priv->dfs_nol_info;
+ if (!dfs_info->dfs_nol_info ||
+ dfs_info->dfs_nol_info_len == 0) {
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol);
+
+void cnss_utils_increment_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ ++(priv->driver_load_cnt);
+}
+EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt);
+
+int cnss_utils_get_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->driver_load_cnt;
+}
+EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
+
+static int set_wlan_mac_address(const u8 *mac_list, const uint32_t len,
+ enum mac_type type)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ u32 no_of_mac_addr;
+ struct cnss_wlan_mac_addr *addr = NULL;
+ int iter;
+ u8 *temp = NULL;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (len == 0 || (len % ETH_ALEN) != 0) {
+ pr_err("Invalid length %d\n", len);
+ return -EINVAL;
+ }
+
+ no_of_mac_addr = len / ETH_ALEN;
+ if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+ pr_err("Exceed maximum supported MAC address %u %u\n",
+ MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+ return -EINVAL;
+ }
+
+ if (type == CNSS_MAC_PROVISIONED)
+ addr = &priv->wlan_mac_addr;
+ else
+ addr = &priv->wlan_der_mac_addr;
+
+ if (addr->no_of_mac_addr_set) {
+ pr_err("WLAN MAC address is already set, num %d type %d\n",
+ addr->no_of_mac_addr_set, type);
+ return 0;
+ }
+
+ addr->no_of_mac_addr_set = no_of_mac_addr;
+ temp = &addr->mac_addr[0][0];
+
+ for (iter = 0; iter < no_of_mac_addr;
+ ++iter, temp += ETH_ALEN, mac_list += ETH_ALEN) {
+ ether_addr_copy(temp, mac_list);
+ pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ temp[0], temp[1], temp[2],
+ temp[3], temp[4], temp[5]);
+ }
+ return 0;
+}
+
+int cnss_utils_set_wlan_mac_address(const u8 *mac_list, const uint32_t len)
+{
+ return set_wlan_mac_address(mac_list, len, CNSS_MAC_PROVISIONED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
+
+int cnss_utils_set_wlan_derived_mac_address(const u8 *mac_list,
+ const uint32_t len)
+{
+ return set_wlan_mac_address(mac_list, len, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_derived_mac_address);
+
+static u8 *get_wlan_mac_address(struct device *dev,
+ u32 *num, enum mac_type type)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ struct cnss_wlan_mac_addr *addr = NULL;
+
+ if (!priv)
+ goto out;
+
+ if (type == CNSS_MAC_PROVISIONED)
+ addr = &priv->wlan_mac_addr;
+ else
+ addr = &priv->wlan_der_mac_addr;
+
+ if (!addr->no_of_mac_addr_set) {
+ pr_err("WLAN MAC address is not set, type %d\n", type);
+ goto out;
+ }
+ *num = addr->no_of_mac_addr_set;
+ return &addr->mac_addr[0][0];
+
+out:
+ *num = 0;
+ return NULL;
+}
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+ return get_wlan_mac_address(dev, num, CNSS_MAC_PROVISIONED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+
+u8 *cnss_utils_get_wlan_derived_mac_address(struct device *dev,
+ uint32_t *num)
+{
+ return get_wlan_mac_address(dev, num, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_derived_mac_address);
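+
+/*
+ * Usage sketch (illustrative only): a consumer reads the provisioned
+ * addresses back as a flat array of num * ETH_ALEN bytes and copies the
+ * first entry into a local buffer.
+ *
+ *    u32 num;
+ *    u8 first[ETH_ALEN];
+ *    u8 *macs = cnss_utils_get_wlan_mac_address(dev, &num);
+ *
+ *    if (macs && num)
+ *        ether_addr_copy(first, macs);
+ */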
+
+void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ priv->cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_set_cc_source);
+
+enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_get_cc_source);
+
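+/*
+ * Debugfs write handler for the "mac_address" node created in
+ * cnss_utils_debugfs_create() below. The expected input is the MAC type
+ * ("provisioned" or "derived") on the first line and a "0x"-prefixed hex
+ * string of one or more concatenated 6-byte addresses on the second line.
+ * Example (assuming debugfs is mounted at /sys/kernel/debug, with a
+ * made-up address):
+ *
+ *    echo -e "provisioned\n0x001122334455" > \
+ *        /sys/kernel/debug/cnss_utils/mac_address
+ */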
+static ssize_t cnss_utils_mac_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_utils_priv *priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[128];
+ char *input, *mac_type, *mac_address;
+ u8 *dest_mac;
+ u8 val;
+ const char *delim = "\n";
+ size_t len = 0;
+ char temp[3] = "";
+
+ len = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+ buf[len] = '\0';
+
+ input = buf;
+
+ mac_type = strsep(&input, delim);
+ if (!mac_type)
+ return -EINVAL;
+ if (!input)
+ return -EINVAL;
+
+ mac_address = strsep(&input, delim);
+ if (!mac_address)
+ return -EINVAL;
+ if (strcmp("0x", mac_address)) {
+ pr_err("Invalid MAC prefix\n");
+ return -EINVAL;
+ }
+
+ len = strlen(mac_address);
+ mac_address += MAC_PREFIX_LEN;
+ len -= MAC_PREFIX_LEN;
+ if (len < ETH_ALEN * 2 || len > ETH_ALEN * 2 * MAX_NO_OF_MAC_ADDR ||
+ len % (ETH_ALEN * 2) != 0) {
+ pr_err("Invalid MAC address length %zu\n", len);
+ return -EINVAL;
+ }
+
+ if (!strcmp("provisioned", mac_type)) {
+ dest_mac = &priv->wlan_mac_addr.mac_addr[0][0];
+ priv->wlan_mac_addr.no_of_mac_addr_set = len / (ETH_ALEN * 2);
+ } else if (!strcmp("derived", mac_type)) {
+ dest_mac = &priv->wlan_der_mac_addr.mac_addr[0][0];
+ priv->wlan_der_mac_addr.no_of_mac_addr_set =
+ len / (ETH_ALEN * 2);
+ } else {
+ pr_err("Invalid MAC address type %s\n", mac_type);
+ return -EINVAL;
+ }
+
+ while (len >= 2) {
+ temp[0] = *mac_address++;
+ temp[1] = *mac_address++;
+ len -= 2;
+ if (kstrtou8(temp, 16, &val))
+ return -EINVAL;
+ *dest_mac++ = val;
+ }
+ return count;
+}
+
+static int cnss_utils_mac_show(struct seq_file *s, void *data)
+{
+ u8 mac[6];
+ int i;
+ struct cnss_utils_priv *priv = s->private;
+ struct cnss_wlan_mac_addr *addr = NULL;
+
+ addr = &priv->wlan_mac_addr;
+ if (addr->no_of_mac_addr_set) {
+ seq_puts(s, "\nProvisioned MAC addresseses\n");
+ for (i = 0; i < addr->no_of_mac_addr_set; i++) {
+ ether_addr_copy(mac, addr->mac_addr[i]);
+ seq_printf(s, "MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+ }
+ }
+
+ addr = &priv->wlan_der_mac_addr;
+ if (addr->no_of_mac_addr_set) {
+ seq_puts(s, "\nDerived MAC addresseses\n");
+ for (i = 0; i < addr->no_of_mac_addr_set; i++) {
+ ether_addr_copy(mac, addr->mac_addr[i]);
+ seq_printf(s, "MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+ }
+ }
+
+ return 0;
+}
+
+static int cnss_utils_mac_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_utils_mac_show, inode->i_private);
+}
+
+static const struct file_operations cnss_utils_mac_fops = {
+ .read = seq_read,
+ .write = cnss_utils_mac_write,
+ .release = single_release,
+ .open = cnss_utils_mac_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_utils_debugfs_create(struct cnss_utils_priv *priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+ root_dentry = debugfs_create_dir("cnss_utils", NULL);
+
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ pr_err("Unable to create debugfs %d\n", ret);
+ goto out;
+ }
+ priv->root_dentry = root_dentry;
+ debugfs_create_file("mac_address", 0600, root_dentry, priv,
+ &cnss_utils_mac_fops);
+out:
+ return ret;
+}
+
+/**
+ * cnss_utils_is_valid_dt_node_found - Check if a valid device tree node is present
+ *
+ * A valid device tree node is one that has the "qcom,wlan" property and
+ * whose "status" property is not set to disabled.
+ *
+ * Return: true if a valid device tree node is found, false otherwise
+ */
+static bool cnss_utils_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_node_with_property(dn, "qcom,wlan") {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+ if (dn)
+ return true;
+
+ return false;
+}
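+
+/*
+ * The helper above would, for example, treat a node like the following as
+ * valid (illustrative only; real platforms name and populate the node
+ * differently):
+ *
+ *    wlan@0 {
+ *        qcom,wlan;
+ *        status = "okay";
+ *    };
+ */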
+
+static int __init cnss_utils_init(void)
+{
+ struct cnss_utils_priv *priv = NULL;
+
+ if (!cnss_utils_is_valid_dt_node_found())
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->cc_source = CNSS_UTILS_SOURCE_CORE;
+
+ mutex_init(&priv->unsafe_channel_list_lock);
+ spin_lock_init(&priv->dfs_nol_info_lock);
+ cnss_utils_debugfs_create(priv);
+ cnss_utils_priv = priv;
+
+ return 0;
+}
+
+static void __exit cnss_utils_exit(void)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ debugfs_remove_recursive(priv->root_dentry);
+ kfree(priv->dfs_nol_info.dfs_nol_info);
+ kfree(priv);
+ cnss_utils_priv = NULL;
+}
+
+module_init(cnss_utils_init);
+module_exit(cnss_utils_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS Utilities Driver");
diff --git a/cnss_utils/device_management_service_v01.c b/cnss_utils/device_management_service_v01.c
new file mode 100644
index 0000000..729615f
--- /dev/null
+++ b/cnss_utils/device_management_service_v01.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "device_management_service_v01.h"
+
+struct qmi_elem_info dms_get_mac_address_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum dms_device_mac_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ dms_get_mac_address_req_msg_v01,
+ device),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(dms_get_mac_address_req_msg_v01_ei);
+
+struct qmi_elem_info dms_get_mac_address_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ dms_get_mac_address_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dms_get_mac_address_resp_msg_v01,
+ mac_address_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dms_get_mac_address_resp_msg_v01,
+ mac_address_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_DMS_MAC_ADDR_MAX_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dms_get_mac_address_resp_msg_v01,
+ mac_address),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(dms_get_mac_address_resp_msg_v01_ei);
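+
+/*
+ * Consumption sketch (illustrative only, error handling elided, assuming an
+ * already connected struct qmi_handle *qmi and a struct sockaddr_qrtr *sq
+ * for the DMS service): the element-info tables above are handed to the
+ * in-kernel QMI helpers, which encode and decode the TLVs on the wire.
+ *
+ *    struct dms_get_mac_address_req_msg_v01 req = {
+ *        .device = DMS_DEVICE_MAC_WLAN_V01,
+ *    };
+ *    struct dms_get_mac_address_resp_msg_v01 resp = {};
+ *    struct qmi_txn txn;
+ *
+ *    qmi_txn_init(qmi, &txn, dms_get_mac_address_resp_msg_v01_ei, &resp);
+ *    qmi_send_request(qmi, sq, &txn, QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
+ *                     DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
+ *                     dms_get_mac_address_req_msg_v01_ei, &req);
+ *    qmi_txn_wait(&txn, msecs_to_jiffies(5000));
+ */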
diff --git a/cnss_utils/device_management_service_v01.h b/cnss_utils/device_management_service_v01.h
new file mode 100644
index 0000000..8dbdd2e
--- /dev/null
+++ b/cnss_utils/device_management_service_v01.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef DEVICE_MANAGEMENT_SERVICE_V01_H
+#define DEVICE_MANAGEMENT_SERVICE_V01_H
+
+#define DMS_SERVICE_ID_V01 0x02
+#define DMS_SERVICE_VERS_V01 0x01
+
+#define QMI_DMS_GET_MAC_ADDRESS_RESP_V01 0x005C
+#define QMI_DMS_GET_MAC_ADDRESS_REQ_V01 0x005C
+#define QMI_DMS_MAC_ADDR_MAX_V01 8
+
+enum dms_device_mac_enum_v01 {
+ DMS_DEVICE_MAC_ENUM_MIN_VAL_V01 = INT_MIN,
+ DMS_DEVICE_MAC_WLAN_V01 = 0,
+ DMS_DEVICE_MAC_BT_V01 = 1,
+ DMS_DEVICE_MAC_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct dms_get_mac_address_req_msg_v01 {
+ enum dms_device_mac_enum_v01 device;
+};
+
+#define DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info dms_get_mac_address_req_msg_v01_ei[];
+
+struct dms_get_mac_address_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 mac_address_valid;
+ u32 mac_address_len;
+ u8 mac_address[QMI_DMS_MAC_ADDR_MAX_V01];
+};
+
+#define DMS_GET_MAC_ADDRESS_RESP_MSG_V01_MAX_MSG_LEN 19
+extern struct qmi_elem_info dms_get_mac_address_resp_msg_v01_ei[];
+
+#endif
diff --git a/cnss_utils/wlan_firmware_service_v01.c b/cnss_utils/wlan_firmware_service_v01.c
new file mode 100644
index 0000000..93a6d59
--- /dev/null
+++ b/cnss_utils/wlan_firmware_service_v01.c
@@ -0,0 +1,5495 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+
+#include "wlan_firmware_service_v01.h"
+#include <linux/module.h>
+#include <linux/of.h>
+
+static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_rri_over_ddr_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rri_over_ddr_cfg_s_v01,
+ base_addr_low),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rri_over_ddr_cfg_s_v01,
+ base_addr_high),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_msi_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_msi_cfg_s_v01,
+ ce_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_msi_cfg_s_v01,
+ msi_vector),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_cfg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_cfg_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_s_v01,
+ mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_s_v01,
+ mem_cfg),
+ .ei_array = wlfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_resp_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_resp_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_resp_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_mem_seg_resp_s_v01,
+ restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_host_ddr_range_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_ddr_range_s_v01,
+ start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_ddr_range_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_m3_segment_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_m3_segment_info_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_m3_segment_info_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_m3_segment_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_m3_segment_info_s_v01,
+ name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_dev_mem_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_dev_mem_info_s_v01,
+ start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_dev_mem_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_mlo_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_mlo_chip_info_s_v01,
+ num_local_links),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_mlo_chip_info_s_v01,
+ hw_link_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_host_mlo_chip_info_s_v01,
+ valid_mlo_link_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
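+/*
+ * Note on the message tables below: each optional TLV is described by a
+ * pair of entries sharing one tlv_type, a QMI_OPT_FLAG entry that maps to
+ * the corresponding <field>_valid flag in the C structure followed by the
+ * entry for the field itself; variable-length arrays are likewise described
+ * by a QMI_DATA_LEN entry followed by a VAR_LEN_ARRAY entry.
+ */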
+struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_req_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_req_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_save_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_save_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_free_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_trace_free_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ respond_get_info_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ respond_get_info_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ m3_dump_upload_req_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ m3_dump_upload_req_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ wfc_call_twt_config_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ wfc_call_twt_config_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ qdss_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ m3_dump_upload_segments_req_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ m3_dump_upload_segments_req_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ind_register_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ind_register_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_fw_ready_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_msa_ready_ind_msg_v01,
+ hang_data_addr_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_msa_ready_ind_msg_v01,
+ hang_data_addr_offset),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_msa_ready_ind_msg_v01,
+ hang_data_length_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_msa_ready_ind_msg_v01,
+ hang_data_length),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_msa_ready_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_pin_connect_result_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wlan_mode_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wlan_mode_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ rri_over_ddr_cfg_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rri_over_ddr_cfg_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ rri_over_ddr_cfg),
+ .ei_array = wlfw_rri_over_ddr_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_msi_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg),
+ .ei_array = wlfw_msi_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wlan_cfg_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wlan_cfg_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cap_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_caps_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ fw_caps),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ rd_card_chain_cap_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_rd_card_chain_cap_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ rd_card_chain_cap),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ dev_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_DEV_MEM_NUM_V01,
+ .elem_size = sizeof(struct wlfw_dev_mem_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ dev_mem_info),
+ .ei_array = wlfw_dev_mem_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cap_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_bdf_download_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_bdf_download_resp_msg_v01,
+ host_bdf_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_bdf_download_resp_msg_v01,
+ host_bdf_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_bdf_download_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ cal_remove_supported_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ cal_remove_supported),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ cal_file_download_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_report_req_msg_v01,
+ cal_file_download_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_report_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_report_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_download_ind_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_download_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_download_ind_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_download_ind_msg_v01,
+ cal_data_location),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_initiate_cal_download_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cal_download_req_msg_v01,
+ cal_data_location),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_download_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_download_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_data_location),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_initiate_cal_update_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_update_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_cal_update_resp_msg_v01,
+ cal_data_location),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_update_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_msa_info_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_msa_info_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_msa_ready_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_msa_ready_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ini_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ini_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_athdiag_read_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_athdiag_read_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_athdiag_write_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_athdiag_write_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_vbatt_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_vbatt_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_mac_addr_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_mac_addr_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_duration_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ cal_duration),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ platform_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ platform_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ ddr_range_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_HOST_DDR_RANGE_SIZE_V01,
+ .elem_size = sizeof(struct wlfw_host_ddr_range_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ ddr_range),
+ .ei_array = wlfw_host_ddr_range_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ host_build_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_host_build_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ host_build_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_capable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_capable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_chip_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_chip_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_group_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_group_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ max_mlo_peer_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ max_mlo_peer),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_num_chips_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_num_chips),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_CHIPS_V01,
+ .elem_size = sizeof(struct wlfw_host_mlo_chip_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ mlo_chip_info),
+ .ei_array = wlfw_host_mlo_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ feature_list_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ feature_list),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x28,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_wlan_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x28,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_wlan_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x29,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_wlan_vaps_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x29,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ num_wlan_vaps),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x2A,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ wake_msi_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x2A,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ wake_msi_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_host_cap_req_msg_v01_ei);
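+
+/*
+ * Illustrative sketch only (not part of the generated service data): how a
+ * client would typically encode and send wlfw_host_cap_req_msg_v01 with the
+ * in-kernel QMI helpers from <linux/soc/qcom/qmi.h>, using the element info
+ * tables above.  Optional TLVs are transmitted only when their _valid flag is
+ * set before the value.  The message ID and max-length macros are assumed to
+ * come from the companion header wlan_firmware_service_v01.h; the driver's
+ * real call sites live in cnss2/qmi.c.
+ *
+ *	struct wlfw_host_cap_req_msg_v01 req = {};
+ *	struct wlfw_host_cap_resp_msg_v01 resp = {};
+ *	struct qmi_txn txn;
+ *	int ret;
+ *
+ *	req.num_clients_valid = 1;
+ *	req.num_clients = 1;
+ *
+ *	ret = qmi_txn_init(qmi, &txn, wlfw_host_cap_resp_msg_v01_ei, &resp);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	ret = qmi_send_request(qmi, NULL, &txn, QMI_WLFW_HOST_CAP_REQ_V01,
+ *			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ *			       wlfw_host_cap_req_msg_v01_ei, &req);
+ *	if (ret < 0) {
+ *		qmi_txn_cancel(&txn);
+ *		return ret;
+ *	}
+ *
+ *	ret = qmi_txn_wait(&txn, msecs_to_jiffies(10000));
+ */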
+
+struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_host_cap_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_request_mem_ind_msg_v01_ei);
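+
+/*
+ * Illustrative sketch only: unsolicited indications such as the firmware's
+ * memory request are decoded against the table above by registering it in a
+ * qmi_msg_handler array passed to qmi_handle_init() (<linux/soc/qcom/qmi.h>).
+ * The message ID macro and the callback name below are assumptions for the
+ * example; the real handler table is in cnss2/qmi.c.
+ *
+ *	static void request_mem_ind_cb(struct qmi_handle *qmi,
+ *				       struct sockaddr_qrtr *sq,
+ *				       struct qmi_txn *txn, const void *data)
+ *	{
+ *		const struct wlfw_request_mem_ind_msg_v01 *ind = data;
+ *		// ind->mem_seg_len entries of ind->mem_seg[] are valid here
+ *	}
+ *
+ *	static const struct qmi_msg_handler handlers[] = {
+ *		{
+ *			.type = QMI_INDICATION,
+ *			.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ *			.ei = wlfw_request_mem_ind_msg_v01_ei,
+ *			.decoded_size =
+ *				sizeof(struct wlfw_request_mem_ind_msg_v01),
+ *			.fn = request_mem_ind_cb,
+ *		},
+ *		{}
+ *	};
+ */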
+
+struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_respond_mem_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_respond_mem_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_fw_mem_ready_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_fw_init_done_ind_msg_v01,
+ hang_data_addr_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_fw_init_done_ind_msg_v01,
+ hang_data_addr_offset),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_fw_init_done_ind_msg_v01,
+ hang_data_length_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_fw_init_done_ind_msg_v01,
+ hang_data_length),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_fw_init_done_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_rejuvenate_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_rejuvenate_ack_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_rejuvenate_ack_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_dynamic_feature_mask_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_dynamic_feature_mask_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_info_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_info_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_xo_cal_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_done_ind_msg_v01,
+ cal_file_upload_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_cal_done_ind_msg_v01,
+ cal_file_upload_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_cal_done_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_req_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_req_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_req_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_req_mem_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_mem_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mem_info_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mem_info_req_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_mem_info_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_mem_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mem_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_mem_info_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_save_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ source),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ mem_seg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ file_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_save_ind_msg_v01,
+ file_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_save_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_data_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_data_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_data_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_data_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_data_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_config_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_config_download_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_config_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_config_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_config_download_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ mode_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_qdss_trace_mode_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ option_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ option),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ hw_trc_disable_override_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_qmi_param_value_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_req_msg_v01,
+ hw_trc_disable_override),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_mode_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_mode_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_trace_free_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_free_ind_msg_v01,
+ mem_seg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_free_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_qdss_trace_free_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_trace_free_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_shutdown_req_msg_v01,
+ shutdown_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_shutdown_req_msg_v01,
+ shutdown),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_shutdown_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_shutdown_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_shutdown_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_antenna_switch_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_antenna_switch_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_antenna_switch_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_antenna_switch_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_antenna_switch_resp_msg_v01,
+ antenna_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_antenna_switch_resp_msg_v01,
+ antenna),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_antenna_switch_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_antenna_grant_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_antenna_grant_req_msg_v01,
+ grant_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_antenna_grant_req_msg_v01,
+ grant),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_antenna_grant_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_antenna_grant_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_antenna_grant_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_antenna_grant_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ wfc_call_status_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ wfc_call_status),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ wfc_call_active_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ wfc_call_active),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ all_wfc_calls_held_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ all_wfc_calls_held),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ is_wfc_emergency_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ is_wfc_emergency),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ twt_ims_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ twt_ims_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ twt_ims_int_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ twt_ims_int),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ media_quality_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_wfc_media_quality_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_req_msg_v01,
+ media_quality),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wfc_call_status_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_wfc_call_status_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wfc_call_status_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_get_info_req_msg_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_get_info_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_get_info_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_get_info_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_get_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_get_info_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ is_last_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ is_last),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ seq_no_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ seq_no),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_respond_get_info_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_device_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_device_info_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ bar_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ bar_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ bar_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ bar_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ mhi_state_info_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ mhi_state_info_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ mhi_state_info_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_device_info_resp_msg_v01,
+ mhi_state_info_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_device_info_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_dump_upload_req_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_req_ind_msg_v01,
+ pdev_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_req_ind_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_req_ind_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_dump_upload_req_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_dump_upload_done_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_done_req_msg_v01,
+ pdev_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_done_req_msg_v01,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_dump_upload_done_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_dump_upload_done_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_done_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_dump_upload_done_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_soc_wake_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_soc_wake_req_msg_v01,
+ wake_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_soc_wake_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_soc_wake_req_msg_v01,
+ wake),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_soc_wake_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_soc_wake_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_soc_wake_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_soc_wake_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_power_save_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_power_save_req_msg_v01,
+ power_save_mode_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_power_save_mode_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_power_save_req_msg_v01,
+ power_save_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_power_save_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_power_save_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_power_save_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_power_save_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_wfc_call_twt_config_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_int_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_int),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_upo_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_upo),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_sp_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_sp),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_dl_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_dl),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_config_changed_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct
+ wlfw_wfc_call_twt_config_ind_msg_v01,
+ twt_sta_config_changed),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_wfc_call_twt_config_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_qdss_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_qdss_mem_ready_ind_msg_v01_ei);
+
+struct qmi_elem_info wlfw_pcie_gen_switch_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pcie_gen_speed_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_pcie_gen_switch_req_msg_v01,
+ pcie_speed),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_pcie_gen_switch_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_pcie_gen_switch_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_pcie_gen_switch_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_pcie_gen_switch_resp_msg_v01_ei);
+
+struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_segments_req_ind_msg_v01,
+ pdev_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_segments_req_ind_msg_v01,
+ no_of_valid_segments),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01,
+ .elem_size = sizeof(struct wlfw_m3_segment_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_m3_dump_upload_segments_req_ind_msg_v01,
+ m3_segment),
+ .ei_array = wlfw_m3_segment_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei);
+
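+/*
+ * Illustrative sketch only (not taken from this change): clients are expected
+ * to pair the element-info arrays above with the in-kernel QMI helpers from
+ * <linux/soc/qcom/qmi.h>. The handle "qmi_hdl" below is an assumed, already
+ * initialized struct qmi_handle.
+ *
+ *	struct wlfw_shutdown_req_msg_v01 req = { .shutdown_valid = 1,
+ *						 .shutdown = 1 };
+ *	struct wlfw_shutdown_resp_msg_v01 resp = {};
+ *	struct qmi_txn txn;
+ *
+ *	qmi_txn_init(qmi_hdl, &txn, wlfw_shutdown_resp_msg_v01_ei, &resp);
+ *	qmi_send_request(qmi_hdl, NULL, &txn, QMI_WLFW_SHUTDOWN_REQ_V01,
+ *			 WLFW_SHUTDOWN_REQ_MSG_V01_MAX_MSG_LEN,
+ *			 wlfw_shutdown_req_msg_v01_ei, &req);
+ *	qmi_txn_wait(&txn, msecs_to_jiffies(5000));
+ */
+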
+/**
+ * wlfw_is_valid_dt_node_found - Check whether a valid device tree node is present
+ *
+ * A valid device tree node is one that has the "qcom,wlan" property and whose
+ * "status" property is not disabled.
+ *
+ * Return: true if a valid device tree node is found, false otherwise
+ */
+static bool wlfw_is_valid_dt_node_found(void)
+{
+ struct device_node *dn = NULL;
+
+ for_each_node_with_property(dn, "qcom,wlan") {
+ if (of_device_is_available(dn))
+ break;
+ }
+
+	if (dn) {
+		/* Release the reference taken by the iterator on early break. */
+		of_node_put(dn);
+		return true;
+	}
+
+	return false;
+}
+
+static int __init wlfw_init(void)
+{
+ if (!wlfw_is_valid_dt_node_found())
+ return -ENODEV;
+
+ return 0;
+}
+
+module_init(wlfw_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WLAN FW QMI service");
diff --git a/cnss_utils/wlan_firmware_service_v01.h b/cnss_utils/wlan_firmware_service_v01.h
new file mode 100644
index 0000000..458f472
--- /dev/null
+++ b/cnss_utils/wlan_firmware_service_v01.h
@@ -0,0 +1,1271 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#include <linux/soc/qcom/qmi.h>
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_POWER_SAVE_RESP_V01 0x0050
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_QDSS_TRACE_MODE_RESP_V01 0x0045
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_QDSS_TRACE_MEM_INFO_RESP_V01 0x0040
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_QDSS_TRACE_DATA_RESP_V01 0x0042
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_QDSS_TRACE_MODE_REQ_V01 0x0045
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01 0x0053
+#define QMI_WLFW_ANTENNA_GRANT_RESP_V01 0x0048
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_SOC_WAKE_REQ_V01 0x004F
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_M3_DUMP_UPLOAD_DONE_RESP_V01 0x004E
+#define QMI_WLFW_QDSS_TRACE_SAVE_IND_V01 0x0041
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_SHUTDOWN_REQ_V01 0x0043
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_ANTENNA_GRANT_REQ_V01 0x0048
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_RESPOND_GET_INFO_IND_V01 0x004B
+#define QMI_WLFW_QDSS_TRACE_DATA_REQ_V01 0x0042
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01 0x0040
+#define QMI_WLFW_ANTENNA_SWITCH_REQ_V01 0x0047
+#define QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01 0x003F
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_V01 0x0044
+#define QMI_WLFW_SOC_WAKE_RESP_V01 0x004F
+#define QMI_WLFW_GET_INFO_RESP_V01 0x004A
+#define QMI_WLFW_PCIE_GEN_SWITCH_RESP_V01 0x0053
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_M3_DUMP_UPLOAD_SEGMENTS_REQ_IND_V01 0x0054
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_M3_DUMP_UPLOAD_DONE_REQ_V01 0x004E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_ANTENNA_SWITCH_RESP_V01 0x0047
+#define QMI_WLFW_DEVICE_INFO_REQ_V01 0x004C
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044
+#define QMI_WLFW_GET_INFO_REQ_V01 0x004A
+#define QMI_WLFW_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_M3_DUMP_UPLOAD_REQ_IND_V01 0x004D
+#define QMI_WLFW_WFC_CALL_STATUS_RESP_V01 0x0049
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+#define QMI_WLFW_POWER_SAVE_REQ_V01 0x0050
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_SHUTDOWN_RESP_V01 0x0043
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01 0x0051
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_WFC_CALL_STATUS_REQ_V01 0x0049
+#define QMI_WLFW_DEVICE_INFO_RESP_V01 0x004C
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_QDSS_TRACE_FREE_IND_V01 0x0046
+#define QMI_WLFW_QDSS_MEM_READY_IND_V01 0x0052
+
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01 64
+#define QMI_WLFW_MAX_HOST_DDR_RANGE_SIZE_V01 3
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_DEV_MEM_NUM_V01 4
+#define QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01 2
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 52
+#define QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01 256
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01 10
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLFW_MAX_NUM_MLO_CHIPS_V01 3
+
+enum wlfw_driver_mode_enum_v01 {
+ WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+ QMI_WLFW_FTM_CALIBRATION_V01 = 10,
+ WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+ WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+ WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_mem_type_enum_v01 {
+ WLFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLFW_MEM_BDF_V01 = 2,
+ QMI_WLFW_MEM_M3_V01 = 3,
+ QMI_WLFW_MEM_CAL_V01 = 4,
+ QMI_WLFW_MEM_DPD_V01 = 5,
+ QMI_WLFW_MEM_QDSS_V01 = 6,
+ QMI_WLFW_MEM_HANG_DATA_V01 = 7,
+ QMI_WLFW_MLO_GLOBAL_MEM_V01 = 8,
+ QMI_WLFW_PAGEABLE_MEM_V01 = 9,
+ WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_qdss_trace_mode_enum_v01 {
+ WLFW_QDSS_TRACE_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_QDSS_TRACE_OFF_V01 = 0,
+ QMI_WLFW_QDSS_TRACE_ON_V01 = 1,
+ WLFW_QDSS_TRACE_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_wfc_media_quality_v01 {
+ WLFW_WFC_MEDIA_QUALITY_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_WFC_MEDIA_QUAL_NOT_AVAILABLE_V01 = 0,
+ QMI_WLFW_WFC_MEDIA_QUAL_BAD_V01 = 1,
+ QMI_WLFW_WFC_MEDIA_QUAL_GOOD_V01 = 2,
+ QMI_WLFW_WFC_MEDIA_QUAL_EXCELLENT_V01 = 3,
+ WLFW_WFC_MEDIA_QUALITY_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_soc_wake_enum_v01 {
+ WLFW_SOC_WAKE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_WAKE_REQUEST_V01 = 0,
+ QMI_WLFW_WAKE_RELEASE_V01 = 1,
+ WLFW_SOC_WAKE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_host_build_type_v01 {
+ WLFW_HOST_BUILD_TYPE_MIN_VAL_V01 = INT_MIN,
+ QMI_HOST_BUILD_TYPE_UNSPECIFIED_V01 = 0,
+ QMI_HOST_BUILD_TYPE_PRIMARY_V01 = 1,
+ QMI_HOST_BUILD_TYPE_SECONDARY_V01 = 2,
+ WLFW_HOST_BUILD_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_qmi_param_value_v01 {
+ WLFW_QMI_PARAM_VALUE_MIN_VAL_V01 = INT_MIN,
+ QMI_PARAM_INVALID_V01 = 0,
+ QMI_PARAM_ENABLE_V01 = 1,
+ QMI_PARAM_DISABLE_V01 = 2,
+ WLFW_QMI_PARAM_VALUE_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_rd_card_chain_cap_v01 {
+ WLFW_RD_CARD_CHAIN_CAP_MIN_VAL_V01 = INT_MIN,
+ WLFW_RD_CARD_CHAIN_CAP_UNSPECIFIED_V01 = 0,
+ WLFW_RD_CARD_CHAIN_CAP_1x1_V01 = 1,
+ WLFW_RD_CARD_CHAIN_CAP_2x2_V01 = 2,
+ WLFW_RD_CARD_CHAIN_CAP_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pcie_gen_speed_v01 {
+ WLFW_PCIE_GEN_SPEED_MIN_VAL_V01 = INT_MIN,
+ QMI_PCIE_GEN_SPEED_INVALID_V01 = 0,
+ QMI_PCIE_GEN_SPEED_1_V01 = 1,
+ QMI_PCIE_GEN_SPEED_2_V01 = 2,
+ QMI_PCIE_GEN_SPEED_3_V01 = 3,
+ WLFW_PCIE_GEN_SPEED_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_power_save_mode_v01 {
+ WLFW_POWER_SAVE_MODE_MIN_VAL_V01 = INT_MIN,
+ WLFW_POWER_SAVE_ENTER_V01 = 0,
+ WLFW_POWER_SAVE_EXIT_V01 = 1,
+ WLFW_POWER_SAVE_MODE_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_m3_segment_type_v01 {
+ WLFW_M3_SEGMENT_TYPE_MIN_VAL_V01 = INT_MIN,
+ QMI_M3_SEGMENT_INVALID_V01 = 0,
+ QMI_M3_SEGMENT_PHYAREG_V01 = 1,
+ QMI_M3_SEGMENT_PHYDBG_V01 = 2,
+ QMI_M3_SEGMENT_WMAC0_REG_V01 = 3,
+ QMI_M3_SEGMENT_WCSSDBG_V01 = 4,
+ QMI_M3_SEGMENT_PHYAPDMEM_V01 = 5,
+ QMI_M3_SEGMENT_MAX_V01 = 6,
+ WLFW_M3_SEGMENT_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+enum cnss_feature_v01 {
+ CNSS_FEATURE_MIN_VAL_V01 = INT_MIN,
+ BOOTSTRAP_CLOCK_SELECT_V01 = 0,
+ CNSS_DRV_SUPPORT_V01 = 1,
+ CNSS_MAX_FEATURE_V01 = 64,
+ CNSS_FEATURE_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+#define QMI_WLFW_HW_XPA_V01 ((u64)0x01ULL)
+#define QMI_WLFW_CBC_FILE_DOWNLOAD_V01 ((u64)0x02ULL)
+
+#define QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ u32 pipe_num;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ u32 service_id;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_rri_over_ddr_cfg_s_v01 {
+ u32 base_addr_low;
+ u32 base_addr_high;
+};
+
+struct wlfw_msi_cfg_s_v01 {
+ u16 ce_id;
+ u16 msi_vector;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_host_ddr_range_s_v01 {
+ u64 start;
+ u64 size;
+};
+
+struct wlfw_m3_segment_info_s_v01 {
+ enum wlfw_m3_segment_type_v01 type;
+ u64 addr;
+ u64 size;
+ char name[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+};
+
+struct wlfw_dev_mem_info_s_v01 {
+ u64 start;
+ u64 size;
+};
+
+struct wlfw_host_mlo_chip_info_s_v01 {
+ u8 chip_id;
+ u8 num_local_links;
+ u8 hw_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+ u8 valid_mlo_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+ u8 qdss_trace_req_mem_enable_valid;
+ u8 qdss_trace_req_mem_enable;
+ u8 qdss_trace_save_enable_valid;
+ u8 qdss_trace_save_enable;
+ u8 qdss_trace_free_enable_valid;
+ u8 qdss_trace_free_enable;
+ u8 respond_get_info_enable_valid;
+ u8 respond_get_info_enable;
+ u8 m3_dump_upload_req_enable_valid;
+ u8 m3_dump_upload_req_enable;
+ u8 wfc_call_twt_config_enable_valid;
+ u8 wfc_call_twt_config_enable;
+ u8 qdss_mem_ready_enable_valid;
+ u8 qdss_mem_ready_enable;
+ u8 m3_dump_upload_segments_req_enable_valid;
+ u8 m3_dump_upload_segments_req_enable;
+};
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 86
+extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ u8 hang_data_addr_offset_valid;
+ u32 hang_data_addr_offset;
+ u8 hang_data_length_valid;
+ u16 hang_data_length;
+};
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 12
+extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+ u8 rri_over_ddr_cfg_valid;
+ struct wlfw_rri_over_ddr_cfg_s_v01 rri_over_ddr_cfg;
+ u8 msi_cfg_valid;
+ u32 msi_cfg_len;
+ struct wlfw_msi_cfg_s_v01 msi_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+};
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 866
+extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_caldata_read_timeout_valid;
+ u32 eeprom_caldata_read_timeout;
+ u8 fw_caps_valid;
+ u64 fw_caps;
+ u8 rd_card_chain_cap_valid;
+ enum wlfw_rd_card_chain_cap_v01 rd_card_chain_cap;
+ u8 dev_mem_info_valid;
+ struct wlfw_dev_mem_info_s_v01 dev_mem_info[QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+};
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 320
+extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 host_bdf_data_valid;
+ u64 host_bdf_data;
+};
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
+ u8 cal_remove_supported_valid;
+ u8 cal_remove_supported;
+ u8 cal_file_download_size_valid;
+ u64 cal_file_download_size;
+};
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 43
+extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
+};
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
+};
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6185
+extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
+};
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
+};
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6188
+extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+ u8 cal_duration_valid;
+ u16 cal_duration;
+ u8 platform_name_valid;
+ char platform_name[QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01 + 1];
+ u8 ddr_range_valid;
+ struct wlfw_host_ddr_range_s_v01 ddr_range[QMI_WLFW_MAX_HOST_DDR_RANGE_SIZE_V01];
+ u8 host_build_type_valid;
+ enum wlfw_host_build_type_v01 host_build_type;
+ u8 mlo_capable_valid;
+ u8 mlo_capable;
+ u8 mlo_chip_id_valid;
+ u16 mlo_chip_id;
+ u8 mlo_group_id_valid;
+ u8 mlo_group_id;
+ u8 max_mlo_peer_valid;
+ u16 max_mlo_peer;
+ u8 mlo_num_chips_valid;
+ u8 mlo_num_chips;
+ u8 mlo_chip_info_valid;
+ struct wlfw_host_mlo_chip_info_s_v01 mlo_chip_info[QMI_WLFW_MAX_NUM_MLO_CHIPS_V01];
+ u8 feature_list_valid;
+ u64 feature_list;
+ u8 num_wlan_clients_valid;
+ u16 num_wlan_clients;
+ u8 num_wlan_vaps_valid;
+ u8 num_wlan_vaps;
+ u8 wake_msi_addr_valid;
+ u32 wake_msi_addr;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 389
+extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1824
+extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 888
+extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_fw_init_done_ind_msg_v01 {
+ u8 hang_data_addr_offset_valid;
+ u32 hang_data_addr_offset;
+ u8 hang_data_length_valid;
+ u16 hang_data_length;
+};
+
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 12
+extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+struct wlfw_cal_done_ind_msg_v01 {
+ u8 cal_file_upload_size_valid;
+ u64 cal_file_upload_size;
+};
+
+#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_qdss_trace_req_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_QDSS_TRACE_REQ_MEM_IND_MSG_V01_MAX_MSG_LEN 1824
+extern struct qmi_elem_info wlfw_qdss_trace_req_mem_ind_msg_v01_ei[];
+
+struct wlfw_qdss_trace_mem_info_req_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN 888
+extern struct qmi_elem_info wlfw_qdss_trace_mem_info_req_msg_v01_ei[];
+
+struct wlfw_qdss_trace_mem_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_QDSS_TRACE_MEM_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_qdss_trace_mem_info_resp_msg_v01_ei[];
+
+struct wlfw_qdss_trace_save_ind_msg_v01 {
+ u32 source;
+ u32 total_size;
+ u8 mem_seg_valid;
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+ u8 file_name_valid;
+ char file_name[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+};
+
+#define WLFW_QDSS_TRACE_SAVE_IND_MSG_V01_MAX_MSG_LEN 921
+extern struct qmi_elem_info wlfw_qdss_trace_save_ind_msg_v01_ei[];
+
+struct wlfw_qdss_trace_data_req_msg_v01 {
+ u32 seg_id;
+};
+
+#define WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_qdss_trace_data_req_msg_v01_ei[];
+
+struct wlfw_qdss_trace_data_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_QDSS_TRACE_DATA_RESP_MSG_V01_MAX_MSG_LEN 6174
+extern struct qmi_elem_info wlfw_qdss_trace_data_resp_msg_v01_ei[];
+
+struct wlfw_qdss_trace_config_download_req_msg_v01 {
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6167
+extern struct qmi_elem_info wlfw_qdss_trace_config_download_req_msg_v01_ei[];
+
+struct wlfw_qdss_trace_config_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_qdss_trace_config_download_resp_msg_v01_ei[];
+
+struct wlfw_qdss_trace_mode_req_msg_v01 {
+ u8 mode_valid;
+ enum wlfw_qdss_trace_mode_enum_v01 mode;
+ u8 option_valid;
+ u64 option;
+ u8 hw_trc_disable_override_valid;
+ enum wlfw_qmi_param_value_v01 hw_trc_disable_override;
+};
+
+#define WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN 25
+extern struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[];
+
+struct wlfw_qdss_trace_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_QDSS_TRACE_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_qdss_trace_mode_resp_msg_v01_ei[];
+
+struct wlfw_qdss_trace_free_ind_msg_v01 {
+ u8 mem_seg_valid;
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_QDSS_TRACE_FREE_IND_MSG_V01_MAX_MSG_LEN 888
+extern struct qmi_elem_info wlfw_qdss_trace_free_ind_msg_v01_ei[];
+
+struct wlfw_shutdown_req_msg_v01 {
+ u8 shutdown_valid;
+ u8 shutdown;
+};
+
+#define WLFW_SHUTDOWN_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[];
+
+struct wlfw_shutdown_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_SHUTDOWN_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[];
+
+struct wlfw_antenna_switch_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_antenna_switch_req_msg_v01_ei[];
+
+struct wlfw_antenna_switch_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 antenna_valid;
+ u64 antenna;
+};
+
+#define WLFW_ANTENNA_SWITCH_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_antenna_switch_resp_msg_v01_ei[];
+
+struct wlfw_antenna_grant_req_msg_v01 {
+ u8 grant_valid;
+ u64 grant;
+};
+
+#define WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_antenna_grant_req_msg_v01_ei[];
+
+struct wlfw_antenna_grant_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ANTENNA_GRANT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_antenna_grant_resp_msg_v01_ei[];
+
+struct wlfw_wfc_call_status_req_msg_v01 {
+ u32 wfc_call_status_len;
+ u8 wfc_call_status[QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01];
+ u8 wfc_call_active_valid;
+ u8 wfc_call_active;
+ u8 all_wfc_calls_held_valid;
+ u8 all_wfc_calls_held;
+ u8 is_wfc_emergency_valid;
+ u8 is_wfc_emergency;
+ u8 twt_ims_start_valid;
+ u64 twt_ims_start;
+ u8 twt_ims_int_valid;
+ u16 twt_ims_int;
+ u8 media_quality_valid;
+ enum wlfw_wfc_media_quality_v01 media_quality;
+};
+
+#define WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN 296
+extern struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[];
+
+struct wlfw_wfc_call_status_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WFC_CALL_STATUS_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[];
+
+struct wlfw_get_info_req_msg_v01 {
+ u8 type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN 6153
+extern struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[];
+
+struct wlfw_get_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_GET_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[];
+
+struct wlfw_respond_get_info_ind_msg_v01 {
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 type_valid;
+ u8 type;
+ u8 is_last_valid;
+ u8 is_last;
+ u8 seq_no_valid;
+ u32 seq_no;
+};
+
+#define WLFW_RESPOND_GET_INFO_IND_MSG_V01_MAX_MSG_LEN 6164
+extern struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[];
+
+struct wlfw_device_info_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_DEVICE_INFO_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_device_info_req_msg_v01_ei[];
+
+struct wlfw_device_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 bar_addr_valid;
+ u64 bar_addr;
+ u8 bar_size_valid;
+ u32 bar_size;
+ u8 mhi_state_info_addr_valid;
+ u64 mhi_state_info_addr;
+ u8 mhi_state_info_size_valid;
+ u32 mhi_state_info_size;
+};
+
+#define WLFW_DEVICE_INFO_RESP_MSG_V01_MAX_MSG_LEN 43
+extern struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[];
+
+struct wlfw_m3_dump_upload_req_ind_msg_v01 {
+ u32 pdev_id;
+ u64 addr;
+ u64 size;
+};
+
+#define WLFW_M3_DUMP_UPLOAD_REQ_IND_MSG_V01_MAX_MSG_LEN 29
+extern struct qmi_elem_info wlfw_m3_dump_upload_req_ind_msg_v01_ei[];
+
+struct wlfw_m3_dump_upload_done_req_msg_v01 {
+ u32 pdev_id;
+ u32 status;
+};
+
+#define WLFW_M3_DUMP_UPLOAD_DONE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_m3_dump_upload_done_req_msg_v01_ei[];
+
+struct wlfw_m3_dump_upload_done_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_DUMP_UPLOAD_DONE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_m3_dump_upload_done_resp_msg_v01_ei[];
+
+struct wlfw_soc_wake_req_msg_v01 {
+ u8 wake_valid;
+ enum wlfw_soc_wake_enum_v01 wake;
+};
+
+#define WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_soc_wake_req_msg_v01_ei[];
+
+struct wlfw_soc_wake_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_SOC_WAKE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_soc_wake_resp_msg_v01_ei[];
+
+struct wlfw_power_save_req_msg_v01 {
+ u8 power_save_mode_valid;
+ enum wlfw_power_save_mode_v01 power_save_mode;
+};
+
+#define WLFW_POWER_SAVE_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_power_save_req_msg_v01_ei[];
+
+struct wlfw_power_save_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_POWER_SAVE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_power_save_resp_msg_v01_ei[];
+
+struct wlfw_wfc_call_twt_config_ind_msg_v01 {
+ u8 twt_sta_start_valid;
+ u64 twt_sta_start;
+ u8 twt_sta_int_valid;
+ u16 twt_sta_int;
+ u8 twt_sta_upo_valid;
+ u16 twt_sta_upo;
+ u8 twt_sta_sp_valid;
+ u16 twt_sta_sp;
+ u8 twt_sta_dl_valid;
+ u16 twt_sta_dl;
+ u8 twt_sta_config_changed_valid;
+ u8 twt_sta_config_changed;
+};
+
+#define WLFW_WFC_CALL_TWT_CONFIG_IND_MSG_V01_MAX_MSG_LEN 35
+extern struct qmi_elem_info wlfw_wfc_call_twt_config_ind_msg_v01_ei[];
+
+struct wlfw_qdss_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_QDSS_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_qdss_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_pcie_gen_switch_req_msg_v01 {
+ enum wlfw_pcie_gen_speed_v01 pcie_speed;
+};
+
+#define WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_pcie_gen_switch_req_msg_v01_ei[];
+
+struct wlfw_pcie_gen_switch_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_PCIE_GEN_SWITCH_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_pcie_gen_switch_resp_msg_v01_ei[];
+
+struct wlfw_m3_dump_upload_segments_req_ind_msg_v01 {
+ u32 pdev_id;
+ u32 no_of_valid_segments;
+ struct wlfw_m3_segment_info_s_v01 m3_segment[QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01];
+};
+
+#define WLFW_M3_DUMP_UPLOAD_SEGMENTS_REQ_IND_MSG_V01_MAX_MSG_LEN 387
+extern struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[];
+
+#endif
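The wlfw request/response structures above are meant to be paired with their _ei element-info tables and *_MAX_MSG_LEN macros when driving the kernel's QMI helpers (linux/soc/qcom/qmi.h). A minimal sketch of sending the SoC-wake request, assuming QMI_WLFW_SOC_WAKE_REQ_V01 is defined earlier in this header and that the caller already owns a connected struct qmi_handle:

#include <linux/kernel.h>
#include <linux/soc/qcom/qmi.h>
#include "wlan_firmware_service_v01.h"

static int example_send_soc_wake(struct qmi_handle *wlfw,
				 enum wlfw_soc_wake_enum_v01 wake)
{
	struct wlfw_soc_wake_req_msg_v01 req = {};
	struct wlfw_soc_wake_resp_msg_v01 resp = {};
	struct qmi_txn txn;
	int ret;

	req.wake_valid = 1;
	req.wake = wake;

	/* Decode the response into &resp using its element-info table */
	ret = qmi_txn_init(wlfw, &txn, wlfw_soc_wake_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	/* Encode and send the request; the message ID is assumed, not shown above */
	ret = qmi_send_request(wlfw, NULL, &txn, QMI_WLFW_SOC_WAKE_REQ_V01,
			       WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_soc_wake_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, 5 * HZ);
	if (ret < 0)
		return ret;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EINVAL;
}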
diff --git a/inc/cnss2.h b/inc/cnss2.h
new file mode 100644
index 0000000..1376995
--- /dev/null
+++ b/inc/cnss2.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
+
+#ifndef _NET_CNSS2_H
+#define _NET_CNSS2_H
+
+#include <linux/pci.h>
+
+#define CNSS_MAX_FILE_NAME 20
+#define CNSS_MAX_TIMESTAMP_LEN 32
+#define CNSS_MAX_DEV_MEM_NUM 4
+
+/*
+ * Temporary change for compilation; will be removed
+ * after the WLAN host driver switches to the new APIs
+ */
+#define CNSS_API_WITH_DEV
+
+enum cnss_bus_width_type {
+ CNSS_BUS_WIDTH_NONE,
+ CNSS_BUS_WIDTH_IDLE,
+ CNSS_BUS_WIDTH_LOW,
+ CNSS_BUS_WIDTH_MEDIUM,
+ CNSS_BUS_WIDTH_HIGH,
+ CNSS_BUS_WIDTH_VERY_HIGH,
+ CNSS_BUS_WIDTH_LOW_LATENCY
+};
+
+enum cnss_platform_cap_flag {
+ CNSS_HAS_EXTERNAL_SWREG = 0x01,
+ CNSS_HAS_UART_ACCESS = 0x02,
+ CNSS_HAS_DRV_SUPPORT = 0x04,
+};
+
+struct cnss_platform_cap {
+ u32 cap_flag;
+};
+
+struct cnss_fw_files {
+ char image_file[CNSS_MAX_FILE_NAME];
+ char board_data[CNSS_MAX_FILE_NAME];
+ char otp_data[CNSS_MAX_FILE_NAME];
+ char utf_file[CNSS_MAX_FILE_NAME];
+ char utf_board_data[CNSS_MAX_FILE_NAME];
+ char epping_file[CNSS_MAX_FILE_NAME];
+ char evicted_data[CNSS_MAX_FILE_NAME];
+};
+
+struct cnss_device_version {
+ u32 family_number;
+ u32 device_number;
+ u32 major_version;
+ u32 minor_version;
+};
+
+struct cnss_dev_mem_info {
+ u64 start;
+ u64 size;
+};
+
+struct cnss_soc_info {
+ void __iomem *va;
+ phys_addr_t pa;
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t board_id;
+ uint32_t soc_id;
+ uint32_t fw_version;
+ char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1];
+ struct cnss_device_version device_version;
+ struct cnss_dev_mem_info dev_mem_info[CNSS_MAX_DEV_MEM_NUM];
+};
+
+struct cnss_wlan_runtime_ops {
+ int (*runtime_suspend)(struct pci_dev *pdev);
+ int (*runtime_resume)(struct pci_dev *pdev);
+};
+
+enum cnss_driver_status {
+ CNSS_UNINITIALIZED,
+ CNSS_INITIALIZED,
+ CNSS_LOAD_UNLOAD,
+ CNSS_RECOVERY,
+ CNSS_FW_DOWN,
+ CNSS_HANG_EVENT,
+ CNSS_BUS_EVENT,
+};
+
+enum cnss_bus_event_type {
+ BUS_EVENT_PCI_LINK_DOWN = 0,
+
+ BUS_EVENT_INVALID = 0xFFFF,
+};
+
+struct cnss_hang_event {
+ void *hang_event_data;
+ u16 hang_event_data_len;
+};
+
+struct cnss_bus_event {
+ enum cnss_bus_event_type etype;
+ void *event_data;
+};
+
+struct cnss_uevent_data {
+ enum cnss_driver_status status;
+ void *data;
+};
+
+struct cnss_wlan_driver {
+ char *name;
+ int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *pdev);
+ int (*idle_restart)(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+ int (*idle_shutdown)(struct pci_dev *pdev);
+ int (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*shutdown)(struct pci_dev *pdev);
+ void (*crash_shutdown)(struct pci_dev *pdev);
+ int (*suspend)(struct pci_dev *pdev, pm_message_t state);
+ int (*resume)(struct pci_dev *pdev);
+ int (*suspend_noirq)(struct pci_dev *pdev);
+ int (*resume_noirq)(struct pci_dev *pdev);
+ void (*modem_status)(struct pci_dev *pdev, int state);
+ void (*update_status)(struct pci_dev *pdev, uint32_t status);
+ int (*update_event)(struct pci_dev *pdev,
+ struct cnss_uevent_data *uevent);
+ struct cnss_wlan_runtime_ops *runtime_ops;
+ const struct pci_device_id *id_table;
+};
+
+struct cnss_ce_tgt_pipe_cfg {
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+ u32 reserved;
+};
+
+struct cnss_ce_svc_pipe_cfg {
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
+};
+
+struct cnss_shadow_reg_cfg {
+ u16 ce_id;
+ u16 reg_offset;
+};
+
+struct cnss_shadow_reg_v2_cfg {
+ u32 addr;
+};
+
+struct cnss_rri_over_ddr_cfg {
+ u32 base_addr_low;
+ u32 base_addr_high;
+};
+
+struct cnss_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct cnss_ce_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct cnss_shadow_reg_cfg *shadow_reg_cfg;
+ u32 num_shadow_reg_v2_cfg;
+ struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
+ bool rri_over_ddr_cfg_valid;
+ struct cnss_rri_over_ddr_cfg rri_over_ddr_cfg;
+};
+
+enum cnss_driver_mode {
+ CNSS_MISSION,
+ CNSS_FTM,
+ CNSS_EPPING,
+ CNSS_WALTEST,
+ CNSS_OFF,
+ CNSS_CCPM,
+ CNSS_QVIT,
+ CNSS_CALIBRATION,
+};
+
+enum cnss_recovery_reason {
+ CNSS_REASON_DEFAULT,
+ CNSS_REASON_LINK_DOWN,
+ CNSS_REASON_RDDM,
+ CNSS_REASON_TIMEOUT,
+};
+
+enum cnss_remote_mem_type {
+ CNSS_REMOTE_MEM_TYPE_FW,
+ CNSS_REMOTE_MEM_TYPE_QDSS,
+ CNSS_REMOTE_MEM_TYPE_MAX,
+};
+
+struct cnss_mem_segment {
+ size_t size;
+ void *va;
+ phys_addr_t pa;
+};
+
+extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
+extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
+extern void cnss_device_crashed(struct device *dev);
+extern int cnss_pci_prevent_l1(struct device *dev);
+extern void cnss_pci_allow_l1(struct device *dev);
+extern int cnss_pci_link_down(struct device *dev);
+extern int cnss_pci_is_device_down(struct device *dev);
+extern void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_force_fw_assert(struct device *dev);
+extern int cnss_force_collect_rddm(struct device *dev);
+extern int cnss_qmi_send_get(struct device *dev);
+extern int cnss_qmi_send_put(struct device *dev);
+extern int cnss_qmi_send(struct device *dev, int type, void *cmd,
+ int cmd_len, void *cb_ctx,
+ int (*cb)(void *ctx, void *event, int event_len));
+extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
+extern int cnss_get_fw_files_for_target(struct device *dev,
+ struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version);
+extern int cnss_get_platform_cap(struct device *dev,
+ struct cnss_platform_cap *cap);
+extern struct iommu_domain *cnss_smmu_get_domain(struct device *dev);
+extern int cnss_smmu_map(struct device *dev,
+ phys_addr_t paddr, uint32_t *iova_addr, size_t size);
+extern int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size);
+extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
+extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth);
+extern int cnss_power_up(struct device *dev);
+extern int cnss_power_down(struct device *dev);
+extern int cnss_idle_restart(struct device *dev);
+extern int cnss_idle_shutdown(struct device *dev);
+extern void cnss_request_pm_qos(struct device *dev, u32 qos_val);
+extern void cnss_remove_pm_qos(struct device *dev);
+extern void cnss_lock_pm_sem(struct device *dev);
+extern void cnss_release_pm_sem(struct device *dev);
+extern void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags);
+extern void cnss_pci_unlock_reg_window(struct device *dev,
+ unsigned long *flags);
+extern int cnss_wlan_pm_control(struct device *dev, bool vote);
+extern int cnss_auto_suspend(struct device *dev);
+extern int cnss_auto_resume(struct device *dev);
+extern int cnss_pci_is_drv_connected(struct device *dev);
+extern int cnss_pci_force_wake_request_sync(struct device *dev, int timeout);
+extern int cnss_pci_force_wake_request(struct device *dev);
+extern int cnss_pci_is_device_awake(struct device *dev);
+extern int cnss_pci_force_wake_release(struct device *dev);
+extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors,
+ uint32_t *user_base_data,
+ uint32_t *base_vector);
+extern int cnss_get_msi_irq(struct device *dev, unsigned int vector);
+extern void cnss_get_msi_address(struct device *dev, uint32_t *msi_addr_low,
+ uint32_t *msi_addr_high);
+extern int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version);
+extern int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode);
+extern unsigned int cnss_get_boot_timeout(struct device *dev);
+extern int cnss_athdiag_read(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *output);
+extern int cnss_athdiag_write(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *input);
+extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
+extern int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed);
+extern int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg);
+extern int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
+ struct cnss_mem_segment segment[],
+ u32 segment_count);
+#endif /* _NET_CNSS2_H */
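cnss_wlan_register_driver() and struct cnss_wlan_driver are the entry points a WLAN host driver uses to attach to this platform driver. A minimal sketch, with hypothetical example_* symbols and a made-up PCI device ID; a real driver would also fill in reinit, shutdown, suspend/resume, update_status and the other callbacks:

#include <linux/module.h>
#include <linux/pci.h>
#include "cnss2.h"

static int example_wlan_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	/* Map BARs, request MSIs via cnss_get_user_msi_assignment(), etc. */
	return 0;
}

static void example_wlan_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id example_wlan_id_table[] = {
	{ PCI_DEVICE(0x17cb, 0x1107) },	/* hypothetical device ID */
	{ }
};

static struct cnss_wlan_driver example_wlan_driver = {
	.name = "example_wlan",
	.probe = example_wlan_probe,
	.remove = example_wlan_remove,
	.id_table = example_wlan_id_table,
};

static int __init example_wlan_init(void)
{
	/* CNSS calls .probe back once the device and firmware are ready */
	return cnss_wlan_register_driver(&example_wlan_driver);
}

static void __exit example_wlan_exit(void)
{
	cnss_wlan_unregister_driver(&example_wlan_driver);
}

module_init(example_wlan_init);
module_exit(example_wlan_exit);
MODULE_LICENSE("GPL");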
diff --git a/inc/cnss_nl.h b/inc/cnss_nl.h
new file mode 100644
index 0000000..e861513
--- /dev/null
+++ b/inc/cnss_nl.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */
+
+#ifndef _NET_CNSS_GENETLINK_H_
+#define _NET_CNSS_GENETLINK_H_
+
+#include <linux/types.h>
+
+#define CLD80211_MAX_COMMANDS 40
+#define CLD80211_MAX_NL_DATA 4096
+
+/**
+ * enum cld80211_attr - Attributes used by the driver/application to embed
+ * data in an nlmsg
+ *
+ * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
+ * attribute.
+ * @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ * @CLD80211_ATTR_META_DATA: Embed metadata for the above data. This lets the
+ * wlan driver peek into a request message without parsing the definition of
+ * the complete request message.
+ * @CLD80211_ATTR_CMD: cld80211 vendor subcommand in this attribute
+ * @CLD80211_ATTR_CMD_TAG_DATA: cld80211 vendor subcommand data is present in
+ * this attribute. It is a nested attribute with sub-attributes of the
+ * specified vendor subcommand.
+ *
+ * Any new message can be added in the future as another attribute
+ */
+enum cld80211_attr {
+ CLD80211_ATTR_VENDOR_DATA = 1,
+ CLD80211_ATTR_DATA,
+ CLD80211_ATTR_META_DATA,
+ CLD80211_ATTR_CMD,
+ CLD80211_ATTR_CMD_TAG_DATA,
+ /* add new attributes above here */
+
+ __CLD80211_ATTR_AFTER_LAST,
+ CLD80211_ATTR_MAX = __CLD80211_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum cld80211_multicast_groups - List of multicast groups supported
+ *
+ * @CLD80211_MCGRP_SVC_MSGS: WLAN service messages will be sent to this group.
+ * Ex: Status ind messages
+ * @CLD80211_MCGRP_HOST_LOGS: All logging-related messages from the driver will
+ * be sent to this multicast group
+ * @CLD80211_MCGRP_FW_LOGS: Firmware logging messages will be sent to this group
+ * @CLD80211_MCGRP_PER_PKT_STATS: Messages related to packet stats debugging
+ * infra will be sent to this group
+ * @CLD80211_MCGRP_DIAG_EVENTS: Driver/Firmware status logging diag events will
+ * be sent to this group
+ * @CLD80211_MCGRP_FATAL_EVENTS: Any fatal message generated in driver/firmware
+ * will be sent to this group
+ * @CLD80211_MCGRP_OEM_MSGS: All OEM messages will be sent to this group
+ * Ex: LOWI messages
+ */
+enum cld80211_multicast_groups {
+ CLD80211_MCGRP_SVC_MSGS,
+ CLD80211_MCGRP_HOST_LOGS,
+ CLD80211_MCGRP_FW_LOGS,
+ CLD80211_MCGRP_PER_PKT_STATS,
+ CLD80211_MCGRP_DIAG_EVENTS,
+ CLD80211_MCGRP_FATAL_EVENTS,
+ CLD80211_MCGRP_OEM_MSGS,
+};
+
+/**
+ * typedef cld80211_cb - Callback to be called when an nlmsg is received with
+ * the registered cmd_id command from userspace
+ * @data: Payload of the message to be sent to driver
+ * @data_len: Length of the payload
+ * @cb_ctx: callback context to be returned to driver when the callback
+ * is called
+ * @pid: process id of the sender
+ */
+typedef void (*cld80211_cb)(const void *data, int data_len,
+ void *cb_ctx, int pid);
+
+/**
+ * register_cld_cmd_cb() - Allows the cld driver to register a callback for
+ * a command
+ * @cmd_id: Command to be registered. Valid range [1, CLD80211_MAX_COMMANDS]
+ * @cb: Callback to be called when an nlmsg is received with the cmd_id
+ * command from userspace
+ * @cb_ctx: Context provided by the driver; passed back to the driver as the
+ * cb_ctx argument of @cb when the callback is invoked
+ */
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb cb, void *cb_ctx);
+
+/**
+ * deregister_cld_cmd_cb() - Allows the cld driver to de-register a command it
+ * has already registered
+ * @cmd_id: Command to be deregistered.
+ */
+int deregister_cld_cmd_cb(u8 cmd_id);
+
+/**
+ * cld80211_get_genl_family() - Returns the current generic netlink family
+ * context
+ */
+struct genl_family *cld80211_get_genl_family(void);
+
+#endif /* _NET_CNSS_GENETLINK_H_ */
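register_cld_cmd_cb()/deregister_cld_cmd_cb() above are how a driver hooks a cld80211 vendor command. A minimal sketch with a hypothetical command ID and callback; the cb_ctx passed at registration time comes back as the callback's cb_ctx argument:

#include <linux/kernel.h>
#include "cnss_nl.h"

#define EXAMPLE_CLD_CMD_ID 5	/* hypothetical, must be in [1, CLD80211_MAX_COMMANDS] */

static void example_cld_cb(const void *data, int data_len,
			   void *cb_ctx, int pid)
{
	/* data/data_len carry the nlmsg payload sent from userspace */
	pr_debug("cld80211 cmd from pid %d, %d bytes\n", pid, data_len);
}

static int example_cld_register(void *driver_ctx)
{
	return register_cld_cmd_cb(EXAMPLE_CLD_CMD_ID, example_cld_cb,
				   driver_ctx);
}

static void example_cld_unregister(void)
{
	deregister_cld_cmd_cb(EXAMPLE_CLD_CMD_ID);
}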
diff --git a/inc/cnss_plat_ipc_qmi.h b/inc/cnss_plat_ipc_qmi.h
new file mode 100644
index 0000000..c77c186
--- /dev/null
+++ b/inc/cnss_plat_ipc_qmi.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CNSS_PLAT_IPC_QMI_H
+#define _CNSS_PLAT_IPC_QMI_H
+
+/**
+ * struct cnss_plat_ipc_user_config - Config options provided by user space
+ * @dms_mac_addr_supported: DMS MAC address provisioning support
+ * @qdss_hw_trace_override: QDSS config for HW trace enable
+ * @cal_file_available_bitmask: Bitmask of calibration files available
+ */
+struct cnss_plat_ipc_user_config {
+ u8 dms_mac_addr_supported;
+ u8 qdss_hw_trace_override;
+ u32 cal_file_available_bitmask;
+};
+
+typedef void (*cnss_plat_ipc_connection_update)(void *cb_ctx,
+ bool connection_status);
+
+int cnss_plat_ipc_register(cnss_plat_ipc_connection_update
+ connection_update_cb, void *cb_ctx);
+void cnss_plat_ipc_unregister(void *cb_ctx);
+int cnss_plat_ipc_qmi_file_download(char *file_name, char *buf, u32 *size);
+int cnss_plat_ipc_qmi_file_upload(char *file_name, u8 *file_buf,
+ u32 file_size);
+struct cnss_plat_ipc_user_config *cnss_plat_ipc_qmi_user_config(void);
+#endif
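A minimal sketch of a driver registering for IPC connection updates and then reading the user-space configuration; the example_* names are hypothetical and the pr_debug() calls stand in for real handling:

#include <linux/kernel.h>
#include "cnss_plat_ipc_qmi.h"

static void example_ipc_conn_update(void *cb_ctx, bool connection_status)
{
	/* Invoked when the user-space QMI daemon connects or disconnects */
	pr_debug("cnss_plat_ipc connection: %d\n", connection_status);
}

static int example_ipc_setup(void *driver_ctx)
{
	struct cnss_plat_ipc_user_config *cfg;
	int ret;

	ret = cnss_plat_ipc_register(example_ipc_conn_update, driver_ctx);
	if (ret)
		return ret;

	/* Query the options advertised by user space, e.g. DMS MAC support */
	cfg = cnss_plat_ipc_qmi_user_config();
	if (cfg && cfg->dms_mac_addr_supported)
		pr_debug("DMS MAC address provisioning supported\n");

	return 0;
}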
diff --git a/inc/cnss_prealloc.h b/inc/cnss_prealloc.h
new file mode 100644
index 0000000..ee074f5
--- /dev/null
+++ b/inc/cnss_prealloc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2016,2019 The Linux Foundation. All rights reserved. */
+
+#ifndef _NET_CNSS_PREALLOC_H_
+#define _NET_CNSS_PREALLOC_H_
+
+#include <linux/types.h>
+
+#define WCNSS_PRE_ALLOC_GET_THRESHOLD (4*1024)
+
+extern void *wcnss_prealloc_get(size_t size);
+extern int wcnss_prealloc_put(void *ptr);
+extern int wcnss_pre_alloc_reset(void);
+void wcnss_prealloc_check_memory_leak(void);
+
+#endif /* _NET_CNSS_PREALLOC_H_ */
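Callers are expected to gate on WCNSS_PRE_ALLOC_GET_THRESHOLD and fall back to the pre-allocated pool only for large requests. A minimal sketch of that pattern, assuming wcnss_prealloc_put() returns non-zero when the pointer came from the pool:

#include <linux/slab.h>
#include "cnss_prealloc.h"

static void *example_alloc(size_t size)
{
	/* Small requests go to the regular allocator */
	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return kzalloc(size, GFP_KERNEL);

	/* Large requests come from the pre-allocated pool */
	return wcnss_prealloc_get(size);
}

static void example_free(void *ptr, size_t size)
{
	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD) {
		kfree(ptr);
		return;
	}

	/* Assumed: returns non-zero if ptr belonged to the pool */
	if (!wcnss_prealloc_put(ptr))
		kfree(ptr);
}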
diff --git a/inc/cnss_utils.h b/inc/cnss_utils.h
new file mode 100644
index 0000000..c8d03a2
--- /dev/null
+++ b/inc/cnss_utils.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_UTILS_H_
+#define _CNSS_UTILS_H_
+
+#include <linux/types.h>
+
+struct device;
+
+enum cnss_utils_cc_src {
+ CNSS_UTILS_SOURCE_CORE,
+ CNSS_UTILS_SOURCE_11D,
+ CNSS_UTILS_SOURCE_USER
+};
+
+extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 ch_count);
+extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len);
+extern int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len);
+extern int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len);
+extern int cnss_utils_get_driver_load_cnt(struct device *dev);
+extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
+extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_utils_set_wlan_derived_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_derived_mac_address(struct device *dev,
+ uint32_t *num);
+extern void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source);
+extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
+
+#endif
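A minimal sketch of a driver pushing an unsafe-channel list through cnss_utils and reading it back; the buffer size and the assumption that buf_len is a byte count are illustrative, not taken from this header:

#include <linux/device.h>
#include "cnss_utils.h"

static int example_sync_unsafe_channels(struct device *dev,
					u16 *ch_list, u16 ch_count)
{
	u16 readback[16];	/* hypothetical buffer size */
	u16 readback_count = 0;
	int ret;

	/* Record channels the platform should treat as unsafe */
	ret = cnss_utils_set_wlan_unsafe_channel(dev, ch_list, ch_count);
	if (ret)
		return ret;

	/* Read the list back, bounded by the caller's buffer (assumed bytes) */
	ret = cnss_utils_get_wlan_unsafe_channel(dev, readback, &readback_count,
						 sizeof(readback));
	if (ret)
		return ret;

	/* Mark the country-code source as user-provided */
	cnss_utils_set_cc_source(dev, CNSS_UTILS_SOURCE_USER);
	return 0;
}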