author     Hsiu-Chang Chen <hsiuchangchen@google.com>   2022-07-05 11:33:57 +0800
committer  Hsiu-Chang Chen <hsiuchangchen@google.com>   2022-07-05 11:33:57 +0800
commit     b3bbdb9cf83a472565ede2ae636367f448d79b54 (patch)
tree       b7748870722efaa98445b18436a5c3b08c08399e
parent     a669d625291d0f932429991e9855e2fc1f93f4f0 (diff)
download   cnss2-b3bbdb9cf83a472565ede2ae636367f448d79b54.tar.gz
wcn6740: Update cnss/mhi/qmi/qrtr drivers
Migrate wlan codes to preCS release

Bug: 237922233
Test: Regression Test
Change-Id: I02eb261b022db86ace9a9f25327d55da1452c065
-rw-r--r--  cnss2/bus.c                                3
-rw-r--r--  cnss2/bus.h                                4
-rw-r--r--  cnss2/debug.c                             18
-rw-r--r--  cnss2/main.c                              15
-rw-r--r--  cnss2/main.h                               7
-rw-r--r--  cnss2/pci.c                               52
-rw-r--r--  cnss2/pci.h                                9
-rw-r--r--  cnss2/pci_platform_google.c                2
-rw-r--r--  cnss2/pci_qcom.c                          14
-rw-r--r--  cnss2/power.c                            390
-rw-r--r--  cnss2/qcom_ramdump.c                     118
-rw-r--r--  cnss2/qmi.c                              189
-rw-r--r--  cnss2/qmi.h                                8
-rw-r--r--  cnss2/reg.h                                7
-rw-r--r--  cnss_prealloc/cnss_prealloc.c              2
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.c   337
-rw-r--r--  cnss_utils/wlan_firmware_service_v01.h   104
-rw-r--r--  inc/cnss2.h                                6
-rw-r--r--  inc/mhi.h                                  2
-rw-r--r--  inc/qcom_ramdump.h                        11
-rw-r--r--  mhi/core/misc.c                           16
-rw-r--r--  mhi/core/misc.h                            2
-rw-r--r--  mhi/core/pm.c                              2
-rw-r--r--  qrtr/Kconfig                               8
-rw-r--r--  qrtr/Makefile                              4
-rw-r--r--  qrtr/af_qrtr.c                          2121
-rw-r--r--  qrtr/debug.c                             200
-rw-r--r--  qrtr/debug.h                              52
28 files changed, 3615 insertions(+), 88 deletions(-)
diff --git a/cnss2/bus.c b/cnss2/bus.c
index 04f050c..58fd891 100644
--- a/cnss2/bus.c
+++ b/cnss2/bus.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "bus.h"
@@ -30,6 +30,7 @@ enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
return CNSS_BUS_PCI;
default:
cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
diff --git a/cnss2/bus.h b/cnss2/bus.h
index 581b78d..c7f759b 100644
--- a/cnss2/bus.h
+++ b/cnss2/bus.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CNSS_BUS_H
@@ -22,6 +22,8 @@
#define QCA6490_DEVICE_ID 0x1103
#define KIWI_VENDOR_ID 0x17CB
#define KIWI_DEVICE_ID 0x1107
+#define MANGO_VENDOR_ID 0x17CB
+#define MANGO_DEVICE_ID 0x110A
enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
diff --git a/cnss2/debug.c b/cnss2/debug.c
index cb84c4d..e456c9a 100644
--- a/cnss2/debug.c
+++ b/cnss2/debug.c
@@ -188,6 +188,8 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp,
char buf[64];
char *cmd;
unsigned int len = 0;
+ char *sptr, *token;
+ const char *delim = " ";
int ret = 0;
if (!plat_priv)
@@ -198,7 +200,13 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp,
return -EFAULT;
buf[len] = '\0';
- cmd = buf;
+ sptr = buf;
+
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+ cmd = token;
+
cnss_pr_dbg("Received dev_boot debug command: %s\n", cmd);
if (sysfs_streq(cmd, "on")) {
@@ -221,6 +229,10 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp,
ret = cnss_set_host_sol_value(plat_priv, 1);
} else if (sysfs_streq(cmd, "deassert_host_sol")) {
ret = cnss_set_host_sol_value(plat_priv, 0);
+ } else if (sysfs_streq(cmd, "pdc_update")) {
+ if (!sptr)
+ return -EINVAL;
+ ret = cnss_aop_send_msg(plat_priv, sptr);
} else {
pci_priv = plat_priv->bus_priv;
if (!pci_priv)
@@ -266,7 +278,9 @@ static int cnss_dev_boot_debug_show(struct seq_file *s, void *data)
seq_puts(s, "shutdown: full power off sequence to shutdown device\n");
seq_puts(s, "assert: trigger firmware assert\n");
seq_puts(s, "set_cbc_done: Set cold boot calibration done status\n");
-
+ seq_puts(s, "\npdc_update usage:");
+ seq_puts(s, "1. echo pdc_update {class: wlan_pdc ss: <pdc_ss>, res: <vreg>.<mode>, <seq>: <val>} > <debugfs_path>/cnss/dev_boot\n");
+ seq_puts(s, "2. echo pdc_update {class: wlan_pdc ss: <pdc_ss>, res: pdc, enable: <val>} > <debugfs_path>/cnss/dev_boot\n");
return 0;
}
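
The rewritten dev_boot handler above tokenizes the write buffer with strsep() so that a command such as pdc_update can carry a free-form payload, which is then handed to AOP via cnss_aop_send_msg(). A minimal userspace sketch of that split (the input string is hypothetical, mirroring the usage text printed by cnss_dev_boot_debug_show()):

#define _DEFAULT_SOURCE /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "pdc_update {class: wlan_pdc, ss: rf, res: pdc, enable: 1}";
        char *sptr = buf;
        /* strsep() cuts at the first space: the token is the command name,
         * sptr is advanced past it to the remainder (the AOP message). */
        char *token = strsep(&sptr, " ");

        printf("cmd: '%s'\n", token);   /* pdc_update */
        printf("arg: '%s'\n", sptr);    /* {class: wlan_pdc, ...} */
        return 0;
}
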
diff --git a/cnss2/main.c b/cnss2/main.c
index 4fe9d6f..cc8a23b 100644
--- a/cnss2/main.c
+++ b/cnss2/main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -479,8 +479,11 @@ static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
if (plat_priv->hds_enabled)
cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_HDS);
+
cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
+ cnss_wlfw_ini_file_send_sync(plat_priv, WLFW_CONN_ROAM_INI_V01);
+
ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
plat_priv->ctrl_params.bdf_type);
if (ret)
@@ -1226,6 +1229,7 @@ int cnss_enable_dev_sol_irq(struct cnss_plat_data *plat_priv)
if (sol_gpio->dev_sol_gpio < 0 || sol_gpio->dev_sol_irq <= 0)
return 0;
+ enable_irq(sol_gpio->dev_sol_irq);
ret = enable_irq_wake(sol_gpio->dev_sol_irq);
if (ret)
cnss_pr_err("Failed to enable device SOL as wake IRQ, err = %d\n",
@@ -1246,6 +1250,7 @@ int cnss_disable_dev_sol_irq(struct cnss_plat_data *plat_priv)
if (ret)
cnss_pr_err("Failed to disable device SOL as wake IRQ, err = %d\n",
ret);
+ disable_irq(sol_gpio->dev_sol_irq);
return ret;
}
@@ -2754,6 +2759,7 @@ int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
ret = cnss_register_ramdump_v2(plat_priv);
break;
default:
@@ -2774,6 +2780,7 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
cnss_unregister_ramdump_v2(plat_priv);
break;
default:
@@ -3183,6 +3190,7 @@ static ssize_t fs_ready_store(struct device *dev,
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
cnss_pr_err("Not supported for device ID 0x%lx\n",
@@ -3514,6 +3522,7 @@ static const struct platform_device_id cnss_platform_id_table[] = {
{ .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
{ .name = "qca6490", .driver_data = QCA6490_DEVICE_ID, },
{ .name = "kiwi", .driver_data = KIWI_DEVICE_ID, },
+ { .name = "mango", .driver_data = MANGO_DEVICE_ID, },
{ },
};
@@ -3533,6 +3542,9 @@ static const struct of_device_id cnss_of_match_table[] = {
{
.compatible = "qcom,cnss-kiwi",
.data = (void *)&cnss_platform_id_table[4]},
+ {
+ .compatible = "qcom,cnss-mango",
+ .data = (void *)&cnss_platform_id_table[5]},
{ },
};
MODULE_DEVICE_TABLE(of, cnss_of_match_table);
@@ -3587,6 +3599,7 @@ static int cnss_probe(struct platform_device *plat_dev)
cnss_get_pm_domain_info(plat_priv);
cnss_get_wlaon_pwr_ctrl_info(plat_priv);
+ cnss_power_misc_params_init(plat_priv);
cnss_get_tcs_info(plat_priv);
cnss_get_cpr_info(plat_priv);
cnss_aop_mbox_init(plat_priv);
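
One detail worth noting in the SOL IRQ hunks above: enable_irq() is added before enable_irq_wake() on the way up, and disable_irq() after disable_irq_wake() on the way down, so the two paths stay mirrored and the enable/disable counts stay balanced. A schematic sketch of that ordering (the helper names sol_irq_arm/sol_irq_disarm are hypothetical, not driver code):

#include <linux/interrupt.h>

static int sol_irq_arm(unsigned int irq)
{
        enable_irq(irq);                /* unmask the line first */
        return enable_irq_wake(irq);    /* then arm it as a wakeup source */
}

static int sol_irq_disarm(unsigned int irq)
{
        int ret = disable_irq_wake(irq);        /* disarm wakeup first */

        disable_irq(irq);               /* then mask the line */
        return ret;
}
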
diff --git a/cnss2/main.h b/cnss2/main.h
index fd9a739..511d499 100644
--- a/cnss2/main.h
+++ b/cnss2/main.h
@@ -542,6 +542,8 @@ struct cnss_plat_data {
struct mbox_client mbox_client_data;
struct mbox_chan *mbox_chan;
const char *vreg_ol_cpr, *vreg_ipa;
+ const char **pdc_init_table, **vreg_pdc_map, **pmu_vreg_map;
+ int pdc_init_table_len, vreg_pdc_map_len, pmu_vreg_map_len;
bool adsp_pc_enabled;
u64 feature_list;
u8 charger_mode;
@@ -624,6 +626,11 @@ int cnss_get_tcs_info(struct cnss_plat_data *plat_priv);
unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
enum cnss_timeout_type);
int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv);
+int cnss_aop_pdc_reconfig(struct cnss_plat_data *plat_priv);
+int cnss_aop_send_msg(struct cnss_plat_data *plat_priv, char *msg);
+void cnss_power_misc_params_init(struct cnss_plat_data *plat_priv);
+int cnss_aop_ol_cpr_cfg_setup(struct cnss_plat_data *plat_priv,
+ struct wlfw_pmu_cfg_v01 *fw_pmu_cfg);
int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
const struct firmware **fw_entry,
const char *filename);
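
The three new tables in cnss_plat_data are flat DT string arrays; the two *_map properties are consumed as key/value pairs (map[j] is matched, map[j + 1] is the result) by the power.c code later in this patch. A userspace sketch of that pair lookup, with hypothetical map contents and strstr() standing in for the kernel's strnstr():

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Flat [key0, val0, key1, val1, ...] layout, as read from DT */
        const char *vreg_pdc_map[] = { "wlan-vreg-s3e", "rf",
                                       "wlan-vreg-s7e", "bb" };
        int map_len = 4;
        const char *vreg = "s7e", *pdc = "rf";  /* "rf" is the default */

        for (int j = 0; j < map_len; j += 2) {
                if (strstr(vreg_pdc_map[j], vreg)) {
                        pdc = vreg_pdc_map[j + 1];
                        break;
                }
        }
        printf("%s mapped to %s\n", vreg, pdc); /* s7e mapped to bb */
        return 0;
}
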
diff --git a/cnss2/pci.c b/cnss2/pci.c
index b46635e..c908c57 100644
--- a/cnss2/pci.c
+++ b/cnss2/pci.c
@@ -46,6 +46,7 @@
#define QCA6390_PATH_PREFIX "qca6390/"
#define QCA6490_PATH_PREFIX "qca6490/"
#define KIWI_PATH_PREFIX "kiwi/"
+#define MANGO_PATH_PREFIX "mango/"
#define DEFAULT_PHY_M3_FILE_NAME "m3.bin"
#define DEFAULT_PHY_UCODE_FILE_NAME "phy_ucode.elf"
#define PHY_UCODE_V2_FILE_NAME "phy_ucode20.elf"
@@ -550,6 +551,8 @@ static struct cnss_misc_reg syspm_reg_access_seq[] = {
{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};
+static struct cnss_print_optimize print_optimize;
+
#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
@@ -846,6 +849,8 @@ static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
return -EINVAL;
}
+ cnss_pr_vdbg("Bandwidth vote to %d, save %d\n", bw, save);
+
list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
ret = icc_set_bw(bus_bw_info->icc_path,
bus_bw_info->cfg_table[bw].avg_bw,
@@ -1150,6 +1155,7 @@ int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
return -EOPNOTSUPP;
@@ -1211,6 +1217,8 @@ void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
pci_priv->pci_link_down_ind = true;
spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ /* Notify MHI about link down */
+ mhi_report_error(pci_priv->mhi_ctrl);
if (pci_dev->device == QCA6174_DEVICE_ID)
disable_irq(pci_dev->irq);
@@ -1379,6 +1387,13 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
+ break;
+ case MANGO_DEVICE_ID:
+ pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
+ pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
+ pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
+ sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
+ break;
default:
return;
}
@@ -1669,7 +1684,11 @@ retry_mhi_suspend:
case CNSS_MHI_RESUME:
mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
if (pci_priv->drv_connected_last) {
- cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
+ ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
+ if (ret) {
+ mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
+ break;
+ }
ret = cnss_mhi_pm_fast_resume(pci_priv, true);
cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
} else {
@@ -2565,6 +2584,7 @@ skip_power_off:
}
clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
+ memset(&print_optimize, 0, sizeof(print_optimize));
out:
return ret;
@@ -2622,6 +2642,7 @@ int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
ret = cnss_qca6290_powerup(pci_priv);
break;
default:
@@ -2650,6 +2671,7 @@ int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
ret = cnss_qca6290_shutdown(pci_priv);
break;
default:
@@ -2678,6 +2700,7 @@ int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
cnss_qca6290_crash_shutdown(pci_priv);
break;
default:
@@ -2706,6 +2729,7 @@ int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
ret = cnss_qca6290_ramdump(pci_priv);
break;
default:
@@ -3589,6 +3613,7 @@ int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
return 0;
@@ -3630,6 +3655,7 @@ int cnss_pci_force_wake_request(struct device *dev)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
return 0;
@@ -3665,6 +3691,7 @@ int cnss_pci_is_device_awake(struct device *dev)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
return 0;
@@ -3692,6 +3719,7 @@ int cnss_pci_force_wake_release(struct device *dev)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
break;
default:
return 0;
@@ -3908,6 +3936,7 @@ int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
phy_filename = DEFAULT_PHY_M3_FILE_NAME;
break;
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
switch (plat_priv->device_version.major_version) {
case FW_V2_NUMBER:
phy_filename = PHY_UCODE_V2_FILE_NAME;
@@ -4261,11 +4290,14 @@ int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
*user_base_data = msi_config->users[idx].base_vector
+ pci_priv->msi_ep_base_data;
*base_vector = msi_config->users[idx].base_vector;
+ /* Add only a single print for each user */
+ if (print_optimize.msi_log_chk[idx]++)
+ goto skip_print;
cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
-
+skip_print:
return 0;
}
}
@@ -4306,8 +4338,10 @@ void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
else
*msi_addr_high = 0;
- cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
- *msi_addr_low, *msi_addr_high);
+ /* Add only a single print as the address is constant */
+ if (!print_optimize.msi_addr_chk++)
+ cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
+ *msi_addr_low, *msi_addr_high);
}
EXPORT_SYMBOL(cnss_get_msi_address);
@@ -4369,6 +4403,7 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
break;
default:
@@ -4693,6 +4728,7 @@ static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
}
break;
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
offset = plat_priv->hang_data_addr_offset;
length = plat_priv->hang_event_data_len;
break;
@@ -4959,6 +4995,10 @@ void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
KIWI_PATH_PREFIX "%s", name);
break;
+ case MANGO_DEVICE_ID:
+ scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+ MANGO_PATH_PREFIX "%s", name);
+ break;
default:
scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
break;
@@ -5001,6 +5041,7 @@ static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
break;
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
switch (plat_priv->device_version.major_version) {
case FW_V2_NUMBER:
cnss_pci_add_fw_prefix_name(pci_priv,
@@ -5592,6 +5633,7 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
timer_setup(&pci_priv->dev_rddm_timer,
cnss_dev_rddm_timeout_hdlr, 0);
@@ -5660,6 +5702,7 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
case QCA6390_DEVICE_ID:
case QCA6490_DEVICE_ID:
case KIWI_DEVICE_ID:
+ case MANGO_DEVICE_ID:
cnss_pci_wake_gpio_deinit(pci_priv);
del_timer(&pci_priv->boot_debug_timer);
del_timer(&pci_priv->dev_rddm_timer);
@@ -5691,6 +5734,7 @@ static const struct pci_device_id cnss_pci_id_table[] = {
{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
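
The print_optimize counters introduced above implement a log-once idiom: the counter is post-incremented inside the test, so it reads zero only on the first call, and each MSI user (and the constant MSI address) is logged exactly once rather than on every lookup. The idiom reduced to a standalone sketch:

#include <stdio.h>

#define MSI_USERS 4

static int msi_log_chk[MSI_USERS];

static void log_once(int idx, const char *name)
{
        if (msi_log_chk[idx]++) /* non-zero on every call after the first */
                return;
        printf("Assign MSI to user: %s\n", name);
}

int main(void)
{
        log_once(0, "MHI");     /* prints */
        log_once(0, "MHI");     /* silent */
        return 0;
}
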
diff --git a/cnss2/pci.h b/cnss2/pci.h
index 97ded45..7cc1b58 100644
--- a/cnss2/pci.h
+++ b/cnss2/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CNSS_PCI_H
@@ -36,6 +36,7 @@
#define PCI_LINK_DOWN 0
#define LINK_TRAINING_RETRY_MAX_TIMES 3
#define LINK_TRAINING_RETRY_DELAY_MS 500
+#define MSI_USERS 4
enum cnss_mhi_state {
CNSS_MHI_INIT,
@@ -67,6 +68,7 @@ enum cnss_pci_reg_dev_mask {
REG_MASK_QCA6390,
REG_MASK_QCA6490,
REG_MASK_KIWI,
+ REG_MASK_MANGO,
};
struct cnss_msi_user {
@@ -107,6 +109,11 @@ struct cnss_pm_stats {
u64 runtime_put_timestamp_id[RTPM_ID_MAX];
};
+struct cnss_print_optimize {
+ int msi_log_chk[MSI_USERS];
+ int msi_addr_chk;
+};
+
struct cnss_pci_data {
struct pci_dev *pci_dev;
struct cnss_plat_data *plat_priv;
diff --git a/cnss2/pci_platform_google.c b/cnss2/pci_platform_google.c
index 0585ceb..4267991 100644
--- a/cnss2/pci_platform_google.c
+++ b/cnss2/pci_platform_google.c
@@ -23,7 +23,7 @@ static DEFINE_SPINLOCK(pci_link_down_lock);
static struct cnss_msi_config msi_config = {
.total_vectors = 16,
- .total_users = 4,
+ .total_users = MSI_USERS,
.users = (struct cnss_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
diff --git a/cnss2/pci_qcom.c b/cnss2/pci_qcom.c
index 04390d1..1350f81 100644
--- a/cnss2/pci_qcom.c
+++ b/cnss2/pci_qcom.c
@@ -6,7 +6,7 @@
static struct cnss_msi_config msi_config = {
.total_vectors = 32,
- .total_users = 4,
+ .total_users = MSI_USERS,
.users = (struct cnss_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
@@ -301,7 +301,8 @@ int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
enum pci_link_status status)
{
- u16 link_speed, link_width;
+ u16 link_speed, link_width = pci_priv->def_link_width;
+ u16 one_lane = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
int ret;
cnss_pr_vdbg("Set PCI link status to: %u\n", status);
@@ -309,16 +310,17 @@ static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
switch (status) {
case PCI_GEN1:
link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
- link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ if (!link_width)
+ link_width = one_lane;
break;
case PCI_GEN2:
link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
- link_width = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
+ if (!link_width)
+ link_width = one_lane;
break;
case PCI_DEF:
link_speed = pci_priv->def_link_speed;
- link_width = pci_priv->def_link_width;
- if (!link_speed && !link_width) {
+ if (!link_speed || !link_width) {
cnss_pr_err("PCI link speed or width is not valid\n");
return -EINVAL;
}
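
For reference, link_speed and link_width here are fields of the PCIe Link Status register: the current link speed is the low nibble and the negotiated width occupies bits 9:4, which is why one_lane is PCI_EXP_LNKSTA_NLW_X1 shifted down by PCI_EXP_LNKSTA_NLW_SHIFT. A decode sketch using the same uapi constants (the register value is hypothetical):

#include <stdio.h>

/* From the kernel's include/uapi/linux/pci_regs.h */
#define PCI_EXP_LNKSTA_CLS       0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_NLW       0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4
#define PCI_EXP_LNKSTA_NLW_X1    0x0010

int main(void)
{
        unsigned short lnksta = 0x1012; /* hypothetical: gen2, x1 */
        unsigned int speed = lnksta & PCI_EXP_LNKSTA_CLS;
        unsigned int width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
                             PCI_EXP_LNKSTA_NLW_SHIFT;

        /* one_lane in the patch is PCI_EXP_LNKSTA_NLW_X1 >> SHIFT == 1 */
        printf("speed code %u, x%u link\n", speed, width);
        return 0;
}
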
diff --git a/cnss2/power.c b/cnss2/power.c
index 6059abc..5eb769b 100644
--- a/cnss2/power.c
+++ b/cnss2/power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk.h>
@@ -81,32 +81,47 @@ static struct cnss_clk_cfg cnss_clk_list[] = {
#define BT_CXMX_VOLTAGE_MV 950
#define CNSS_MBOX_MSG_MAX_LEN 64
#define CNSS_MBOX_TIMEOUT_MS 1000
+/* Platform HW config */
+#define CNSS_PMIC_VOLTAGE_STEP 4
+#define CNSS_PMIC_AUTO_HEADROOM 16
+#define CNSS_IR_DROP_WAKE 30
+#define CNSS_IR_DROP_SLEEP 10
/**
- * enum cnss_vreg_param: Voltage regulator TCS param
- * @CNSS_VREG_VOLTAGE: Provides voltage level to be configured in TCS
+ * enum cnss_aop_vreg_param: Voltage regulator TCS param
+ * @CNSS_VREG_VOLTAGE: Provides voltage level in mV to be configured in TCS
* @CNSS_VREG_MODE: Regulator mode
- * @CNSS_VREG_TCS_ENABLE: Set Voltage regulator enable config in TCS
+ * @CNSS_VREG_ENABLE: Set boolean voltage regulator enable config in TCS.
*/
-enum cnss_vreg_param {
+enum cnss_aop_vreg_param {
CNSS_VREG_VOLTAGE,
CNSS_VREG_MODE,
CNSS_VREG_ENABLE,
+ CNSS_VREG_PARAM_MAX
+};
+
+/** enum cnss_aop_vreg_param_mode: Voltage modes supported by AOP */
+enum cnss_aop_vreg_param_mode {
+ CNSS_VREG_RET_MODE = 3,
+ CNSS_VREG_LPM_MODE = 4,
+ CNSS_VREG_AUTO_MODE = 6,
+ CNSS_VREG_NPM_MODE = 7,
+ CNSS_VREG_MODE_MAX
};
/**
- * enum cnss_tcs_seq: TCS sequence ID for trigger
- * CNSS_TCS_UP_SEQ: TCS Sequence based on up trigger / Wake TCS
- * CNSS_TCS_DOWN_SEQ: TCS Sequence based on down trigger / Sleep TCS
- * CNSS_TCS_ALL_SEQ: Update for both up and down triggers
+ * enum cnss_aop_tcs_seq_param: TCS sequence ID for trigger
+ * @CNSS_TCS_UP_SEQ: TCS Sequence based on up trigger / Wake TCS
+ * @CNSS_TCS_DOWN_SEQ: TCS Sequence based on down trigger / Sleep TCS
+ * @CNSS_TCS_ENABLE_SEQ: Enable this TCS seq entry
*/
-enum cnss_tcs_seq {
+enum cnss_aop_tcs_seq_param {
CNSS_TCS_UP_SEQ,
CNSS_TCS_DOWN_SEQ,
- CNSS_TCS_ALL_SEQ,
+ CNSS_TCS_ENABLE_SEQ,
+ CNSS_TCS_SEQ_MAX
};
-
static int cnss_get_vreg_single(struct cnss_plat_data *plat_priv,
struct cnss_vreg_info *vreg)
{
@@ -731,7 +746,7 @@ int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
}
}
- if (of_find_property(dev->of_node, HOST_SOL_GPIO, NULL) &&
+ if (of_find_property(dev->of_node, HOST_SOL_GPIO, NULL) ||
of_find_property(dev->of_node, DEV_SOL_GPIO, NULL)) {
pinctrl_info->sol_default =
pinctrl_lookup_state(pinctrl_info->pinctrl,
@@ -1214,34 +1229,20 @@ out:
return ret;
}
+#if IS_ENABLED(CONFIG_MSM_QMP)
int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
{
struct mbox_client *mbox = &plat_priv->mbox_client_data;
struct mbox_chan *chan;
- int ret = 0;
+ int ret;
+
+ plat_priv->mbox_chan = NULL;
mbox->dev = &plat_priv->plat_dev->dev;
mbox->tx_block = true;
mbox->tx_tout = CNSS_MBOX_TIMEOUT_MS;
mbox->knows_txdone = false;
- plat_priv->mbox_chan = NULL;
-
- ret = of_property_read_string(plat_priv->plat_dev->dev.of_node,
- "qcom,vreg_ol_cpr",
- &plat_priv->vreg_ol_cpr);
- if (ret)
- cnss_pr_dbg("Vreg for OL CPR not configured\n");
-
- ret = of_property_read_string(plat_priv->plat_dev->dev.of_node,
- "qcom,vreg_ipa",
- &plat_priv->vreg_ipa);
- if (ret)
- cnss_pr_dbg("Volt regulator for Int Power Amp not configured\n");
-
- if (!plat_priv->vreg_ol_cpr && !plat_priv->vreg_ipa)
- return 0;
-
chan = mbox_request_channel(mbox, 0);
if (IS_ERR(chan)) {
cnss_pr_err("Failed to get mbox channel\n");
@@ -1250,29 +1251,32 @@ int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
plat_priv->mbox_chan = chan;
cnss_pr_dbg("Mbox channel initialized\n");
+ ret = cnss_aop_pdc_reconfig(plat_priv);
+ if (ret)
+ cnss_pr_err("Failed to reconfig WLAN PDC, err = %d\n", ret);
return 0;
}
-#if IS_ENABLED(CONFIG_MSM_QMP)
-static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
- const char *vreg_name,
- enum cnss_vreg_param param,
- enum cnss_tcs_seq seq, int val)
+/**
+ * cnss_aop_send_msg: Send a JSON message to AOP using QMP
+ * @plat_priv: Pointer to cnss platform data
+ * @mbox_msg: String in JSON format
+ *
+ * AOP accepts JSON messages to configure WLAN resources. Format as follows:
+ * To send VReg config: {class: wlan_pdc, ss: <pdc_name>,
+ * res: <VReg_name>.<param>, <seq_param>: <value>}
+ * To send PDC config: {class: wlan_pdc, ss: <pdc_name>, res: pdc,
+ * enable: <value>}
+ * QMP returns a timeout error if the format is incorrect or the AOP
+ * operation fails.
+ *
+ * Return: 0 for success
+ */
+int cnss_aop_send_msg(struct cnss_plat_data *plat_priv, char *mbox_msg)
{
struct qmp_pkt pkt;
- char mbox_msg[CNSS_MBOX_MSG_MAX_LEN];
- static const char * const vreg_param_str[] = {"v", "m", "e"};
- static const char *const tcs_seq_str[] = {"upval", "dwnval", "enable"};
int ret = 0;
- if (param > CNSS_VREG_ENABLE || seq > CNSS_TCS_ALL_SEQ || !vreg_name)
- return -EINVAL;
-
- snprintf(mbox_msg, CNSS_MBOX_MSG_MAX_LEN,
- "{class: wlan_pdc, res: %s.%s, %s: %d}", vreg_name,
- vreg_param_str[param], tcs_seq_str[seq], val);
-
cnss_pr_dbg("Sending AOP Mbox msg: %s\n", mbox_msg);
pkt.size = CNSS_MBOX_MSG_MAX_LEN;
pkt.data = mbox_msg;
@@ -1285,16 +1289,303 @@ static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
return ret;
}
+
+/* cnss_aop_pdc_reconfig: Send PDC init table as configured in DT for wlan device */
+int cnss_aop_pdc_reconfig(struct cnss_plat_data *plat_priv)
+{
+ u32 i;
+ int ret;
+
+ if (plat_priv->pdc_init_table_len <= 0 || !plat_priv->pdc_init_table)
+ return 0;
+
+ cnss_pr_dbg("Setting PDC defaults for device ID: %d\n",
+ plat_priv->device_id);
+ for (i = 0; i < plat_priv->pdc_init_table_len; i++) {
+ ret = cnss_aop_send_msg(plat_priv,
+ (char *)plat_priv->pdc_init_table[i]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+/* cnss_aop_pdc_name_str: Get PDC name corresponding to VReg from DT mapping */
+static const char *cnss_aop_pdc_name_str(struct cnss_plat_data *plat_priv,
+ const char *vreg_name)
+{
+ u32 i;
+ static const char * const aop_pdc_ss_str[] = {"rf", "bb"};
+ const char *pdc = aop_pdc_ss_str[0], *vreg_map_name;
+
+ if (plat_priv->vreg_pdc_map_len <= 0 || !plat_priv->vreg_pdc_map)
+ goto end;
+
+ for (i = 0; i < plat_priv->vreg_pdc_map_len; i++) {
+ vreg_map_name = plat_priv->vreg_pdc_map[i];
+ if (strnstr(vreg_map_name, vreg_name, strlen(vreg_map_name))) {
+ pdc = plat_priv->vreg_pdc_map[i + 1];
+ break;
+ }
+ }
+end:
+ cnss_pr_dbg("%s mapped to %s\n", vreg_name, pdc);
+ return pdc;
+}
+
+static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
+ const char *vreg_name,
+ enum cnss_aop_vreg_param param,
+ enum cnss_aop_tcs_seq_param seq_param,
+ int val)
+{
+ char msg[CNSS_MBOX_MSG_MAX_LEN];
+ static const char * const aop_vreg_param_str[] = {
+ [CNSS_VREG_VOLTAGE] = "v", [CNSS_VREG_MODE] = "m",
+ [CNSS_VREG_ENABLE] = "e",};
+ static const char * const aop_tcs_seq_str[] = {
+ [CNSS_TCS_UP_SEQ] = "upval", [CNSS_TCS_DOWN_SEQ] = "dwnval",
+ [CNSS_TCS_ENABLE_SEQ] = "enable",};
+
+ if (param >= CNSS_VREG_PARAM_MAX || seq_param >= CNSS_TCS_SEQ_MAX ||
+ !vreg_name)
+ return -EINVAL;
+
+ snprintf(msg, CNSS_MBOX_MSG_MAX_LEN,
+ "{class: wlan_pdc, ss: %s, res: %s.%s, %s: %d}",
+ cnss_aop_pdc_name_str(plat_priv, vreg_name),
+ vreg_name, aop_vreg_param_str[param],
+ aop_tcs_seq_str[seq_param], val);
+
+ return cnss_aop_send_msg(plat_priv, msg);
+}
+
+int cnss_aop_ol_cpr_cfg_setup(struct cnss_plat_data *plat_priv,
+ struct wlfw_pmu_cfg_v01 *fw_pmu_cfg)
+{
+ const char *pmu_pin, *vreg;
+ struct wlfw_pmu_param_v01 *fw_pmu_param;
+ u32 fw_pmu_param_len, i, j, plat_vreg_param_len = 0;
+ int ret = 0;
+ struct platform_vreg_param {
+ char vreg[MAX_PROP_SIZE];
+ u32 wake_volt;
+ u32 sleep_volt;
+ } plat_vreg_param[QMI_WLFW_PMU_PARAMS_MAX_V01] = {0};
+ static bool config_done;
+
+ if (config_done)
+ return 0;
+
+ if (plat_priv->pmu_vreg_map_len <= 0 || !plat_priv->mbox_chan ||
+ !plat_priv->pmu_vreg_map) {
+ cnss_pr_dbg("Mbox channel / PMU VReg Map not configured\n");
+ goto end;
+ }
+ if (!fw_pmu_cfg)
+ return -EINVAL;
+
+ fw_pmu_param = fw_pmu_cfg->pmu_param;
+ fw_pmu_param_len = fw_pmu_cfg->pmu_param_len;
+ /* Get PMU pin name to platform VReg mapping */
+ for (i = 0; i < fw_pmu_param_len; i++) {
+ cnss_pr_dbg("FW_PMU Data: %s %d %d %d %d\n",
+ fw_pmu_param[i].pin_name,
+ fw_pmu_param[i].wake_volt_valid,
+ fw_pmu_param[i].wake_volt,
+ fw_pmu_param[i].sleep_volt_valid,
+ fw_pmu_param[i].sleep_volt);
+
+ if (!fw_pmu_param[i].wake_volt_valid &&
+ !fw_pmu_param[i].sleep_volt_valid)
+ continue;
+
+ vreg = NULL;
+ for (j = 0; j < plat_priv->pmu_vreg_map_len; j += 2) {
+ pmu_pin = plat_priv->pmu_vreg_map[j];
+ if (strnstr(pmu_pin, fw_pmu_param[i].pin_name,
+ strlen(pmu_pin))) {
+ vreg = plat_priv->pmu_vreg_map[j + 1];
+ break;
+ }
+ }
+ if (!vreg) {
+ cnss_pr_err("No VREG mapping for %s\n",
+ fw_pmu_param[i].pin_name);
+ continue;
+ } else {
+ cnss_pr_dbg("%s mapped to %s\n",
+ fw_pmu_param[i].pin_name, vreg);
+ }
+ for (j = 0; j < QMI_WLFW_PMU_PARAMS_MAX_V01; j++) {
+ u32 wake_volt = 0, sleep_volt = 0;
+
+ if (plat_vreg_param[j].vreg[0] == '\0')
+ strlcpy(plat_vreg_param[j].vreg, vreg,
+ sizeof(plat_vreg_param[j].vreg));
+ else if (!strnstr(plat_vreg_param[j].vreg, vreg,
+ strlen(plat_vreg_param[j].vreg)))
+ continue;
+
+ if (fw_pmu_param[i].wake_volt_valid)
+ wake_volt = roundup(fw_pmu_param[i].wake_volt,
+ CNSS_PMIC_VOLTAGE_STEP) -
+ CNSS_PMIC_AUTO_HEADROOM +
+ CNSS_IR_DROP_WAKE;
+ if (fw_pmu_param[i].sleep_volt_valid)
+ sleep_volt = roundup(fw_pmu_param[i].sleep_volt,
+ CNSS_PMIC_VOLTAGE_STEP) -
+ CNSS_PMIC_AUTO_HEADROOM +
+ CNSS_IR_DROP_SLEEP;
+
+ plat_vreg_param[j].wake_volt =
+ (wake_volt > plat_vreg_param[j].wake_volt ?
+ wake_volt : plat_vreg_param[j].wake_volt);
+ plat_vreg_param[j].sleep_volt =
+ (sleep_volt > plat_vreg_param[j].sleep_volt ?
+ sleep_volt : plat_vreg_param[j].sleep_volt);
+
+ plat_vreg_param_len = (plat_vreg_param_len > j ?
+ plat_vreg_param_len : j);
+ cnss_pr_dbg("Plat VReg Data: %s %d %d\n",
+ plat_vreg_param[j].vreg,
+ plat_vreg_param[j].wake_volt,
+ plat_vreg_param[j].sleep_volt);
+ break;
+ }
+ }
+
+ for (i = 0; i <= plat_vreg_param_len; i++) {
+ if (plat_vreg_param[i].wake_volt > 0) {
+ ret =
+ cnss_aop_set_vreg_param(plat_priv,
+ plat_vreg_param[i].vreg,
+ CNSS_VREG_VOLTAGE,
+ CNSS_TCS_UP_SEQ,
+ plat_vreg_param[i].wake_volt);
+ }
+ if (plat_vreg_param[i].sleep_volt > 0) {
+ ret =
+ cnss_aop_set_vreg_param(plat_priv,
+ plat_vreg_param[i].vreg,
+ CNSS_VREG_VOLTAGE,
+ CNSS_TCS_DOWN_SEQ,
+ plat_vreg_param[i].sleep_volt);
+ }
+ if (ret < 0)
+ break;
+ }
+end:
+ config_done = true;
+ return ret;
+}
#else
+int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+int cnss_aop_send_msg(struct cnss_plat_data *plat_priv, char *msg)
+{
+ return 0;
+}
+
+int cnss_aop_pdc_reconfig(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
static int cnss_aop_set_vreg_param(struct cnss_plat_data *plat_priv,
const char *vreg_name,
- enum cnss_vreg_param param,
- enum cnss_tcs_seq seq, int val)
+ enum cnss_aop_vreg_param param,
+ enum cnss_aop_tcs_seq_param seq_param,
+ int val)
+{
+ return 0;
+}
+
+int cnss_aop_ol_cpr_cfg_setup(struct cnss_plat_data *plat_priv,
+ struct wlfw_pmu_cfg_v01 *fw_pmu_cfg)
{
return 0;
}
#endif
+void cnss_power_misc_params_init(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev = &plat_priv->plat_dev->dev;
+ int ret;
+
+ /* common DT Entries */
+ plat_priv->pdc_init_table_len =
+ of_property_count_strings(dev->of_node,
+ "qcom,pdc_init_table");
+ if (plat_priv->pdc_init_table_len > 0) {
+ plat_priv->pdc_init_table =
+ kcalloc(plat_priv->pdc_init_table_len,
+ sizeof(char *), GFP_KERNEL);
+ ret =
+ of_property_read_string_array(dev->of_node,
+ "qcom,pdc_init_table",
+ plat_priv->pdc_init_table,
+ plat_priv->pdc_init_table_len);
+ if (ret < 0)
+ cnss_pr_err("Failed to get PDC Init Table\n");
+ } else {
+ cnss_pr_dbg("PDC Init Table not configured\n");
+ }
+
+ plat_priv->vreg_pdc_map_len =
+ of_property_count_strings(dev->of_node,
+ "qcom,vreg_pdc_map");
+ if (plat_priv->vreg_pdc_map_len > 0) {
+ plat_priv->vreg_pdc_map =
+ kcalloc(plat_priv->vreg_pdc_map_len,
+ sizeof(char *), GFP_KERNEL);
+ ret =
+ of_property_read_string_array(dev->of_node,
+ "qcom,vreg_pdc_map",
+ plat_priv->vreg_pdc_map,
+ plat_priv->vreg_pdc_map_len);
+ if (ret < 0)
+ cnss_pr_err("Failed to get VReg PDC Mapping\n");
+ } else {
+ cnss_pr_dbg("VReg PDC Mapping not configured\n");
+ }
+
+ plat_priv->pmu_vreg_map_len =
+ of_property_count_strings(dev->of_node,
+ "qcom,pmu_vreg_map");
+ if (plat_priv->pmu_vreg_map_len > 0) {
+ plat_priv->pmu_vreg_map = kcalloc(plat_priv->pmu_vreg_map_len,
+ sizeof(char *), GFP_KERNEL);
+ ret =
+ of_property_read_string_array(dev->of_node, "qcom,pmu_vreg_map",
+ plat_priv->pmu_vreg_map,
+ plat_priv->pmu_vreg_map_len);
+ if (ret < 0)
+ cnss_pr_err("Fail to get PMU VReg Mapping\n");
+ } else {
+ cnss_pr_dbg("PMU VReg Mapping not configured\n");
+ }
+
+ /* Device DT Specific */
+ if (plat_priv->device_id == QCA6390_DEVICE_ID ||
+ plat_priv->device_id == QCA6490_DEVICE_ID) {
+ ret = of_property_read_string(dev->of_node,
+ "qcom,vreg_ol_cpr",
+ &plat_priv->vreg_ol_cpr);
+ if (ret)
+ cnss_pr_dbg("VReg for QCA6490 OL CPR not configured\n");
+
+ ret = of_property_read_string(dev->of_node,
+ "qcom,vreg_ipa",
+ &plat_priv->vreg_ipa);
+ if (ret)
+ cnss_pr_dbg("VReg for QCA6490 Int Power Amp not configured\n");
+ }
+}
+
int cnss_update_cpr_info(struct cnss_plat_data *plat_priv)
{
struct cnss_cpr_info *cpr_info = &plat_priv->cpr_info;
@@ -1308,6 +1599,9 @@ int cnss_update_cpr_info(struct cnss_plat_data *plat_priv)
return -EINVAL;
}
+ if (plat_priv->device_id != QCA6490_DEVICE_ID)
+ return -EINVAL;
+
if (!plat_priv->vreg_ol_cpr || !plat_priv->mbox_chan) {
cnss_pr_dbg("Mbox channel / OL CPR Vreg not configured\n");
} else {
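
The voltage conversion in cnss_aop_ol_cpr_cfg_setup() above follows a fixed recipe: round the FW_PMU value up to the 4 mV PMIC step, subtract the 16 mV auto-mode headroom, and add the IR drop (30 mV wake, 10 mV sleep); the result is then shipped in the AOP message format documented at cnss_aop_send_msg(). A worked sketch (the regulator name "s3e" and the FW voltages are hypothetical; roundup() mirrors the kernel macro):

#include <stdio.h>

#define CNSS_PMIC_VOLTAGE_STEP  4
#define CNSS_PMIC_AUTO_HEADROOM 16
#define CNSS_IR_DROP_WAKE       30
#define CNSS_IR_DROP_SLEEP      10

/* roundup() as in the kernel: round x up to a multiple of y */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        unsigned int fw_wake = 950, fw_sleep = 650;     /* mV from FW_PMU */
        unsigned int wake = roundup(fw_wake, CNSS_PMIC_VOLTAGE_STEP) -
                            CNSS_PMIC_AUTO_HEADROOM + CNSS_IR_DROP_WAKE;
        unsigned int sleep = roundup(fw_sleep, CNSS_PMIC_VOLTAGE_STEP) -
                             CNSS_PMIC_AUTO_HEADROOM + CNSS_IR_DROP_SLEEP;

        /* 950 -> 952 - 16 + 30 = 966; 650 -> 652 - 16 + 10 = 646 */
        printf("{class: wlan_pdc, ss: rf, res: s3e.v, upval: %u}\n", wake);
        printf("{class: wlan_pdc, ss: rf, res: s3e.v, dwnval: %u}\n", sleep);
        return 0;
}
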
diff --git a/cnss2/qcom_ramdump.c b/cnss2/qcom_ramdump.c
index 6379c0f..2478e9d 100644
--- a/cnss2/qcom_ramdump.c
+++ b/cnss2/qcom_ramdump.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -47,6 +48,23 @@ do { \
#define set_phdr_property(arg, class, member, value) \
set_xhdr_property(phdr, arg, class, member, value)
+#if !IS_ENABLED(CONFIG_WCN_GOOGLE)
+#define RAMDUMP_NUM_DEVICES 256
+#define RAMDUMP_NAME "ramdump"
+
+static struct class *ramdump_class;
+static dev_t ramdump_dev;
+static DEFINE_MUTEX(rd_minor_mutex);
+static DEFINE_IDA(rd_minor_id);
+static bool ramdump_devnode_inited;
+
+struct ramdump_device {
+ char name[256];
+ struct cdev cdev;
+ struct device *dev;
+};
+#endif
+
struct qcom_ramdump_desc {
void *data;
struct completion dump_done;
@@ -268,6 +286,106 @@ int qcom_fw_elf_dump(struct firmware *fw, struct device *dev)
return 0;
}
EXPORT_SYMBOL(qcom_fw_elf_dump);
+
+static int ramdump_devnode_init(void)
+{
+ int ret;
+
+ ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
+ ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
+ RAMDUMP_NAME);
+ if (ret) {
+ pr_err("%s: unable to allocate major\n", __func__);
+ return ret;
+ }
+
+ ramdump_devnode_inited = true;
+
+ return 0;
+}
+
+void *qcom_create_ramdump_device(const char *dev_name, struct device *parent)
+{
+ int ret, minor;
+ struct ramdump_device *rd_dev;
+
+ if (!dev_name) {
+ pr_err("%s: Invalid device name.\n", __func__);
+ return NULL;
+ }
+
+ mutex_lock(&rd_minor_mutex);
+ if (!ramdump_devnode_inited) {
+ ret = ramdump_devnode_init();
+ if (ret) {
+ mutex_unlock(&rd_minor_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+ mutex_unlock(&rd_minor_mutex);
+
+ rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
+
+ if (!rd_dev)
+ return NULL;
+
+ /* get a minor number */
+ minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
+ GFP_KERNEL);
+ if (minor < 0) {
+ pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+ minor);
+ ret = -ENODEV;
+ goto fail_out_of_minors;
+ }
+
+ snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "%s",
+ dev_name);
+
+ rd_dev->dev = device_create(ramdump_class, parent,
+ MKDEV(MAJOR(ramdump_dev), minor),
+ rd_dev, rd_dev->name);
+ if (IS_ERR(rd_dev->dev)) {
+ ret = PTR_ERR(rd_dev->dev);
+ pr_err("%s: device_create failed for %s (%d)\n", __func__,
+ dev_name, ret);
+ goto fail_return_minor;
+ }
+
+ cdev_init(&rd_dev->cdev, NULL);
+ ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
+ if (ret) {
+ pr_err("%s: cdev_add failed for %s (%d)\n", __func__,
+ dev_name, ret);
+ goto fail_cdev_add;
+ }
+
+ return (void *)rd_dev->dev;
+
+fail_cdev_add:
+ device_unregister(rd_dev->dev);
+fail_return_minor:
+ ida_simple_remove(&rd_minor_id, minor);
+fail_out_of_minors:
+ kfree(rd_dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_create_ramdump_device);
+
+void qcom_destroy_ramdump_device(void *dev)
+{
+ struct ramdump_device *rd_dev;
+ int minor;
+
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ rd_dev = container_of(dev, struct ramdump_device, dev);
+ minor = MINOR(rd_dev->cdev.dev);
+
+ cdev_del(&rd_dev->cdev);
+ device_unregister(rd_dev->dev);
+ ida_simple_remove(&rd_minor_id, minor);
+ kfree(rd_dev);
+}
+EXPORT_SYMBOL(qcom_destroy_ramdump_device);
#endif
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Ramdump driver");
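
A hedged usage sketch of the pair exported above, assuming the prototypes live in inc/qcom_ramdump.h (also touched by this patch); the parent device and the "wcn6740" node name are hypothetical. qcom_create_ramdump_device() can return either NULL or an ERR_PTR() on failure, so both are checked:

#include <linux/device.h>
#include <linux/err.h>
#include "qcom_ramdump.h"       /* inc/qcom_ramdump.h in this tree (assumed) */

static void *rd_handle;

static int example_probe(struct device *parent)
{
        rd_handle = qcom_create_ramdump_device("wcn6740", parent);
        if (IS_ERR_OR_NULL(rd_handle))
                return rd_handle ? PTR_ERR(rd_handle) : -ENOMEM;
        return 0;
}

static void example_remove(void)
{
        qcom_destroy_ramdump_device(rd_handle);
}
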
diff --git a/cnss2/qmi.c b/cnss2/qmi.c
index d1fa4b2..2d152bf 100644
--- a/cnss2/qmi.c
+++ b/cnss2/qmi.c
@@ -32,6 +32,10 @@
#define HDS_FILE_NAME "hds.bin"
#define CHIP_ID_GF_MASK 0x10
+#define CONN_ROAM_FILE_NAME "wlan-connection-roaming"
+#define INI_EXT ".ini"
+#define INI_FILE_NAME_LEN 100
+
#define QDSS_TRACE_CONFIG_FILE "qdss_trace_config"
#ifdef CONFIG_CNSS2_DEBUG
#define QDSS_DEBUG_FILE_STR "debug_"
@@ -49,6 +53,9 @@
#define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K
#define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K
#define DMS_QMI_MAX_MSG_LEN SZ_256
+#define MAX_SHADOW_REG_RESERVED 2
+#define MAX_NUM_SHADOW_REG_V3 (QMI_WLFW_MAX_NUM_SHADOW_REG_V3_USAGE_V01 - \
+ MAX_SHADOW_REG_RESERVED)
#define QMI_WLFW_MAC_READY_TIMEOUT_MS 50
#define QMI_WLFW_MAC_READY_MAX_RETRY 200
@@ -189,7 +196,8 @@ qmi_registered:
static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv,
struct wlfw_host_cap_req_msg_v01 *req)
{
- if (plat_priv->device_id == KIWI_DEVICE_ID) {
+ if (plat_priv->device_id == KIWI_DEVICE_ID ||
+ plat_priv->device_id == MANGO_DEVICE_ID) {
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
@@ -535,6 +543,9 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
if (resp->hwid_bitmap_valid)
plat_priv->hwid_bitmap = resp->hwid_bitmap;
+ if (resp->ol_cpr_cfg_valid)
+ cnss_aop_ol_cpr_cfg_setup(plat_priv, &resp->ol_cpr_cfg);
+
cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n",
plat_priv->chip_info.chip_id,
plat_priv->chip_info.chip_family,
@@ -636,6 +647,146 @@ static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
return ret;
}
+int cnss_wlfw_ini_file_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_ini_file_type_v01 file_type)
+{
+ struct wlfw_ini_file_download_req_msg_v01 *req;
+ struct wlfw_ini_file_download_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+ const struct firmware *fw;
+ char filename[INI_FILE_NAME_LEN] = {0};
+ char tmp_filename[INI_FILE_NAME_LEN] = {0};
+ const u8 *temp;
+ unsigned int remaining;
+ bool backup_supported = false;
+
+ cnss_pr_info("INI File %u download\n", file_type);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ switch (file_type) {
+ case WLFW_CONN_ROAM_INI_V01:
+ snprintf(tmp_filename, sizeof(tmp_filename),
+ CONN_ROAM_FILE_NAME);
+ backup_supported = true;
+ break;
+ default:
+ cnss_pr_err("Invalid file type: %u\n", file_type);
+ ret = -EINVAL;
+ goto err_req_fw;
+ }
+
+ snprintf(filename, sizeof(filename), "%s%s", tmp_filename, INI_EXT);
+ /* Fetch the file */
+ ret = firmware_request_nowarn(&fw, filename, &plat_priv->plat_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to get INI file %s (%d), Backup file: %s",
+ filename, ret,
+ backup_supported ? "Supported" : "Not Supported");
+
+ if (!backup_supported)
+ goto err_req_fw;
+
+ snprintf(filename, sizeof(filename),
+ "%s-%s%s", tmp_filename, "backup", INI_EXT);
+
+ ret = firmware_request_nowarn(&fw, filename,
+ &plat_priv->plat_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to get INI file %s (%d)", filename,
+ ret);
+ goto err_req_fw;
+ }
+ }
+
+ temp = fw->data;
+ remaining = fw->size;
+
+ cnss_pr_dbg("Downloading INI file: %s, size: %u\n", filename,
+ remaining);
+
+ while (remaining) {
+ req->file_type_valid = 1;
+ req->file_type = file_type;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_ini_file_download_resp_msg_v01_ei,
+ resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for INI file download request, err: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = qmi_send_request
+ (&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_INI_FILE_DOWNLOAD_REQ_V01,
+ WLFW_INI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ini_file_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send INI File download request, err: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of INI File download request, err: %d\n",
+ ret);
+ goto err;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("INI file download request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto err;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ release_firmware(fw);
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+err:
+ release_firmware(fw);
+err_req_fw:
+ kfree(req);
+ kfree(resp);
+
+ return ret;
+}
+
int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
u32 bdf_type)
{
@@ -1032,7 +1183,8 @@ void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv,
char filename_tmp[MAX_FIRMWARE_NAME_LEN];
char *debug_str = QDSS_DEBUG_FILE_STR;
- if (plat_priv->device_id == KIWI_DEVICE_ID)
+ if (plat_priv->device_id == KIWI_DEVICE_ID ||
+ plat_priv->device_id == MANGO_DEVICE_ID)
debug_str = "";
if (plat_priv->device_version.major_version == FW_V2_NUMBER)
@@ -1384,16 +1536,31 @@ int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
}
- req->shadow_reg_v2_valid = 1;
- if (config->num_shadow_reg_v2_cfg >
- QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
- req->shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
- else
- req->shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
+ if (plat_priv->device_id != KIWI_DEVICE_ID) {
+ req->shadow_reg_v2_valid = 1;
+ if (config->num_shadow_reg_v2_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
+ req->shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
+ else
+ req->shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
- memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
- sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
- * req->shadow_reg_v2_len);
+ memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
+ sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
+ * req->shadow_reg_v2_len);
+ } else {
+ cnss_pr_dbg("Shadow reg v3 len: %d\n",
+ config->num_shadow_reg_v3_cfg);
+ req->shadow_reg_v3_valid = 1;
+ if (config->num_shadow_reg_v3_cfg >
+ MAX_NUM_SHADOW_REG_V3)
+ req->shadow_reg_v3_len = MAX_NUM_SHADOW_REG_V3;
+ else
+ req->shadow_reg_v3_len = config->num_shadow_reg_v3_cfg;
+
+ memcpy(req->shadow_reg_v3, config->shadow_reg_v3_cfg,
+ sizeof(struct wlfw_shadow_reg_v3_cfg_s_v01)
+ * req->shadow_reg_v3_len);
+ }
ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
wlfw_wlan_cfg_resp_msg_v01_ei, resp);
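
cnss_wlfw_ini_file_send_sync() above streams the file in QMI_WLFW_MAX_DATA_SIZE_V01 (6144-byte) segments, bumping seg_id per chunk and setting end only when the last chunk fits. The chunking arithmetic in isolation (the file size is hypothetical):

#include <stdio.h>

#define QMI_WLFW_MAX_DATA_SIZE  6144    /* QMI_WLFW_MAX_DATA_SIZE_V01 */

int main(void)
{
        unsigned int remaining = 15000; /* hypothetical INI file size */
        unsigned int seg_id = 0;

        while (remaining) {
                unsigned int len = remaining > QMI_WLFW_MAX_DATA_SIZE ?
                                   QMI_WLFW_MAX_DATA_SIZE : remaining;
                int end = (len == remaining);   /* last segment? */

                printf("seg %u: len %u, end %d\n", seg_id, len, end);
                remaining -= len;
                seg_id++;
        }
        /* 15000 bytes -> segments of 6144, 6144, 2712 */
        return 0;
}
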
diff --git a/cnss2/qmi.h b/cnss2/qmi.h
index dc07dca..1fa4bb2 100644
--- a/cnss2/qmi.h
+++ b/cnss2/qmi.h
@@ -84,6 +84,8 @@ int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv);
int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option);
int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
u32 cal_file_download_size);
+int cnss_wlfw_ini_file_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_ini_file_type_v01 file_type);
#else
#define QMI_WLFW_TIMEOUT_MS 10000
@@ -299,6 +301,12 @@ int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
{
return 0;
}
+
+int cnss_wlfw_ini_file_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_ini_file_type_v01 file_type)
+{
+ return 0;
+}
#endif /* CONFIG_CNSS2_QMI */
#ifdef CONFIG_CNSS2_DEBUG
diff --git a/cnss2/reg.h b/cnss2/reg.h
index c774d18..9a3729e 100644
--- a/cnss2/reg.h
+++ b/cnss2/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CNSS_REG_H
@@ -89,6 +89,11 @@
#define KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
#define KIWI_PBL_BOOTSTRAP_STATUS 0x01A10008
+#define MANGO_DEBUG_PBL_LOG_SRAM_START 0x01403D98
+#define MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40
+#define MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48
+#define MANGO_PBL_BOOTSTRAP_STATUS 0x01A10008
+
#define TCSR_PBL_LOGGING_REG 0x01B000F8
#define PCIE_BHI_ERRDBG2_REG 0x01E0E238
#define PCIE_BHI_ERRDBG3_REG 0x01E0E23C
diff --git a/cnss_prealloc/cnss_prealloc.c b/cnss_prealloc/cnss_prealloc.c
index 1a29d91..8056ced 100644
--- a/cnss_prealloc/cnss_prealloc.c
+++ b/cnss_prealloc/cnss_prealloc.c
@@ -58,7 +58,7 @@ static struct cnss_pool cnss_pools[] = {
{16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
{32 * 1024, 6, "cnss-pool-32k", NULL, NULL},
{64 * 1024, 8, "cnss-pool-64k", NULL, NULL},
- {128 * 1024, 2, "cnss-pool-128k", NULL, NULL},
+ {128 * 1024, 5, "cnss-pool-128k", NULL, NULL},
};
/**
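
For scale: this change raises the memory reserved by the 128 KB pool from 2 × 128 KB = 256 KB to 5 × 128 KB = 640 KB.
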
diff --git a/cnss_utils/wlan_firmware_service_v01.c b/cnss_utils/wlan_firmware_service_v01.c
index e03388a..05b1311 100644
--- a/cnss_utils/wlan_firmware_service_v01.c
+++ b/cnss_utils/wlan_firmware_service_v01.c
@@ -622,6 +622,111 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
},
};
+static struct qmi_elem_info wlfw_pmu_param_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 32,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_param_v01,
+ pin_name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_param_v01,
+ wake_volt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_param_v01,
+ wake_volt),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_param_v01,
+ sleep_volt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_param_v01,
+ sleep_volt),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_pmu_cfg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_cfg_v01,
+ pmu_param_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_PMU_PARAMS_MAX_V01,
+ .elem_size = sizeof(struct wlfw_pmu_param_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_pmu_cfg_v01,
+ pmu_param),
+ .ei_array = wlfw_pmu_param_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_shadow_reg_v3_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
@@ -1230,6 +1335,26 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
hw_debug),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_wlan_mode_req_msg_v01,
+ xo_cal_data),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1455,6 +1580,37 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
.ei_array = wlfw_msi_cfg_s_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V3_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v3_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct
+ wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3),
+ .ei_array = wlfw_shadow_reg_v3_cfg_s_v01_ei,
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1869,6 +2025,27 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
hwid_bitmap),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ ol_cpr_cfg_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_pmu_cfg_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct
+ wlfw_cap_resp_msg_v01,
+ ol_cpr_cfg),
+ .ei_array = wlfw_pmu_cfg_v01_ei,
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -3648,6 +3825,26 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
gpio_info),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x2E,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ fw_ini_cfg_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x2E,
+ .offset = offsetof(struct
+ wlfw_host_cap_req_msg_v01,
+ fw_ini_cfg_support),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -5679,6 +5876,145 @@ struct qmi_elem_info wlfw_subsys_restart_level_resp_msg_v01_ei[] = {
};
EXPORT_SYMBOL(wlfw_subsys_restart_level_resp_msg_v01_ei);
+struct qmi_elem_info wlfw_ini_file_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ file_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_ini_file_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ file_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ini_file_download_req_msg_v01_ei);
+
+struct qmi_elem_info wlfw_ini_file_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_ini_file_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+EXPORT_SYMBOL(wlfw_ini_file_download_resp_msg_v01_ei);
+
/**
* wlfw_is_valid_dt_node_found - Check if valid device tree node present
*
@@ -5709,7 +6045,6 @@ static int __init wlfw_init(void)
return 0;
}
-
module_init(wlfw_init);
MODULE_LICENSE("GPL v2");
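
All of the optional TLVs added above follow the same descriptor pattern: a QMI_OPT_FLAG entry for the *_valid byte, then the payload entry carrying the same tlv_type, with a QMI_EOTI entry terminating the array. A minimal descriptor for a hypothetical one-field message (demo_req_msg_v01 is not part of the wlfw service):

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/soc/qcom/qmi.h>

struct demo_req_msg_v01 {
        u8 value_valid; /* presence flag for optional TLV 0x10 */
        u32 value;
};

static struct qmi_elem_info demo_req_msg_v01_ei[] = {
        {
                .data_type = QMI_OPT_FLAG,
                .elem_len = 1,
                .elem_size = sizeof(u8),
                .array_type = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct demo_req_msg_v01, value_valid),
        },
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(u32),
                .array_type = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct demo_req_msg_v01, value),
        },
        {
                .data_type = QMI_EOTI,
                .array_type = NO_ARRAY,
                .tlv_type = QMI_COMMON_TLV_TYPE,
        },
};
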
diff --git a/cnss_utils/wlan_firmware_service_v01.h b/cnss_utils/wlan_firmware_service_v01.h
index 5f51b9a..aa9d021 100644
--- a/cnss_utils/wlan_firmware_service_v01.h
+++ b/cnss_utils/wlan_firmware_service_v01.h
@@ -19,6 +19,7 @@
#define QMI_WLFW_SUBSYS_RESTART_LEVEL_REQ_V01 0x0055
#define QMI_WLFW_POWER_SAVE_RESP_V01 0x0050
#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_INI_FILE_DOWNLOAD_RESP_V01 0x0056
#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
@@ -102,6 +103,7 @@
#define QMI_WLFW_WFC_CALL_STATUS_REQ_V01 0x0049
#define QMI_WLFW_DEVICE_INFO_RESP_V01 0x004C
#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_INI_FILE_DOWNLOAD_REQ_V01 0x0056
#define QMI_WLFW_QDSS_TRACE_FREE_IND_V01 0x0046
#define QMI_WLFW_QDSS_MEM_READY_IND_V01 0x0052
@@ -117,6 +119,7 @@
#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
#define QMI_WLFW_MAX_NUM_GPIO_INFO_V01 20
#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLFW_PMU_PARAMS_MAX_V01 16
#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 52
#define QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01 256
#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
@@ -125,7 +128,9 @@
#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
#define QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01 10
#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V3_V01 60
#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V3_USAGE_V01 40
#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
#define QMI_WLFW_MAX_NUM_GPIO_V01 32
#define QMI_WLFW_MAX_NUM_MLO_CHIPS_V01 3
@@ -280,6 +285,13 @@ enum wlfw_gpio_info_type_v01 {
WLFW_GPIO_INFO_TYPE_MAX_VAL_V01 = INT_MAX,
};
+enum wlfw_ini_file_type_v01 {
+ WLFW_INI_FILE_TYPE_MIN_VAL_V01 = INT_MIN,
+ WLFW_INI_CFG_FILE_V01 = 0,
+ WLFW_CONN_ROAM_INI_V01 = 1,
+ WLFW_INI_FILE_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
@@ -401,6 +413,23 @@ struct wlfw_host_mlo_chip_info_s_v01 {
u8 valid_mlo_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
};
+struct wlfw_pmu_param_v01 {
+ u8 pin_name[32];
+ u32 wake_volt_valid;
+ u32 wake_volt;
+ u32 sleep_volt_valid;
+ u32 sleep_volt;
+};
+
+struct wlfw_pmu_cfg_v01 {
+ u32 pmu_param_len;
+ struct wlfw_pmu_param_v01 pmu_param[QMI_WLFW_PMU_PARAMS_MAX_V01];
+};
+
+struct wlfw_shadow_reg_v3_cfg_s_v01 {
+ u32 addr;
+};
+
struct wlfw_ind_register_req_msg_v01 {
u8 fw_ready_enable_valid;
u8 fw_ready_enable;
@@ -443,6 +472,7 @@ struct wlfw_ind_register_req_msg_v01 {
u8 m3_dump_upload_segments_req_enable_valid;
u8 m3_dump_upload_segments_req_enable;
};
+
#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 86
extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
@@ -451,12 +481,14 @@ struct wlfw_ind_register_resp_msg_v01 {
u8 fw_status_valid;
u64 fw_status;
};
+
#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
struct wlfw_fw_ready_ind_msg_v01 {
char placeholder;
};
+
#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
@@ -466,6 +498,7 @@ struct wlfw_msa_ready_ind_msg_v01 {
u8 hang_data_length_valid;
u16 hang_data_length;
};
+
#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 12
extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
@@ -477,6 +510,7 @@ struct wlfw_pin_connect_result_ind_msg_v01 {
u8 rf_pin_result_valid;
u32 rf_pin_result;
};
+
#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
@@ -484,13 +518,17 @@ struct wlfw_wlan_mode_req_msg_v01 {
enum wlfw_driver_mode_enum_v01 mode;
u8 hw_debug_valid;
u8 hw_debug;
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
};
-#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 15
extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
struct wlfw_wlan_mode_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
@@ -514,19 +552,25 @@ struct wlfw_wlan_cfg_req_msg_v01 {
u8 msi_cfg_valid;
u32 msi_cfg_len;
struct wlfw_msi_cfg_s_v01 msi_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 shadow_reg_v3_valid;
+ u32 shadow_reg_v3_len;
+ struct wlfw_shadow_reg_v3_cfg_s_v01 shadow_reg_v3[QMI_WLFW_MAX_NUM_SHADOW_REG_V3_V01];
};
-#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 866
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 1110
extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
struct wlfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
struct wlfw_cap_req_msg_v01 {
char placeholder;
};
+
#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
@@ -568,9 +612,11 @@ struct wlfw_cap_resp_msg_v01 {
enum wlfw_bdf_dnld_method_v01 bdf_dnld_method;
u8 hwid_bitmap_valid;
u8 hwid_bitmap;
+ u8 ol_cpr_cfg_valid;
+ struct wlfw_pmu_cfg_v01 ol_cpr_cfg;
};
-#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 362
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 1134
extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
struct wlfw_bdf_download_req_msg_v01 {
@@ -589,6 +635,7 @@ struct wlfw_bdf_download_req_msg_v01 {
u8 bdf_type_valid;
u8 bdf_type;
};
+
#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
@@ -597,6 +644,7 @@ struct wlfw_bdf_download_resp_msg_v01 {
u8 host_bdf_data_valid;
u64 host_bdf_data;
};
+
#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
@@ -610,12 +658,14 @@ struct wlfw_cal_report_req_msg_v01 {
u8 cal_file_download_size_valid;
u64 cal_file_download_size;
};
+
#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 43
extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
struct wlfw_cal_report_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
@@ -626,6 +676,7 @@ struct wlfw_initiate_cal_download_ind_msg_v01 {
u8 cal_data_location_valid;
u32 cal_data_location;
};
+
#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
@@ -645,12 +696,14 @@ struct wlfw_cal_download_req_msg_v01 {
u8 cal_data_location_valid;
u32 cal_data_location;
};
+
#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6185
extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
struct wlfw_cal_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
@@ -660,6 +713,7 @@ struct wlfw_initiate_cal_update_ind_msg_v01 {
u8 cal_data_location_valid;
u32 cal_data_location;
};
+
#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
@@ -667,6 +721,7 @@ struct wlfw_cal_update_req_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
u32 seg_id;
};
+
#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
@@ -686,6 +741,7 @@ struct wlfw_cal_update_resp_msg_v01 {
u8 cal_data_location_valid;
u32 cal_data_location;
};
+
#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6188
extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
@@ -693,6 +749,7 @@ struct wlfw_msa_info_req_msg_v01 {
u64 msa_addr;
u32 size;
};
+
#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
@@ -701,18 +758,21 @@ struct wlfw_msa_info_resp_msg_v01 {
u32 mem_region_info_len;
struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
};
+
#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
struct wlfw_msa_ready_req_msg_v01 {
char placeholder;
};
+
#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
struct wlfw_msa_ready_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
@@ -720,12 +780,14 @@ struct wlfw_ini_req_msg_v01 {
u8 enablefwlog_valid;
u8 enablefwlog;
};
+
#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
struct wlfw_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
@@ -734,6 +796,7 @@ struct wlfw_athdiag_read_req_msg_v01 {
u32 mem_type;
u32 data_len;
};
+
#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
@@ -743,6 +806,7 @@ struct wlfw_athdiag_read_resp_msg_v01 {
u32 data_len;
u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
};
+
#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
@@ -752,24 +816,28 @@ struct wlfw_athdiag_write_req_msg_v01 {
u32 data_len;
u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
};
+
#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
struct wlfw_athdiag_write_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
struct wlfw_vbatt_req_msg_v01 {
u64 voltage_uv;
};
+
#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
struct wlfw_vbatt_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
@@ -777,12 +845,14 @@ struct wlfw_mac_addr_req_msg_v01 {
u8 mac_addr_valid;
u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
};
+
#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
struct wlfw_mac_addr_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+
#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
@@ -849,9 +919,11 @@ struct wlfw_host_cap_req_msg_v01 {
u8 gpio_info_valid;
u32 gpio_info_len;
u32 gpio_info[QMI_WLFW_MAX_NUM_GPIO_INFO_V01];
+ u8 fw_ini_cfg_support_valid;
+ u8 fw_ini_cfg_support;
};
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 487
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 491
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
@@ -1329,4 +1401,28 @@ struct wlfw_subsys_restart_level_resp_msg_v01 {
#define WLFW_SUBSYS_RESTART_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_subsys_restart_level_resp_msg_v01_ei[];
+struct wlfw_ini_file_download_req_msg_v01 {
+ u8 file_type_valid;
+ enum wlfw_ini_file_type_v01 file_type;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_INI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6174
+extern struct qmi_elem_info wlfw_ini_file_download_req_msg_v01_ei[];
+
+struct wlfw_ini_file_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_FILE_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_ini_file_download_resp_msg_v01_ei[];
+
#endif
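[Editor's note] The request mirrors the element tables in the .c file: the file travels in QMI_WLFW_MAX_DATA_SIZE_V01-byte segments, total_size accompanies every segment, seg_id counts up, and end flags the final chunk. A hedged host-side chunking loop, reusing the wlfw_send_ini_file_seg sketch shown earlier (buffer and helper are assumptions):

static int wlfw_ini_file_send(struct qmi_handle *qmi,
			      enum wlfw_ini_file_type_v01 type,
			      const u8 *buf, u32 total)
{
	struct wlfw_ini_file_download_req_msg_v01 *req;
	u32 remaining = total, seg_id = 0;
	int ret = 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);	/* ~6 KB, avoid the stack */
	if (!req)
		return -ENOMEM;

	while (remaining) {
		u32 len = min_t(u32, remaining, QMI_WLFW_MAX_DATA_SIZE_V01);

		req->file_type_valid = 1;
		req->file_type = type;
		req->total_size_valid = 1;
		req->total_size = total;
		req->seg_id_valid = 1;
		req->seg_id = seg_id++;
		req->data_valid = 1;
		req->data_len = len;
		memcpy(req->data, buf, len);
		buf += len;
		remaining -= len;
		req->end_valid = 1;
		req->end = !remaining;	/* last segment */

		ret = wlfw_send_ini_file_seg(qmi, req);	/* sketch above */
		if (ret)
			break;
	}

	kfree(req);
	return ret;
}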
diff --git a/inc/cnss2.h b/inc/cnss2.h
index c5f180e..8379d6a 100644
--- a/inc/cnss2.h
+++ b/inc/cnss2.h
@@ -165,6 +165,10 @@ struct cnss_rri_over_ddr_cfg {
u32 base_addr_high;
};
+struct cnss_shadow_reg_v3_cfg {
+ u32 addr;
+};
+
struct cnss_wlan_enable_cfg {
u32 num_ce_tgt_cfg;
struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
@@ -176,6 +180,8 @@ struct cnss_wlan_enable_cfg {
struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
bool rri_over_ddr_cfg_valid;
struct cnss_rri_over_ddr_cfg rri_over_ddr_cfg;
+ u32 num_shadow_reg_v3_cfg;
+ struct cnss_shadow_reg_v3_cfg *shadow_reg_v3_cfg;
};
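[Editor's note] The new shadow_reg_v3 list rides alongside the existing v2 list in cnss_wlan_enable_cfg. A minimal sketch of how a WLAN host driver might populate it, assuming a driver-owned static address table (the addresses are hypothetical):

/* hypothetical shadow register addresses owned by the WLAN host driver */
static struct cnss_shadow_reg_v3_cfg host_shadow_v3[] = {
	{ .addr = 0x3024 },
	{ .addr = 0x3028 },
};

static void host_fill_shadow_v3(struct cnss_wlan_enable_cfg *cfg)
{
	cfg->num_shadow_reg_v3_cfg = ARRAY_SIZE(host_shadow_v3);
	cfg->shadow_reg_v3_cfg = host_shadow_v3;
}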
enum cnss_driver_mode {
diff --git a/inc/mhi.h b/inc/mhi.h
index 50e4935..444b6b1 100644
--- a/inc/mhi.h
+++ b/inc/mhi.h
@@ -39,6 +39,7 @@ struct mhi_buf_info;
* @MHI_CB_BW_REQ: Received a bandwidth switch request from device
* @MHI_CB_FALLBACK_IMG: MHI device was loaded with the provided fallback image
* @MHI_CB_DTR_SIGNAL: DTR signaling update
+ * @MHI_CB_DTR_START_CHANNELS: DTR signal for client driver to start channels
*/
enum mhi_callback {
MHI_CB_IDLE,
@@ -52,6 +53,7 @@ enum mhi_callback {
MHI_CB_BW_REQ,
MHI_CB_FALLBACK_IMG,
MHI_CB_DTR_SIGNAL,
+ MHI_CB_DTR_START_CHANNELS,
};
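[Editor's note] Clients see the new callback through the status_cb registered in their mhi_driver. A hedged sketch of a DTR client reacting to it; the mhi_prepare_for_transfer() call follows the upstream mhi.h signature and is an assumption for this tree:

static void dtr_status_cb(struct mhi_device *mhi_dev, enum mhi_callback cb)
{
	switch (cb) {
	case MHI_CB_DTR_START_CHANNELS:
		/* mission mode reached: safe to move our channels to run */
		if (mhi_prepare_for_transfer(mhi_dev))
			dev_err(&mhi_dev->dev, "failed to start DTR channels\n");
		break;
	default:
		break;
	}
}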
/**
diff --git a/inc/qcom_ramdump.h b/inc/qcom_ramdump.h
index 726e868..24250e8 100644
--- a/inc/qcom_ramdump.h
+++ b/inc/qcom_ramdump.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _QCOM_RAMDUMP_HEADER
@@ -18,6 +19,8 @@ struct qcom_dump_segment {
};
#if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
+extern void *qcom_create_ramdump_device(const char *dev_name, struct device *parent);
+extern void qcom_destroy_ramdump_device(void *dev);
extern int qcom_elf_dump(struct list_head *segs, struct device *dev, unsigned char class);
extern int qcom_dump(struct list_head *head, struct device *dev);
extern int qcom_fw_elf_dump(struct firmware *fw, struct device *dev);
@@ -29,6 +32,14 @@ extern void sscd_release(struct device *dev);
extern void sscd_set_coredump(void *buf, int buf_len);
#endif
#else
+static inline void *qcom_create_ramdump_device(const char *dev_name,
+ struct device *parent)
+{
+ return NULL;
+}
+static inline void qcom_destroy_ramdump_device(void *dev)
+{
+}
static inline int qcom_elf_dump(struct list_head *segs, struct device *dev, unsigned char class)
{
return -ENODEV;
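[Editor's note] Because the stubs return NULL when CONFIG_QCOM_RAMDUMP is off, callers can use the new create/destroy pair unconditionally and only handle a missing device. A minimal lifecycle sketch (names are hypothetical):

static void *wlan_rd_dev;

static int host_ramdump_init(struct device *parent)
{
	/* NULL when CONFIG_QCOM_RAMDUMP is disabled, per the stub above */
	wlan_rd_dev = qcom_create_ramdump_device("wlan_ramdump", parent);
	return wlan_rd_dev ? 0 : -ENODEV;
}

static void host_ramdump_exit(void)
{
	qcom_destroy_ramdump_device(wlan_rd_dev);
	wlan_rd_dev = NULL;
}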
diff --git a/mhi/core/misc.c b/mhi/core/misc.c
index 20f6333..fffef3e 100644
--- a/mhi/core/misc.c
+++ b/mhi/core/misc.c
@@ -33,6 +33,8 @@ const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
!mhi_log_level_str[level]) ? \
"Mask all" : mhi_log_level_str[level])
+#define MHI_DTR_CHANNEL 19
+
struct mhi_bus mhi_bus;
void mhi_misc_init(void)
@@ -788,7 +790,7 @@ bool mhi_scan_rddm_cookie(struct mhi_controller *mhi_cntrl, u32 cookie)
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
- if (!mhi_cntrl->rddm_image || !cookie)
+ if (!mhi_cntrl->rddm_image || !cookie || !mhi_cntrl->bhi)
return false;
MHI_VERB("Checking BHI debug register for 0x%x\n", cookie);
@@ -1455,6 +1457,7 @@ void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_private *mhi_priv = dev_get_drvdata(dev);
struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info;
+ struct mhi_device *dtr_dev;
u64 local, remote;
int ret = -EIO;
@@ -1463,6 +1466,11 @@ void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl)
if (!ret)
MHI_LOG("Timesync: local: %llx, remote: %llx\n", local, remote);
+ /* IP_CTRL DTR channel ID */
+ dtr_dev = mhi_get_device_for_channel(mhi_cntrl, MHI_DTR_CHANNEL);
+ if (dtr_dev)
+ mhi_notify(dtr_dev, MHI_CB_DTR_START_CHANNELS);
+
/* initialize SFR */
if (!sfr_info)
return;
@@ -1594,7 +1602,10 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
preempt_disable();
local_irq_disable();
- *t_host = mhi_tsync->time_get(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg,
+ TIMESYNC_TIME_HIGH_OFFSET, &tdev_hi);
+ if (ret)
+ MHI_ERR("Time HIGH register read error\n");
ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg,
TIMESYNC_TIME_LOW_OFFSET, &tdev_lo);
@@ -1607,6 +1618,7 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
MHI_ERR("Time HIGH register read error\n");
*t_dev = (u64) tdev_hi << 32 | tdev_lo;
+ *t_host = mhi_tsync->time_get(mhi_cntrl);
local_irq_enable();
preempt_enable();
diff --git a/mhi/core/misc.h b/mhi/core/misc.h
index 4d038fb..78bf8fb 100644
--- a/mhi/core/misc.h
+++ b/mhi/core/misc.h
@@ -16,7 +16,7 @@
#define MHI_FORCE_WAKE_DELAY_US (100)
#define MHI_IPC_LOG_PAGES (200)
-#define MAX_RDDM_TABLE_SIZE (7)
+#define MAX_RDDM_TABLE_SIZE (8)
#define MHI_REG_SIZE (SZ_4K)
/* MHI misc capability registers */
diff --git a/mhi/core/pm.c b/mhi/core/pm.c
index bb9628b..45ddbbb 100644
--- a/mhi/core/pm.c
+++ b/mhi/core/pm.c
@@ -436,7 +436,6 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
read_unlock_bh(&mhi_cntrl->pm_lock);
- mhi_misc_mission_mode(mhi_cntrl);
mhi_process_sleeping_events(mhi_cntrl);
/*
@@ -444,6 +443,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
* Execution Environment (EE) to either SBL or AMSS states
*/
mhi_create_devices(mhi_cntrl);
+ mhi_misc_mission_mode(mhi_cntrl);
read_lock_bh(&mhi_cntrl->pm_lock);
diff --git a/qrtr/Kconfig b/qrtr/Kconfig
index a5f069f..829d7fb 100644
--- a/qrtr/Kconfig
+++ b/qrtr/Kconfig
@@ -67,4 +67,12 @@ config QRTR_GUNYAH
Router communication between two virtual machines. The transport
uses dynamically shared memory and gunyah doorbells.
+config QRTR_DEBUG
+ bool "QRTR debug enhancements"
+ help
+ Say Y here to enable QRTR debug enhancements. This currently logs
+ resume-tx failures where the skb allocation failed while the
+ confirm-rx flag was set, and also logs skb allocation failures in
+ the rx path.
+
endif # QRTR
diff --git a/qrtr/Makefile b/qrtr/Makefile
index d7d2d4b..72fe8a3 100644
--- a/qrtr/Makefile
+++ b/qrtr/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#ccflags-y += -I$(WLAN_PLATFORM_ROOT)/inc
-obj-$(CONFIG_QRTR) := qrtr.o ns.o
+obj-$(CONFIG_QRTR) += qrtr.o
+qrtr-y := af_qrtr.o ns.o
+qrtr-$(CONFIG_QRTR_DEBUG) += debug.o
obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o
qrtr-mhi-y := mhi.o \ No newline at end of file
diff --git a/qrtr/af_qrtr.c b/qrtr/af_qrtr.c
new file mode 100644
index 0000000..a64af13
--- /dev/null
+++ b/qrtr/af_qrtr.c
@@ -0,0 +1,2121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#ifdef CONFIG_CNSS_OUT_OF_TREE
+#include "uapi/qrtr.h"
+#else
+#include <linux/qrtr.h>
+#endif
+#include <linux/termios.h> /* For TIOCINQ/OUTQ */
+#include <linux/numa.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/uidgid.h>
+#include <linux/pm_wakeup.h>
+#ifdef CONFIG_CNSS_OUT_OF_TREE
+#include "ipc_logging.h"
+#else
+#include <linux/ipc_logging.h>
+#endif
+
+#include <net/sock.h>
+#include <uapi/linux/sched/types.h>
+
+#include "qrtr.h"
+#include "debug.h"
+
+#define QRTR_LOG_PAGE_CNT 4
+#define QRTR_INFO(ctx, x, ...) \
+ ipc_log_string(ctx, x, ##__VA_ARGS__)
+
+#define QRTR_PROTO_VER_1 1
+#define QRTR_PROTO_VER_2 3
+
+/* auto-bind range */
+#define QRTR_MIN_EPH_SOCKET 0x4000
+#define QRTR_MAX_EPH_SOCKET 0x7fff
+
+#define QRTR_PORT_CTRL_LEGACY 0xffff
+
+/* qrtr socket states */
+#define QRTR_STATE_MULTI -2
+#define QRTR_STATE_INIT -1
+
+#define AID_VENDOR_QRTR KGIDT_INIT(2906)
+
+/**
+ * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
+ * @version: protocol version
+ * @type: packet type; one of QRTR_TYPE_*
+ * @src_node_id: source node
+ * @src_port_id: source port
+ * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
+ * @size: length of packet, excluding this header
+ * @dst_node_id: destination node
+ * @dst_port_id: destination port
+ */
+struct qrtr_hdr_v1 {
+ __le32 version;
+ __le32 type;
+ __le32 src_node_id;
+ __le32 src_port_id;
+ __le32 confirm_rx;
+ __le32 size;
+ __le32 dst_node_id;
+ __le32 dst_port_id;
+} __packed;
+
+/**
+ * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
+ * @version: protocol version
+ * @type: packet type; one of QRTR_TYPE_*
+ * @flags: bitmask of QRTR_FLAGS_*
+ * @optlen: length of optional header data
+ * @size: length of packet, excluding this header and optlen
+ * @src_node_id: source node
+ * @src_port_id: source port
+ * @dst_node_id: destination node
+ * @dst_port_id: destination port
+ */
+struct qrtr_hdr_v2 {
+ u8 version;
+ u8 type;
+ u8 flags;
+ u8 optlen;
+ __le32 size;
+ __le16 src_node_id;
+ __le16 src_port_id;
+ __le16 dst_node_id;
+ __le16 dst_port_id;
+};
+
+#define QRTR_FLAGS_CONFIRM_RX BIT(0)
+
+struct qrtr_cb {
+ u32 src_node;
+ u32 src_port;
+ u32 dst_node;
+ u32 dst_port;
+
+ u8 type;
+ u8 confirm_rx;
+};
+
+#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
+ sizeof(struct qrtr_hdr_v2))
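[Editor's note] QRTR_HDR_MAX_SIZE reserves headroom for whichever header is larger, so a v1 header can later be pushed in front of the payload regardless of which version framed the incoming data. A standalone sanity check of the two layouts (user-space mirror; field widths follow the structs above):

#include <stdint.h>
#include <stdio.h>

struct hdr_v1 { uint32_t f[8]; };	/* 8 * 4 = 32 bytes */
struct hdr_v2 {				/* 4 + 4 + 4 * 2 = 16 bytes */
	uint8_t  version, type, flags, optlen;
	uint32_t size;
	uint16_t src_node, src_port, dst_node, dst_port;
};

int main(void)
{
	size_t max = sizeof(struct hdr_v1) > sizeof(struct hdr_v2) ?
		     sizeof(struct hdr_v1) : sizeof(struct hdr_v2);

	/* QRTR_HDR_MAX_SIZE resolves to the v1 size, 32 bytes */
	printf("v1=%zu v2=%zu max=%zu\n", sizeof(struct hdr_v1),
	       sizeof(struct hdr_v2), max);
	return 0;
}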
+
+struct qrtr_sock {
+ /* WARNING: sk must be the first member */
+ struct sock sk;
+ struct sockaddr_qrtr us;
+ struct sockaddr_qrtr peer;
+
+ int state;
+};
+
+static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
+{
+ BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
+ return container_of(sk, struct qrtr_sock, sk);
+}
+
+static unsigned int qrtr_local_nid = CONFIG_QRTR_NODE_ID;
+static unsigned int qrtr_wakeup_ms = CONFIG_QRTR_WAKEUP_MS;
+
+/* for node ids */
+static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
+static DEFINE_SPINLOCK(qrtr_nodes_lock);
+/* broadcast list */
+static LIST_HEAD(qrtr_all_epts);
+/* lock for qrtr_all_epts */
+static DECLARE_RWSEM(qrtr_epts_lock);
+
+/* local port allocation management */
+static DEFINE_IDR(qrtr_ports);
+static DEFINE_SPINLOCK(qrtr_port_lock);
+
+/* backup buffers */
+#define QRTR_BACKUP_HI_NUM 5
+#define QRTR_BACKUP_HI_SIZE SZ_16K
+#define QRTR_BACKUP_LO_NUM 20
+#define QRTR_BACKUP_LO_SIZE SZ_1K
+static struct sk_buff_head qrtr_backup_lo;
+static struct sk_buff_head qrtr_backup_hi;
+static struct work_struct qrtr_backup_work;
+
+/**
+ * struct qrtr_node - endpoint node
+ * @ep_lock: lock for endpoint management and callbacks
+ * @ep: endpoint
+ * @ref: reference count for node
+ * @nid: node id
+ * @net_id: network cluster identifier
+ * @hello_sent: hello packet sent to endpoint
+ * @hello_rcvd: hello packet received from endpoint
+ * @qrtr_tx_flow: tree with tx counts per flow
+ * @resume_tx: waiters for a resume tx from the remote
+ * @qrtr_tx_lock: lock for qrtr_tx_flow
+ * @rx_queue: receive queue
+ * @item: list item for broadcast list
+ * @kworker: worker thread for recv work
+ * @task: task to run the worker thread
+ * @read_data: scheduled work for recv work
+ * @say_hello: scheduled work for initiating hello
+ * @ws: wakeup source to prevent system suspend
+ * @ilc: ipc logging context reference
+ */
+struct qrtr_node {
+ struct mutex ep_lock;
+ struct qrtr_endpoint *ep;
+ struct kref ref;
+ unsigned int nid;
+ unsigned int net_id;
+ atomic_t hello_sent;
+ atomic_t hello_rcvd;
+
+ struct radix_tree_root qrtr_tx_flow;
+ struct wait_queue_head resume_tx;
+ struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */
+
+ struct sk_buff_head rx_queue;
+ struct list_head item;
+
+ struct kthread_worker kworker;
+ struct task_struct *task;
+ struct kthread_work read_data;
+ struct kthread_work say_hello;
+
+ struct wakeup_source *ws;
+ void *ilc;
+};
+
+struct qrtr_tx_flow_waiter {
+ struct list_head node;
+ struct sock *sk;
+};
+
+/**
+ * struct qrtr_tx_flow - tx flow control
+ * @pending: number of waiting senders
+ * @tx_failed: indicates that a message with confirm_rx flag was lost
+ * @waiters: list of ports to notify when this flow resumes
+ */
+struct qrtr_tx_flow {
+ atomic_t pending;
+ int tx_failed;
+ struct list_head waiters;
+};
+
+#define QRTR_TX_FLOW_HIGH 10
+#define QRTR_TX_FLOW_LOW 5
+
+static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt);
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ int type, struct sockaddr_qrtr *from,
+ struct sockaddr_qrtr *to, unsigned int flags);
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ int type, struct sockaddr_qrtr *from,
+ struct sockaddr_qrtr *to, unsigned int flags);
+static struct qrtr_sock *qrtr_port_lookup(int port);
+static void qrtr_port_put(struct qrtr_sock *ipc);
+
+static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
+ struct sk_buff *skb)
+{
+ struct qrtr_ctrl_pkt pkt = {0,};
+ u64 pl_buf = 0;
+ int type;
+
+ if (!hdr || !skb)
+ return;
+
+ type = le32_to_cpu(hdr->type);
+ if (type == QRTR_TYPE_DATA) {
+ skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pl_buf, sizeof(pl_buf));
+ QRTR_INFO(node->ilc,
+ "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n",
+ hdr->size, hdr->confirm_rx,
+ hdr->src_node_id, hdr->src_port_id,
+ hdr->dst_node_id, hdr->dst_port_id,
+ (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32),
+ current->comm);
+ } else {
+ skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pkt, sizeof(pkt));
+ if (type == QRTR_TYPE_NEW_SERVER ||
+ type == QRTR_TYPE_DEL_SERVER)
+ QRTR_INFO(node->ilc,
+ "TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
+ type, le32_to_cpu(pkt.server.service),
+ le32_to_cpu(pkt.server.instance),
+ le32_to_cpu(pkt.server.node),
+ le32_to_cpu(pkt.server.port));
+ else if (type == QRTR_TYPE_DEL_CLIENT ||
+ type == QRTR_TYPE_RESUME_TX)
+ QRTR_INFO(node->ilc,
+ "TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
+ type, le32_to_cpu(pkt.client.node),
+ le32_to_cpu(pkt.client.port));
+ else if (type == QRTR_TYPE_HELLO ||
+ type == QRTR_TYPE_BYE)
+ QRTR_INFO(node->ilc,
+ "TX CTRL: cmd:0x%x node[0x%x]\n",
+ type, hdr->src_node_id);
+ else if (type == QRTR_TYPE_DEL_PROC)
+ QRTR_INFO(node->ilc,
+ "TX CTRL: cmd:0x%x node[0x%x]\n",
+ type, pkt.proc.node);
+ }
+}
+
+static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
+{
+ struct qrtr_ctrl_pkt pkt = {0,};
+ struct qrtr_cb *cb;
+ u64 pl_buf = 0;
+
+ if (!skb)
+ return;
+
+ cb = (struct qrtr_cb *)skb->cb;
+
+ if (cb->type == QRTR_TYPE_DATA) {
+ skb_copy_bits(skb, 0, &pl_buf, sizeof(pl_buf));
+ QRTR_INFO(node->ilc,
+ "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n",
+ skb->len, cb->confirm_rx, cb->src_node, cb->src_port,
+ cb->dst_node, cb->dst_port,
+ (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
+ } else {
+ skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+ if (cb->type == QRTR_TYPE_NEW_SERVER ||
+ cb->type == QRTR_TYPE_DEL_SERVER)
+ QRTR_INFO(node->ilc,
+ "RX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
+ cb->type, le32_to_cpu(pkt.server.service),
+ le32_to_cpu(pkt.server.instance),
+ le32_to_cpu(pkt.server.node),
+ le32_to_cpu(pkt.server.port));
+ else if (cb->type == QRTR_TYPE_DEL_CLIENT ||
+ cb->type == QRTR_TYPE_RESUME_TX)
+ QRTR_INFO(node->ilc,
+ "RX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
+ cb->type, le32_to_cpu(pkt.client.node),
+ le32_to_cpu(pkt.client.port));
+ else if (cb->type == QRTR_TYPE_HELLO ||
+ cb->type == QRTR_TYPE_BYE)
+ QRTR_INFO(node->ilc,
+ "RX CTRL: cmd:0x%x node[0x%x]\n",
+ cb->type, cb->src_node);
+ }
+}
+
+void qrtr_print_wakeup_reason(const void *data)
+{
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ struct qrtr_cb cb;
+ unsigned int size;
+ unsigned int ver;
+ int service_id;
+ size_t hdrlen;
+ u64 preview = 0;
+
+ ver = *(u8 *)data;
+ switch (ver) {
+ case QRTR_PROTO_VER_1:
+ v1 = data;
+ hdrlen = sizeof(*v1);
+ cb.src_node = le32_to_cpu(v1->src_node_id);
+ cb.src_port = le32_to_cpu(v1->src_port_id);
+ cb.dst_node = le32_to_cpu(v1->dst_node_id);
+ cb.dst_port = le32_to_cpu(v1->dst_port_id);
+
+ size = le32_to_cpu(v1->size);
+ break;
+ case QRTR_PROTO_VER_2:
+ v2 = data;
+ hdrlen = sizeof(*v2) + v2->optlen;
+ cb.src_node = le16_to_cpu(v2->src_node_id);
+ cb.src_port = le16_to_cpu(v2->src_port_id);
+ cb.dst_node = le16_to_cpu(v2->dst_node_id);
+ cb.dst_port = le16_to_cpu(v2->dst_port_id);
+
+ if (cb.src_port == (u16)QRTR_PORT_CTRL)
+ cb.src_port = QRTR_PORT_CTRL;
+ if (cb.dst_port == (u16)QRTR_PORT_CTRL)
+ cb.dst_port = QRTR_PORT_CTRL;
+
+ size = le32_to_cpu(v2->size);
+ break;
+ default:
+ return;
+ }
+
+ service_id = qrtr_get_service_id(cb.src_node, cb.src_port);
+ if (service_id < 0)
+ service_id = qrtr_get_service_id(cb.dst_node, cb.dst_port);
+
+ size = (sizeof(preview) > size) ? size : sizeof(preview);
+ memcpy(&preview, data + hdrlen, size);
+
+ pr_info("%s: src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] service[0x%x]\n",
+ __func__,
+ cb.src_node, cb.src_port,
+ cb.dst_node, cb.dst_port,
+ (unsigned int)preview, (unsigned int)(preview >> 32),
+ service_id);
+}
+EXPORT_SYMBOL(qrtr_print_wakeup_reason);
+
+static bool refcount_dec_and_rwsem_lock(refcount_t *r,
+ struct rw_semaphore *sem)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ down_write(sem);
+ if (!refcount_dec_and_test(r)) {
+ up_write(sem);
+ return false;
+ }
+
+ return true;
+}
+
+static inline int kref_put_rwsem_lock(struct kref *kref,
+ void (*release)(struct kref *kref),
+ struct rw_semaphore *sem)
+{
+ if (refcount_dec_and_rwsem_lock(&kref->refcount, sem)) {
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+/* Release node resources and free the node.
+ *
+ * Do not call directly, use qrtr_node_release. To be used with
+ * kref_put_rwsem_lock. As such, qrtr_epts_lock is expected to be held
+ * for writing on call and is released here.
+ */
+static void __qrtr_node_release(struct kref *kref)
+{
+ struct qrtr_tx_flow_waiter *waiter;
+ struct qrtr_tx_flow_waiter *temp;
+ struct radix_tree_iter iter;
+ struct qrtr_tx_flow *flow;
+ struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
+ unsigned long flags;
+ void __rcu **slot;
+
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ if (node->nid != QRTR_EP_NID_AUTO) {
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ if (node == *slot)
+ radix_tree_iter_delete(&qrtr_nodes, &iter,
+ slot);
+ }
+ }
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+
+ list_del(&node->item);
+ up_write(&qrtr_epts_lock);
+
+ /* Free tx flow counters */
+ mutex_lock(&node->qrtr_tx_lock);
+ radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+ flow = *slot;
+ list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
+ list_del(&waiter->node);
+ sock_put(waiter->sk);
+ kfree(waiter);
+ }
+ radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
+ kfree(flow);
+ }
+ mutex_unlock(&node->qrtr_tx_lock);
+
+ wakeup_source_unregister(node->ws);
+ kthread_flush_worker(&node->kworker);
+ kthread_stop(node->task);
+
+ skb_queue_purge(&node->rx_queue);
+ kfree(node);
+}
+
+/* Increment reference to node. */
+static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
+{
+ if (node)
+ kref_get(&node->ref);
+ return node;
+}
+
+/* Decrement reference to node and release as necessary. */
+static void qrtr_node_release(struct qrtr_node *node)
+{
+ if (!node)
+ return;
+ kref_put_rwsem_lock(&node->ref, __qrtr_node_release, &qrtr_epts_lock);
+}
+
+/**
+ * qrtr_tx_resume() - reset flow control counter
+ * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
+ * @skb: resume_tx packet
+ */
+static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
+{
+ struct qrtr_tx_flow_waiter *waiter;
+ struct qrtr_tx_flow_waiter *temp;
+ struct qrtr_ctrl_pkt pkt = {0,};
+ struct qrtr_tx_flow *flow;
+ struct sockaddr_qrtr src;
+ struct qrtr_sock *ipc;
+ struct sk_buff *skbn;
+ unsigned long key;
+
+ skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+ if (le32_to_cpu(pkt.cmd) != QRTR_TYPE_RESUME_TX)
+ return;
+
+ src.sq_family = AF_QIPCRTR;
+ src.sq_node = le32_to_cpu(pkt.client.node);
+ src.sq_port = le32_to_cpu(pkt.client.port);
+ key = (u64)src.sq_node << 32 | src.sq_port;
+
+ mutex_lock(&node->qrtr_tx_lock);
+ flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+ if (!flow) {
+ mutex_unlock(&node->qrtr_tx_lock);
+ return;
+ }
+
+ atomic_set(&flow->pending, 0);
+ wake_up_interruptible_all(&node->resume_tx);
+
+ list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
+ list_del(&waiter->node);
+ skbn = alloc_skb(0, GFP_KERNEL);
+ if (skbn) {
+ ipc = qrtr_sk(waiter->sk);
+ qrtr_local_enqueue(NULL, skbn, QRTR_TYPE_RESUME_TX,
+ &src, &ipc->us, 0);
+ }
+ sock_put(waiter->sk);
+ kfree(waiter);
+ }
+ mutex_unlock(&node->qrtr_tx_lock);
+
+ consume_skb(skb);
+}
+
+/**
+ * qrtr_tx_wait() - flow control for outgoing packets
+ * @node: qrtr_node that the packet is to be sent to
+ * @dest_node: node id of the destination
+ * @dest_port: port number of the destination
+ * @type: type of message
+ *
+ * The flow control scheme is based around the low and high "watermarks". When
+ * the low watermark is passed the confirm_rx flag is set on the outgoing
+ * message, which will trigger the remote to send a control message of the type
+ * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
+ * further transmission should be paused.
+ *
+ * Return: 1 if confirm_rx should be set, 0 otherwise, or a negative errno on failure
+ */
+static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to,
+ struct sock *sk, int type, unsigned int flags)
+{
+ unsigned long key = (u64)to->sq_node << 32 | to->sq_port;
+ struct qrtr_tx_flow_waiter *waiter;
+ struct qrtr_tx_flow *flow;
+ int confirm_rx = 0;
+ long timeo;
+ long ret;
+
+ /* Never set confirm_rx on non-data packets */
+ if (type != QRTR_TYPE_DATA)
+ return 0;
+
+ /* Assume sk is set correctly for all data type packets */
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+ mutex_lock(&node->qrtr_tx_lock);
+ flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (flow) {
+ INIT_LIST_HEAD(&flow->waiters);
+ if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+ kfree(flow);
+ flow = NULL;
+ }
+ }
+ }
+ mutex_unlock(&node->qrtr_tx_lock);
+
+ /* Set confirm_rx if we were unable to find and allocate a flow */
+ if (!flow)
+ return 1;
+
+ ret = timeo;
+ for (;;) {
+ mutex_lock(&node->qrtr_tx_lock);
+ if (READ_ONCE(flow->tx_failed)) {
+ WRITE_ONCE(flow->tx_failed, 0);
+ confirm_rx = 1;
+ mutex_unlock(&node->qrtr_tx_lock);
+ break;
+ }
+
+ if (atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH) {
+ confirm_rx = atomic_inc_return(&flow->pending) ==
+ QRTR_TX_FLOW_LOW;
+ mutex_unlock(&node->qrtr_tx_lock);
+ break;
+ }
+ if (!ret) {
+ list_for_each_entry(waiter, &flow->waiters, node) {
+ if (waiter->sk == sk) {
+ mutex_unlock(&node->qrtr_tx_lock);
+ return -EAGAIN;
+ }
+ }
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter) {
+ mutex_unlock(&node->qrtr_tx_lock);
+ return -ENOMEM;
+ }
+ waiter->sk = sk;
+ sock_hold(sk);
+ list_add_tail(&waiter->node, &flow->waiters);
+ QRTR_INFO(node->ilc, "new waiter for [0x%x:0x%x]\n",
+ to->sq_node, to->sq_port);
+ mutex_unlock(&node->qrtr_tx_lock);
+ return -EAGAIN;
+ }
+ mutex_unlock(&node->qrtr_tx_lock);
+
+ ret = wait_event_interruptible_timeout(node->resume_tx,
+ (!node->ep || READ_ONCE(flow->tx_failed) ||
+ atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH),
+ timeo);
+ if (ret < 0)
+ return ret;
+ if (!node->ep)
+ return -EPIPE;
+ }
+ return confirm_rx;
+}
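[Editor's note] The watermark accounting can be shown in isolation: pending grows per data message, confirm_rx is requested when the count reaches QRTR_TX_FLOW_LOW, and the sender blocks at QRTR_TX_FLOW_HIGH until a resume-tx zeroes the counter. A self-contained model of just that logic (user space, constants copied from above):

#include <stdio.h>

#define TX_FLOW_HIGH 10
#define TX_FLOW_LOW  5

/* 1: message must carry confirm_rx; -1: sender must wait for resume-tx */
static int flow_account(int *pending)
{
	if (*pending >= TX_FLOW_HIGH)
		return -1;
	return ++(*pending) == TX_FLOW_LOW;
}

int main(void)
{
	int pending = 0, i, rc;

	for (i = 1; i <= 12; i++) {
		rc = flow_account(&pending);
		if (rc < 0) {
			printf("msg %2d: blocked until resume-tx\n", i);
			pending = 0;	/* what qrtr_tx_resume() does */
			rc = flow_account(&pending);
		}
		printf("msg %2d: confirm_rx=%d pending=%d\n", i, rc, pending);
	}
	return 0;
}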
+
+/**
+ * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
+ * @node: qrtr_node that the packet is to be sent to
+ * @dest_node: node id of the destination
+ * @dest_port: port number of the destination
+ *
+ * Signal that the transmission of a message with confirm_rx flag failed. The
+ * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
+ * at which point transmission would stall forever waiting for the resume TX
+ * message associated with the dropped confirm_rx message.
+ * Work around this by marking the flow as having a failed transmission and
+ * cause the next transmission attempt to be sent with the confirm_rx.
+ */
+static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
+ int dest_port)
+{
+ unsigned long key = (u64)dest_node << 32 | dest_port;
+ struct qrtr_tx_flow *flow;
+
+ mutex_lock(&node->qrtr_tx_lock);
+ flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+ if (flow)
+ WRITE_ONCE(flow->tx_failed, 1);
+ mutex_unlock(&node->qrtr_tx_lock);
+}
+
+/* Pass an outgoing packet socket buffer to the endpoint driver. */
+static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ int type, struct sockaddr_qrtr *from,
+ struct sockaddr_qrtr *to, unsigned int flags)
+{
+ struct qrtr_hdr_v1 *hdr;
+ size_t len = skb->len;
+ int rc = -ENODEV;
+ int confirm_rx;
+
+ if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO) {
+ kfree_skb(skb);
+ return rc;
+ }
+ if (atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* If sk is null, this is a forwarded packet and should not wait */
+ if (!skb->sk) {
+ struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+
+ confirm_rx = cb->confirm_rx;
+ } else {
+ confirm_rx = qrtr_tx_wait(node, to, skb->sk, type, flags);
+ if (confirm_rx < 0) {
+ kfree_skb(skb);
+ return confirm_rx;
+ }
+ }
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
+ hdr->type = cpu_to_le32(type);
+ hdr->src_node_id = cpu_to_le32(from->sq_node);
+ hdr->src_port_id = cpu_to_le32(from->sq_port);
+ if (to->sq_node == QRTR_NODE_BCAST)
+ hdr->dst_node_id = cpu_to_le32(node->nid);
+ else
+ hdr->dst_node_id = cpu_to_le32(to->sq_node);
+
+ hdr->dst_port_id = cpu_to_le32(to->sq_port);
+ hdr->size = cpu_to_le32(len);
+ hdr->confirm_rx = !!confirm_rx;
+
+ qrtr_log_tx_msg(node, hdr, skb);
+ rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ if (rc) {
+ pr_err("%s: failed to pad size %zu to %zu rc:%d\n", __func__,
+ len, ALIGN(len, 4) + sizeof(*hdr), rc);
+ return rc;
+ }
+
+ mutex_lock(&node->ep_lock);
+ if (node->ep)
+ rc = node->ep->xmit(node->ep, skb);
+ else
+ kfree_skb(skb);
+ mutex_unlock(&node->ep_lock);
+
+ /* Need to ensure that a subsequent message carries the otherwise lost
+ * confirm_rx flag if we dropped this one
+ */
+ if (rc && confirm_rx)
+ qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
+ if (type == QRTR_TYPE_HELLO) {
+ if (!rc)
+ atomic_inc(&node->hello_sent);
+ else
+ kthread_queue_work(&node->kworker, &node->say_hello);
+ }
+
+ return rc;
+}
+
+/* Lookup node by id.
+ *
+ * callers must release with qrtr_node_release()
+ */
+static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
+{
+ struct qrtr_node *node;
+ unsigned long flags;
+
+ down_read(&qrtr_epts_lock);
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ node = radix_tree_lookup(&qrtr_nodes, nid);
+ node = qrtr_node_acquire(node);
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+ up_read(&qrtr_epts_lock);
+
+ return node;
+}
+
+/* Assign node id to node.
+ *
+ * This is mostly useful for automatic node id assignment, based on
+ * the source id in the incoming packet.
+ */
+static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
+{
+ unsigned long flags;
+
+ if (nid == node->nid || nid == QRTR_EP_NID_AUTO)
+ return;
+
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ if (!radix_tree_lookup(&qrtr_nodes, nid))
+ radix_tree_insert(&qrtr_nodes, nid, node);
+
+ if (node->nid == QRTR_EP_NID_AUTO)
+ node->nid = nid;
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+}
+
+/**
+ * qrtr_peek_pkt_size() - Peek into the packet header to get potential pkt size
+ *
+ * @data: Starting address of the packet which points to router header.
+ *
+ * Return: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link layer fragmentation and re-assembly.
+ */
+int qrtr_peek_pkt_size(const void *data)
+{
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ unsigned int hdrlen;
+ unsigned int size;
+ unsigned int ver;
+
+ /* Version field in v1 is little endian, so this works for both cases */
+ ver = *(u8 *)data;
+
+ switch (ver) {
+ case QRTR_PROTO_VER_1:
+ v1 = data;
+ hdrlen = sizeof(*v1);
+ size = le32_to_cpu(v1->size);
+ break;
+ case QRTR_PROTO_VER_2:
+ v2 = data;
+ hdrlen = sizeof(*v2) + v2->optlen;
+ size = le32_to_cpu(v2->size);
+ break;
+ default:
+ pr_err("qrtr: Invalid version %d\n", ver);
+ return -EINVAL;
+ }
+
+ return ALIGN(size, 4) + hdrlen;
+}
+EXPORT_SYMBOL(qrtr_peek_pkt_size);
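[Editor's note] A transport calls this on the first bytes of a fragment to size its reassembly buffer before the rest of the packet arrives. A hedged kernel-side sketch; the per-endpoint bookkeeping struct is an assumption:

/* hypothetical reassembly state kept by a fragmenting transport */
struct xport_rx {
	struct qrtr_endpoint ep;
	u8 *buf;
	size_t have, need;
};

static int xport_rx_frag(struct xport_rx *rx, const void *frag, size_t len)
{
	if (!rx->buf) {
		int pktlen = qrtr_peek_pkt_size(frag);

		if (pktlen < 0)
			return pktlen;
		rx->buf = kmalloc(pktlen, GFP_ATOMIC);
		if (!rx->buf)
			return -ENOMEM;
		rx->need = pktlen;
		rx->have = 0;
	}

	if (rx->have + len > rx->need)
		return -EINVAL;		/* fragment overruns expected size */

	memcpy(rx->buf + rx->have, frag, len);
	rx->have += len;
	if (rx->have < rx->need)
		return 0;		/* wait for more fragments */

	qrtr_endpoint_post(&rx->ep, rx->buf, rx->need);
	kfree(rx->buf);
	rx->buf = NULL;
	return 0;
}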
+
+static void qrtr_alloc_backup(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ int errcode;
+
+ while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) {
+ skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1),
+ QRTR_BACKUP_LO_SIZE, 0, &errcode,
+ GFP_KERNEL);
+ if (!skb)
+ break;
+ skb_queue_tail(&qrtr_backup_lo, skb);
+ }
+ while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) {
+ skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1),
+ QRTR_BACKUP_HI_SIZE, 0, &errcode,
+ GFP_KERNEL);
+ if (!skb)
+ break;
+ skb_queue_tail(&qrtr_backup_hi, skb);
+ }
+}
+
+static struct sk_buff *qrtr_get_backup(size_t len)
+{
+ struct sk_buff *skb = NULL;
+
+ if (len < QRTR_BACKUP_LO_SIZE)
+ skb = skb_dequeue(&qrtr_backup_lo);
+ else if (len < QRTR_BACKUP_HI_SIZE)
+ skb = skb_dequeue(&qrtr_backup_hi);
+
+ if (skb)
+ queue_work(system_unbound_wq, &qrtr_backup_work);
+
+ return skb;
+}
+
+static void qrtr_backup_init(void)
+{
+ skb_queue_head_init(&qrtr_backup_lo);
+ skb_queue_head_init(&qrtr_backup_hi);
+ INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup);
+ queue_work(system_unbound_wq, &qrtr_backup_work);
+}
+
+static void qrtr_backup_deinit(void)
+{
+ cancel_work_sync(&qrtr_backup_work);
+ skb_queue_purge(&qrtr_backup_lo);
+ skb_queue_purge(&qrtr_backup_hi);
+}
+
+/**
+ * qrtr_endpoint_post() - post incoming data
+ * @ep: endpoint handle
+ * @data: data pointer
+ * @len: size of data in bytes
+ *
+ * Return: 0 on success; negative error code on failure
+ */
+int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+{
+ struct qrtr_node *node = ep->node;
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ struct qrtr_ctrl_pkt *pkt;
+ struct qrtr_sock *ipc;
+ struct sk_buff *skb;
+ struct qrtr_cb *cb;
+ size_t size;
+ unsigned int ver;
+ size_t hdrlen;
+ int errcode;
+
+ if (len == 0 || len & 3)
+ return -EINVAL;
+
+ skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
+ if (!skb) {
+ skb = qrtr_get_backup(len);
+ if (!skb) {
+ qrtr_log_skb_failure(data, len);
+ pr_err("qrtr: Unable to get skb with len:%lu\n", len);
+ return -ENOMEM;
+ }
+ }
+
+ skb_reserve(skb, sizeof(*v1));
+ cb = (struct qrtr_cb *)skb->cb;
+
+ /* Version field in v1 is little endian, so this works for both cases */
+ ver = *(u8 *)data;
+
+ switch (ver) {
+ case QRTR_PROTO_VER_1:
+ if (len < sizeof(*v1))
+ goto err;
+ v1 = data;
+ hdrlen = sizeof(*v1);
+
+ cb->type = le32_to_cpu(v1->type);
+ cb->src_node = le32_to_cpu(v1->src_node_id);
+ cb->src_port = le32_to_cpu(v1->src_port_id);
+ cb->confirm_rx = !!v1->confirm_rx;
+ cb->dst_node = le32_to_cpu(v1->dst_node_id);
+ cb->dst_port = le32_to_cpu(v1->dst_port_id);
+
+ size = le32_to_cpu(v1->size);
+ break;
+ case QRTR_PROTO_VER_2:
+ if (len < sizeof(*v2))
+ goto err;
+ v2 = data;
+ hdrlen = sizeof(*v2) + v2->optlen;
+
+ cb->type = v2->type;
+ cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
+ cb->src_node = le16_to_cpu(v2->src_node_id);
+ cb->src_port = le16_to_cpu(v2->src_port_id);
+ cb->dst_node = le16_to_cpu(v2->dst_node_id);
+ cb->dst_port = le16_to_cpu(v2->dst_port_id);
+
+ if (cb->src_port == (u16)QRTR_PORT_CTRL)
+ cb->src_port = QRTR_PORT_CTRL;
+ if (cb->dst_port == (u16)QRTR_PORT_CTRL)
+ cb->dst_port = QRTR_PORT_CTRL;
+
+ size = le32_to_cpu(v2->size);
+ break;
+ default:
+ pr_err("qrtr: Invalid version %d\n", ver);
+ goto err;
+ }
+
+ if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
+ cb->dst_port = QRTR_PORT_CTRL;
+
+ if (!size || len != ALIGN(size, 4) + hdrlen)
+ goto err;
+
+ if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
+ cb->type != QRTR_TYPE_RESUME_TX)
+ goto err;
+
+ skb->data_len = size;
+ skb->len = size;
+ skb_store_bits(skb, 0, data + hdrlen, size);
+
+ qrtr_node_assign(node, cb->src_node);
+ if (cb->type == QRTR_TYPE_NEW_SERVER) {
+ pkt = (void *)data + hdrlen;
+ qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+ }
+
+ if (cb->confirm_rx)
+ qrtr_log_resume_tx(cb->src_node, cb->src_port, RTX_SKB_ALLOC_SUCC);
+ qrtr_log_rx_msg(node, skb);
+ /* All control packets and data packets destined for other nodes are
+ * queued to the worker for forwarding.
+ */
+ if (cb->type != QRTR_TYPE_DATA || cb->dst_node != qrtr_local_nid) {
+ skb_queue_tail(&node->rx_queue, skb);
+ kthread_queue_work(&node->kworker, &node->read_data);
+ pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true);
+ } else {
+ ipc = qrtr_port_lookup(cb->dst_port);
+ if (!ipc) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+ qrtr_port_put(ipc);
+ goto err;
+ }
+
+ /* Force wakeup for all packets except for sensors */
+ if (node->nid != 9)
+ pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true);
+
+ qrtr_port_put(ipc);
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
+
+/**
+ * qrtr_alloc_ctrl_packet() - allocate control packet skb
+ * @pkt: reference to qrtr_ctrl_pkt pointer
+ *
+ * Returns newly allocated sk_buff, or NULL on failure
+ *
+ * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
+ * on success returns a reference to the control packet in @pkt.
+ */
+static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
+{
+ const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, QRTR_HDR_MAX_SIZE);
+ *pkt = skb_put_zero(skb, pkt_len);
+
+ return skb;
+}
+
+static bool qrtr_must_forward(struct qrtr_node *src,
+ struct qrtr_node *dst, u32 type)
+{
+ /* Node structure is not maintained for local processor.
+ * Hence src is null in that case.
+ */
+ if (!src)
+ return true;
+
+ if (!dst)
+ return false;
+
+ if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+ return false;
+
+ if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+ return false;
+
+ if (abs(dst->net_id - src->net_id) > 1)
+ return true;
+
+ return false;
+}
+
+static void qrtr_fwd_ctrl_pkt(struct qrtr_node *src, struct sk_buff *skb)
+{
+ struct qrtr_node *node;
+ struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+
+ down_read(&qrtr_epts_lock);
+ list_for_each_entry(node, &qrtr_all_epts, item) {
+ struct sockaddr_qrtr from;
+ struct sockaddr_qrtr to;
+ struct sk_buff *skbn;
+
+ if (!qrtr_must_forward(src, node, cb->type))
+ continue;
+
+ skbn = skb_clone(skb, GFP_KERNEL);
+ if (!skbn)
+ break;
+
+ from.sq_family = AF_QIPCRTR;
+ from.sq_node = cb->src_node;
+ from.sq_port = cb->src_port;
+
+ to.sq_family = AF_QIPCRTR;
+ to.sq_node = node->nid;
+ to.sq_port = QRTR_PORT_CTRL;
+
+ qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
+ }
+ up_read(&qrtr_epts_lock);
+}
+
+static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
+{
+ struct sockaddr_qrtr from = {AF_QIPCRTR, cb->src_node, cb->src_port};
+ struct sockaddr_qrtr to = {AF_QIPCRTR, cb->dst_node, cb->dst_port};
+ struct qrtr_node *node;
+
+ node = qrtr_node_lookup(cb->dst_node);
+ if (!node) {
+ kfree_skb(skb);
+ return;
+ }
+
+ qrtr_node_enqueue(node, skb, cb->type, &from, &to, 0);
+ qrtr_node_release(node);
+}
+
+static void qrtr_sock_queue_skb(struct qrtr_node *node, struct sk_buff *skb,
+ struct qrtr_sock *ipc)
+{
+ struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+ int rc;
+
+ /* Don't queue HELLO if control port already received */
+ if (cb->type == QRTR_TYPE_HELLO) {
+ if (atomic_read(&node->hello_rcvd)) {
+ kfree_skb(skb);
+ return;
+ }
+ atomic_inc(&node->hello_rcvd);
+ }
+
+ rc = sock_queue_rcv_skb(&ipc->sk, skb);
+ if (rc) {
+ pr_err("%s: qrtr pkt dropped flow[%d] rc[%d]\n",
+ __func__, cb->confirm_rx, rc);
+ kfree_skb(skb);
+ }
+}
+
+/* Handle not atomic operations for a received packet. */
+static void qrtr_node_rx_work(struct kthread_work *work)
+{
+ struct qrtr_node *node = container_of(work, struct qrtr_node,
+ read_data);
+ struct sk_buff *skb;
+ char name[32] = {0,};
+
+ if (unlikely(!node->ilc)) {
+ snprintf(name, sizeof(name), "qrtr_%d", node->nid);
+ node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
+ }
+
+ while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
+ struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+ struct qrtr_sock *ipc;
+
+ if (cb->type != QRTR_TYPE_DATA)
+ qrtr_fwd_ctrl_pkt(node, skb);
+
+ if (cb->type == QRTR_TYPE_RESUME_TX) {
+ if (cb->dst_node != qrtr_local_nid) {
+ qrtr_fwd_pkt(skb, cb);
+ continue;
+ }
+ qrtr_tx_resume(node, skb);
+ } else if (cb->dst_node != qrtr_local_nid &&
+ cb->type == QRTR_TYPE_DATA) {
+ qrtr_fwd_pkt(skb, cb);
+ } else {
+ ipc = qrtr_port_lookup(cb->dst_port);
+ if (!ipc) {
+ kfree_skb(skb);
+ } else {
+ qrtr_sock_queue_skb(node, skb, ipc);
+ qrtr_port_put(ipc);
+ }
+ }
+ }
+}
+
+static void qrtr_hello_work(struct kthread_work *work)
+{
+ struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct qrtr_ctrl_pkt *pkt;
+ struct qrtr_node *node;
+ struct qrtr_sock *ctrl;
+ struct sk_buff *skb;
+
+ ctrl = qrtr_port_lookup(QRTR_PORT_CTRL);
+ if (!ctrl)
+ return;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt);
+ if (!skb) {
+ qrtr_port_put(ctrl);
+ return;
+ }
+
+ node = container_of(work, struct qrtr_node, say_hello);
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_HELLO);
+ from.sq_node = qrtr_local_nid;
+ to.sq_node = node->nid;
+ qrtr_node_enqueue(node, skb, QRTR_TYPE_HELLO, &from, &to, 0);
+ qrtr_port_put(ctrl);
+}
+
+/**
+ * qrtr_endpoint_register() - register a new endpoint
+ * @ep: endpoint to register
+ * @net_id: network cluster id of the endpoint
+ * @rt: flag to mark a real-time, low-latency endpoint
+ * Return: 0 on success; negative error code on failure
+ *
+ * The specified endpoint must have the xmit function pointer set on call.
+ */
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
+ bool rt)
+{
+ struct qrtr_node *node;
+ struct sched_param param = {.sched_priority = 1};
+
+ if (!ep || !ep->xmit)
+ return -EINVAL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ kref_init(&node->ref);
+ mutex_init(&node->ep_lock);
+ skb_queue_head_init(&node->rx_queue);
+ node->nid = QRTR_EP_NID_AUTO;
+ node->ep = ep;
+ atomic_set(&node->hello_sent, 0);
+ atomic_set(&node->hello_rcvd, 0);
+
+ kthread_init_work(&node->read_data, qrtr_node_rx_work);
+ kthread_init_work(&node->say_hello, qrtr_hello_work);
+ kthread_init_worker(&node->kworker);
+ node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx");
+ if (IS_ERR(node->task)) {
+ kfree(node);
+ return -ENOMEM;
+ }
+ if (rt)
+ sched_setscheduler(node->task, SCHED_FIFO, &param);
+
+ mutex_init(&node->qrtr_tx_lock);
+ INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
+ init_waitqueue_head(&node->resume_tx);
+
+ qrtr_node_assign(node, node->nid);
+ node->net_id = net_id;
+
+ down_write(&qrtr_epts_lock);
+ list_add(&node->item, &qrtr_all_epts);
+ up_write(&qrtr_epts_lock);
+ ep->node = node;
+
+ node->ws = wakeup_source_register(NULL, "qrtr_ws");
+
+ kthread_queue_work(&node->kworker, &node->say_hello);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
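[Editor's note] Tying the two directions together: a transport registers an endpoint whose xmit hook carries fully framed packets toward the link, and feeds received data back through qrtr_endpoint_post(). A hedged sketch; hw_send() is an assumed hardware helper and net id 0 is arbitrary:

static int xport_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	/* the v1 header has already been pushed by qrtr_node_enqueue() */
	int rc = hw_send(skb->data, skb->len);	/* assumed helper */

	if (rc)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return rc;
}

static struct qrtr_endpoint xport_ep = {
	.xmit = xport_xmit,
};

static int xport_probe(void)
{
	return qrtr_endpoint_register(&xport_ep, 0, false);
}

static void xport_remove(void)
{
	qrtr_endpoint_unregister(&xport_ep);
}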
+
+static void qrtr_notify_bye(u32 nid)
+{
+ struct sockaddr_qrtr src = {AF_QIPCRTR, nid, QRTR_PORT_CTRL};
+ struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
+ struct qrtr_ctrl_pkt *pkt;
+ struct sk_buff *skb;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt);
+ if (!skb)
+ return;
+
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
+ qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0);
+}
+
+static u32 qrtr_calc_checksum(struct qrtr_ctrl_pkt *pkt)
+{
+ u32 checksum = 0;
+ u32 mask = 0xffff;
+ u16 upper_nb;
+ u16 lower_nb;
+ u32 *msg;
+ int i;
+
+ if (!pkt)
+ return checksum;
+ msg = (u32 *)pkt;
+
+ for (i = 0; i < sizeof(*pkt) / sizeof(*msg); i++) {
+ lower_nb = *msg & mask;
+ upper_nb = (*msg >> 16) & mask;
+ checksum += (upper_nb + lower_nb);
+ msg++;
+ }
+ while (checksum > 0xffff)
+ checksum = (checksum & mask) + ((checksum >> 16) & mask);
+
+ checksum = ~checksum & mask;
+
+ return checksum;
+}
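[Editor's note] qrtr_calc_checksum() is a 16-bit ones' complement sum over the control packet: each 32-bit word is split into halves, carries are folded back in, and the result is inverted. The fold can be checked standalone (the packet words below are arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold16(const uint32_t *msg, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)
		sum += (msg[i] & 0xffff) + ((msg[i] >> 16) & 0xffff);
	while (sum > 0xffff)
		sum = (sum & 0xffff) + ((sum >> 16) & 0xffff);
	return ~sum & 0xffff;
}

int main(void)
{
	uint32_t pkt[6] = { 0x00000006, 0x12345678, 0x0000abcd, 0, 0, 0 };
	uint32_t csum = fold16(pkt, 5);

	pkt[5] = csum;	/* re-summing data plus checksum must fold to 0 */
	printf("checksum=0x%04x recheck=0x%04x\n", csum, fold16(pkt, 6));
	return 0;
}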
+
+static void qrtr_fwd_del_proc(struct qrtr_node *src, unsigned int nid)
+{
+ struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct qrtr_ctrl_pkt *pkt;
+ struct qrtr_node *dst;
+ struct sk_buff *skb;
+
+ list_for_each_entry(dst, &qrtr_all_epts, item) {
+ if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
+ continue;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt);
+ if (!skb)
+ return;
+
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_PROC);
+ pkt->proc.rsvd = QRTR_DEL_PROC_MAGIC;
+ pkt->proc.node = cpu_to_le32(nid);
+ pkt->proc.rsvd = cpu_to_le32(qrtr_calc_checksum(pkt));
+
+ from.sq_node = src->nid;
+ to.sq_node = dst->nid;
+ qrtr_node_enqueue(dst, skb, QRTR_TYPE_DEL_PROC, &from, &to, 0);
+ }
+}
+
+/**
+ * qrtr_endpoint_unregister - unregister endpoint
+ * @ep: endpoint to unregister
+ */
+void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
+{
+ struct radix_tree_iter iter;
+ struct qrtr_node *node = ep->node;
+ unsigned long flags;
+ void __rcu **slot;
+
+ mutex_lock(&node->ep_lock);
+ node->ep = NULL;
+ mutex_unlock(&node->ep_lock);
+
+ /* Notify the local controller about the event */
+ down_read(&qrtr_epts_lock);
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ if (node != *slot)
+ continue;
+
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+
+ qrtr_notify_bye(iter.index);
+ qrtr_fwd_del_proc(node, iter.index);
+
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ }
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+ up_read(&qrtr_epts_lock);
+
+ /* Wake up any transmitters waiting for resume-tx from the node */
+ wake_up_interruptible_all(&node->resume_tx);
+ qrtr_log_resume_tx_node_erase(node->nid);
+ qrtr_node_release(node);
+ ep->node = NULL;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
+
+/* Lookup socket by port.
+ *
+ * Callers must release with qrtr_port_put()
+ */
+static struct qrtr_sock *qrtr_port_lookup(int port)
+{
+ struct qrtr_sock *ipc;
+ unsigned long flags;
+
+ if (port == QRTR_PORT_CTRL)
+ port = 0;
+
+ spin_lock_irqsave(&qrtr_port_lock, flags);
+ ipc = idr_find(&qrtr_ports, port);
+ if (ipc)
+ sock_hold(&ipc->sk);
+ spin_unlock_irqrestore(&qrtr_port_lock, flags);
+
+ return ipc;
+}
+
+/* Release acquired socket. */
+static void qrtr_port_put(struct qrtr_sock *ipc)
+{
+ sock_put(&ipc->sk);
+}
+
+static void qrtr_send_del_client(struct qrtr_sock *ipc)
+{
+ struct qrtr_ctrl_pkt *pkt;
+ struct sockaddr_qrtr to;
+ struct qrtr_node *node;
+ struct sk_buff *skbn;
+ struct sk_buff *skb;
+ int type = QRTR_TYPE_DEL_CLIENT;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt);
+ if (!skb)
+ return;
+
+ to.sq_family = AF_QIPCRTR;
+ to.sq_node = QRTR_NODE_BCAST;
+ to.sq_port = QRTR_PORT_CTRL;
+
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
+ pkt->client.node = cpu_to_le32(ipc->us.sq_node);
+ pkt->client.port = cpu_to_le32(ipc->us.sq_port);
+
+ qrtr_log_resume_tx(pkt->client.node, pkt->client.port,
+ RTX_REMOVE_RECORD);
+
+ skb_set_owner_w(skb, &ipc->sk);
+
+ if (ipc->state == QRTR_STATE_MULTI) {
+ qrtr_bcast_enqueue(NULL, skb, type, &ipc->us, &to, 0);
+ return;
+ }
+
+ if (ipc->state > QRTR_STATE_INIT) {
+ node = qrtr_node_lookup(ipc->state);
+ if (!node)
+ goto exit;
+
+ skbn = skb_clone(skb, GFP_KERNEL);
+ if (!skbn) {
+ qrtr_node_release(node);
+ goto exit;
+ }
+
+ skb_set_owner_w(skbn, &ipc->sk);
+ qrtr_node_enqueue(node, skbn, type, &ipc->us, &to, 0);
+ qrtr_node_release(node);
+ }
+exit:
+ qrtr_local_enqueue(NULL, skb, type, &ipc->us, &to, 0);
+}
+
+/* Remove port assignment. */
+static void qrtr_port_remove(struct qrtr_sock *ipc)
+{
+ int port = ipc->us.sq_port;
+ unsigned long flags;
+
+ qrtr_send_del_client(ipc);
+ if (port == QRTR_PORT_CTRL)
+ port = 0;
+
+ __sock_put(&ipc->sk);
+
+ spin_lock_irqsave(&qrtr_port_lock, flags);
+ idr_remove(&qrtr_ports, port);
+ spin_unlock_irqrestore(&qrtr_port_lock, flags);
+}
+
+/* Assign port number to socket.
+ *
+ * Specify port in the integer pointed to by port, and it will be adjusted
+ * on return as necessary.
+ *
+ * Port may be:
+ * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
+ * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
+ * >QRTR_MIN_EPH_SOCKET: Specified; available to all
+ */
+static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
+{
+ int rc;
+
+ if (!*port) {
+ rc = idr_alloc_cyclic(&qrtr_ports, ipc, QRTR_MIN_EPH_SOCKET,
+ QRTR_MAX_EPH_SOCKET + 1, GFP_ATOMIC);
+ if (rc >= 0)
+ *port = rc;
+ } else if (*port < QRTR_MIN_EPH_SOCKET &&
+ !(capable(CAP_NET_ADMIN) ||
+ in_egroup_p(AID_VENDOR_QRTR) ||
+ in_egroup_p(GLOBAL_ROOT_GID))) {
+ rc = -EACCES;
+ } else if (*port == QRTR_PORT_CTRL) {
+ rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+ } else {
+ rc = idr_alloc_cyclic(&qrtr_ports, ipc, *port, *port + 1,
+ GFP_ATOMIC);
+ if (rc >= 0)
+ *port = rc;
+ }
+
+ if (rc == -ENOSPC)
+ return -EADDRINUSE;
+ else if (rc < 0)
+ return rc;
+
+ sock_hold(&ipc->sk);
+
+ return 0;
+}
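[Editor's note] The same policy is visible from user space: binding port 0 picks an ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET], while ports below that range need CAP_NET_ADMIN or one of the group exemptions above. A small runnable sketch:

#include <linux/qrtr.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_qrtr sq;
	socklen_t sl = sizeof(sq);
	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* learn the local node id; bind() requires sq_node to match it */
	getsockname(fd, (struct sockaddr *)&sq, &sl);
	sq.sq_port = 0;		/* 0 = assign an ephemeral port */
	if (bind(fd, (struct sockaddr *)&sq, sizeof(sq)) < 0) {
		perror("bind");
		return 1;
	}

	getsockname(fd, (struct sockaddr *)&sq, &sl);
	printf("bound node %u port %u\n", sq.sq_node, sq.sq_port);
	close(fd);
	return 0;
}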
+
+/* Reset all non-control ports */
+static void qrtr_reset_ports(void)
+{
+ struct qrtr_sock *ipc;
+ int id;
+
+ idr_for_each_entry(&qrtr_ports, ipc, id) {
+ /* Don't reset control port */
+ if (id == 0)
+ continue;
+
+ sock_hold(&ipc->sk);
+ ipc->sk.sk_err = ENETRESET;
+ if (ipc->sk.sk_error_report)
+ ipc->sk.sk_error_report(&ipc->sk);
+ sock_put(&ipc->sk);
+ }
+}
+
+/* Bind socket to address.
+ *
+ * Socket should be locked upon call.
+ */
+static int __qrtr_bind(struct socket *sock,
+ const struct sockaddr_qrtr *addr, int zapped)
+{
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ unsigned long flags;
+ int port;
+ int rc;
+
+ /* rebinding ok */
+ if (!zapped && addr->sq_port == ipc->us.sq_port)
+ return 0;
+
+ spin_lock_irqsave(&qrtr_port_lock, flags);
+ port = addr->sq_port;
+ rc = qrtr_port_assign(ipc, &port);
+ if (rc) {
+ spin_unlock_irqrestore(&qrtr_port_lock, flags);
+ return rc;
+ }
+ /* Notify all open ports about the new controller */
+ if (port == QRTR_PORT_CTRL)
+ qrtr_reset_ports();
+ spin_unlock_irqrestore(&qrtr_port_lock, flags);
+
+ if (port == QRTR_PORT_CTRL) {
+ struct qrtr_node *node;
+
+ down_write(&qrtr_epts_lock);
+ list_for_each_entry(node, &qrtr_all_epts, item) {
+ atomic_set(&node->hello_sent, 0);
+ atomic_set(&node->hello_rcvd, 0);
+ }
+ up_write(&qrtr_epts_lock);
+ }
+
+ /* unbind previous, if any */
+ if (!zapped)
+ qrtr_port_remove(ipc);
+ ipc->us.sq_port = port;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ return 0;
+}
+
+/* Auto bind to an ephemeral port. */
+static int qrtr_autobind(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_qrtr addr;
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ addr.sq_family = AF_QIPCRTR;
+ addr.sq_node = qrtr_local_nid;
+ addr.sq_port = 0;
+
+ return __qrtr_bind(sock, &addr, 1);
+}
+
+/* Bind socket to specified sockaddr. */
+static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+ DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ int rc;
+
+ if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+ return -EINVAL;
+
+ if (addr->sq_node != ipc->us.sq_node)
+ return -EINVAL;
+
+ lock_sock(sk);
+ rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
+ release_sock(sk);
+
+ return rc;
+}
+
+/* Queue packet to local peer socket. */
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ int type, struct sockaddr_qrtr *from,
+ struct sockaddr_qrtr *to, unsigned int flags)
+{
+ struct qrtr_sock *ipc;
+ struct qrtr_cb *cb;
+ struct sock *sk = skb->sk;
+
+ ipc = qrtr_port_lookup(to->sq_port);
+ if (!ipc && to->sq_port == QRTR_PORT_CTRL) {
+ kfree_skb(skb);
+ return 0;
+ }
+ if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+ if (ipc)
+ qrtr_port_put(ipc);
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+ /* Keep raising ENETRESET until the socket is closed */
+ if (sk && sk->sk_err == ENETRESET) {
+ sock_hold(sk);
+ sk->sk_err = ENETRESET;
+ if (sk->sk_error_report)
+ sk->sk_error_report(sk);
+ sock_put(sk);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ cb = (struct qrtr_cb *)skb->cb;
+ cb->src_node = from->sq_node;
+ cb->src_port = from->sq_port;
+
+ if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+ qrtr_port_put(ipc);
+ kfree_skb(skb);
+ return -ENOSPC;
+ }
+
+ qrtr_port_put(ipc);
+
+ return 0;
+}
+
+/* Queue packet for broadcast. */
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ int type, struct sockaddr_qrtr *from,
+ struct sockaddr_qrtr *to, unsigned int flags)
+{
+ struct sk_buff *skbn;
+
+ down_read(&qrtr_epts_lock);
+ list_for_each_entry(node, &qrtr_all_epts, item) {
+ if (node->nid == QRTR_EP_NID_AUTO && type != QRTR_TYPE_HELLO)
+ continue;
+
+ skbn = skb_clone(skb, GFP_KERNEL);
+ if (!skbn)
+ break;
+ skb_set_owner_w(skbn, skb->sk);
+ qrtr_node_enqueue(node, skbn, type, from, to, flags);
+ }
+ up_read(&qrtr_epts_lock);
+
+ qrtr_local_enqueue(NULL, skb, type, from, to, flags);
+
+ return 0;
+}
+
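+/* Send a datagram: resolve the destination (broadcast, local loopback or a
+ * remote node) and hand the skb to the matching enqueue function. Messages
+ * to or from the control port carry their type in the first four bytes of
+ * the payload.
+ */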
+static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+ DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+ int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
+ struct sockaddr_qrtr *, struct sockaddr_qrtr *,
+ unsigned int);
+ __le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ struct qrtr_ctrl_pkt pkt;
+ struct qrtr_node *node;
+ struct qrtr_node *srv_node;
+ struct sk_buff *skb;
+ size_t plen;
+ u32 type;
+ int rc;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT))
+ return -EINVAL;
+
+ if (len > 65535)
+ return -EMSGSIZE;
+
+ lock_sock(sk);
+
+ if (addr) {
+ if (msg->msg_namelen < sizeof(*addr)) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
+ if (addr->sq_family != AF_QIPCRTR) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
+ rc = qrtr_autobind(sock);
+ if (rc) {
+ release_sock(sk);
+ return rc;
+ }
+ } else if (sk->sk_state == TCP_ESTABLISHED) {
+ addr = &ipc->peer;
+ } else {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
+
+ node = NULL;
+ srv_node = NULL;
+ if (addr->sq_node == QRTR_NODE_BCAST) {
+ if (addr->sq_port != QRTR_PORT_CTRL &&
+ qrtr_local_nid != QRTR_NODE_BCAST) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
+ enqueue_fn = qrtr_bcast_enqueue;
+ } else if (addr->sq_node == ipc->us.sq_node) {
+ enqueue_fn = qrtr_local_enqueue;
+ } else {
+ node = qrtr_node_lookup(addr->sq_node);
+ if (!node) {
+ release_sock(sk);
+ return -ECONNRESET;
+ }
+ enqueue_fn = qrtr_node_enqueue;
+ if (ipc->state > QRTR_STATE_INIT && ipc->state != node->nid)
+ ipc->state = QRTR_STATE_MULTI;
+ else if (ipc->state == QRTR_STATE_INIT)
+ ipc->state = node->nid;
+ }
+
+ plen = (len + 3) & ~3;
+ skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
+ msg->msg_flags & MSG_DONTWAIT, &rc);
+ if (!skb) {
+ /* rc was already set by sock_alloc_send_skb() */
+ goto out_node;
+ }
+
+ skb_reserve(skb, QRTR_HDR_MAX_SIZE);
+
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (rc) {
+ kfree_skb(skb);
+ goto out_node;
+ }
+
+ if (ipc->us.sq_port == QRTR_PORT_CTRL ||
+ addr->sq_port == QRTR_PORT_CTRL) {
+ if (len < 4) {
+ rc = -EINVAL;
+ kfree_skb(skb);
+ goto out_node;
+ }
+
+ /* control messages already require the type as 'command' */
+ skb_copy_bits(skb, 0, &qrtr_type, 4);
+ }
+
+ type = le32_to_cpu(qrtr_type);
+ if (addr->sq_port == QRTR_PORT_CTRL && type == QRTR_TYPE_NEW_SERVER) {
+ ipc->state = QRTR_STATE_MULTI;
+
+ /* drop new server cmds that are not forwardable to dst node */
+ skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+ srv_node = qrtr_node_lookup(pkt.server.node);
+ if (!qrtr_must_forward(srv_node, node, type)) {
+ rc = 0;
+ kfree_skb(skb);
+ qrtr_node_release(srv_node);
+ goto out_node;
+ }
+ qrtr_node_release(srv_node);
+ }
+
+ rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
+ if (rc >= 0)
+ rc = len;
+
+out_node:
+ qrtr_node_release(node);
+ release_sock(sk);
+
+ return rc;
+}
+
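+/* Acknowledge a confirm_rx packet: send a RESUME_TX control message back to
+ * its source so the remote end can resume transmission.
+ */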
+static int qrtr_send_resume_tx(struct qrtr_cb *cb)
+{
+ struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
+ struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
+ struct qrtr_ctrl_pkt *pkt;
+ struct qrtr_node *node;
+ struct sk_buff *skb;
+ int ret;
+
+ node = qrtr_node_lookup(remote.sq_node);
+ if (!node)
+ return -EINVAL;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt);
+ if (!skb) {
+ qrtr_log_resume_tx(cb->src_node, cb->src_port,
+ RTX_CTRL_SKB_ALLOC_FAIL);
+ qrtr_node_release(node);
+ return -ENOMEM;
+ }
+
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+ pkt->client.node = cpu_to_le32(cb->dst_node);
+ pkt->client.port = cpu_to_le32(cb->dst_port);
+
+ ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
+ &local, &remote, 0);
+ qrtr_log_resume_tx(cb->src_node, cb->src_port, RTX_SENT_ACK);
+ qrtr_node_release(node);
+
+ return ret;
+}
+
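+/* Receive a queued datagram and, when the sender requested confirm_rx,
+ * acknowledge it with a RESUME_TX before freeing the skb.
+ */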
+static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ struct qrtr_cb *cb;
+ int copied, rc;
+
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return -EADDRNOTAVAIL;
+
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &rc);
+ if (!skb)
+ return rc;
+
+ lock_sock(sk);
+ cb = (struct qrtr_cb *)skb->cb;
+
+ copied = skb->len;
+ if (copied > size) {
+ copied = size;
+ msg->msg_flags |= MSG_TRUNC;
+ }
+
+ rc = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (rc < 0)
+ goto out;
+ rc = copied;
+
+ if (addr) {
+ /* There is an anonymous 2-byte hole after sq_family,
+ * make sure to clear it.
+ */
+ memset(addr, 0, sizeof(*addr));
+
+ addr->sq_family = AF_QIPCRTR;
+ addr->sq_node = cb->src_node;
+ addr->sq_port = cb->src_port;
+ msg->msg_namelen = sizeof(*addr);
+ }
+
+out:
+ if (cb->confirm_rx)
+ qrtr_send_resume_tx(cb);
+
+ skb_free_datagram(sk, skb);
+ release_sock(sk);
+
+ return rc;
+}
+
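+/* Record a default peer for later send/recv calls; qrtr is datagram based,
+ * so no handshake takes place.
+ */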
+static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
+ int len, int flags)
+{
+ DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ int rc;
+
+ if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+
+ rc = qrtr_autobind(sock);
+ if (rc) {
+ release_sock(sk);
+ return rc;
+ }
+
+ ipc->peer = *addr;
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
+
+ release_sock(sk);
+
+ return 0;
+}
+
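+/* Report the socket's own address, or its peer when connected. */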
+static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
+ int peer)
+{
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sockaddr_qrtr qaddr;
+ struct sock *sk = sock->sk;
+
+ lock_sock(sk);
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
+
+ qaddr = ipc->peer;
+ } else {
+ qaddr = ipc->us;
+ }
+ release_sock(sk);
+
+ qaddr.sq_family = AF_QIPCRTR;
+
+ memcpy(saddr, &qaddr, sizeof(qaddr));
+
+ return sizeof(qaddr);
+}
+
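+/* Socket ioctls: pending queue sizes (TIOCOUTQ/TIOCINQ) and the local
+ * address (SIOCGIFADDR); routing ioctls are not supported.
+ */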
+static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ struct sockaddr_qrtr *sq;
+ struct sk_buff *skb;
+ struct ifreq ifr;
+ long len = 0;
+ int rc = 0;
+
+ lock_sock(sk);
+
+ switch (cmd) {
+ case TIOCOUTQ:
+ len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ if (len < 0)
+ len = 0;
+ rc = put_user(len, (int __user *)argp);
+ break;
+ case TIOCINQ:
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ len = skb->len;
+ rc = put_user(len, (int __user *)argp);
+ break;
+ case SIOCGIFADDR:
+ if (copy_from_user(&ifr, argp, sizeof(ifr))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
+ *sq = ipc->us;
+ if (copy_to_user(argp, &ifr, sizeof(ifr))) {
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ case SIOCADDRT:
+ case SIOCDELRT:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ rc = -EINVAL;
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+
+ release_sock(sk);
+
+ return rc;
+}
+
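+/* Tear down a socket: mark it shut down, release its port assignment and
+ * drop any queued packets.
+ */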
+static int qrtr_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct qrtr_sock *ipc;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+
+ ipc = qrtr_sk(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ qrtr_port_remove(ipc);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+
+ release_sock(sk);
+ sock_put(sk);
+
+ return 0;
+}
+
+static const struct proto_ops qrtr_proto_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_QIPCRTR,
+ .bind = qrtr_bind,
+ .connect = qrtr_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .listen = sock_no_listen,
+ .sendmsg = qrtr_sendmsg,
+ .recvmsg = qrtr_recvmsg,
+ .getname = qrtr_getname,
+ .ioctl = qrtr_ioctl,
+ .gettstamp = sock_gettstamp,
+ .poll = datagram_poll,
+ .shutdown = sock_no_shutdown,
+ .release = qrtr_release,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static struct proto qrtr_proto = {
+ .name = "QIPCRTR",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct qrtr_sock),
+};
+
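+/* Create an AF_QIPCRTR datagram socket, initially zapped (unbound). */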
+static int qrtr_create(struct net *net, struct socket *sock,
+ int protocol, int kern)
+{
+ struct qrtr_sock *ipc;
+ struct sock *sk;
+
+ if (sock->type != SOCK_DGRAM)
+ return -EPROTOTYPE;
+
+ sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ sock_init_data(sock, sk);
+ sock->ops = &qrtr_proto_ops;
+
+ ipc = qrtr_sk(sk);
+ ipc->us.sq_family = AF_QIPCRTR;
+ ipc->us.sq_node = qrtr_local_nid;
+ ipc->us.sq_port = 0;
+ ipc->state = QRTR_STATE_INIT;
+
+ return 0;
+}
+
+static const struct net_proto_family qrtr_family = {
+ .owner = THIS_MODULE,
+ .family = AF_QIPCRTR,
+ .create = qrtr_create,
+};
+
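+/* Register the protocol and socket family, then bring up the name service
+ * and the auxiliary backup/debug facilities.
+ */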
+static int __init qrtr_proto_init(void)
+{
+ int rc;
+
+ rc = proto_register(&qrtr_proto, 1);
+ if (rc)
+ return rc;
+
+ rc = sock_register(&qrtr_family);
+ if (rc) {
+ proto_unregister(&qrtr_proto);
+ return rc;
+ }
+
+ qrtr_ns_init();
+
+ qrtr_backup_init();
+ qrtr_debug_init();
+
+ return rc;
+}
+postcore_initcall(qrtr_proto_init);
+
+static void __exit qrtr_proto_fini(void)
+{
+ qrtr_ns_remove();
+ sock_unregister(qrtr_family.family);
+ proto_unregister(&qrtr_proto);
+
+ qrtr_backup_deinit();
+ qrtr_debug_remove();
+}
+module_exit(qrtr_proto_fini);
+
+MODULE_DESCRIPTION("Qualcomm IPC-router driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
diff --git a/qrtr/debug.c b/qrtr/debug.c
new file mode 100644
index 0000000..470c27f
--- /dev/null
+++ b/qrtr/debug.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+
+#include "debug.h"
+
+static LIST_HEAD(rtx_pkt_list);
+static DEFINE_SPINLOCK(rtx_pkt_list_lock);
+
+static DEFINE_XARRAY(rtx_records);
+static DEFINE_MUTEX(rtx_records_lock);
+
+static struct work_struct rtx_work;
+
+#define QRTR_FLAGS_CONFIRM_RX BIT(0)
+
+#define QRTR_PROTO_VER_1 1
+#define QRTR_PROTO_VER_2 3
+
+struct qrtr_hdr_v1 {
+ __le32 version;
+ __le32 type;
+ __le32 src_node_id;
+ __le32 src_port_id;
+ __le32 confirm_rx;
+ __le32 size;
+ __le32 dst_node_id;
+ __le32 dst_port_id;
+} __packed;
+
+struct qrtr_hdr_v2 {
+ u8 version;
+ u8 type;
+ u8 flags;
+ u8 optlen;
+ __le32 size;
+ __le16 src_node_id;
+ __le16 src_port_id;
+ __le16 dst_node_id;
+ __le16 dst_port_id;
+};
+
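+/* One record per (node, port) pair, keyed with the node id in the upper
+ * 32 bits and the port in the lower 32; tracks the most recent resume-tx
+ * state and when it was logged.
+ */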
+struct qrtr_rtx_record {
+ u8 state;
+ unsigned long key;
+ struct timespec64 time;
+};
+
+struct qrtr_rtx_pkt {
+ u8 state;
+ unsigned long key;
+ struct list_head item;
+};
+
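+/* Purge all resume-tx records belonging to a departed node, then log the
+ * node's removal.
+ */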
+void qrtr_log_resume_tx_node_erase(unsigned int node_id)
+{
+ unsigned long index;
+ struct qrtr_rtx_record *record;
+
+ mutex_lock(&rtx_records_lock);
+ xa_for_each(&rtx_records, index, record) {
+ if ((record->key >> 32) == node_id &&
+ record->state != RTX_UNREG_NODE) {
+ xa_erase(&rtx_records, record->key);
+ kfree(record);
+ }
+ }
+ mutex_unlock(&rtx_records_lock);
+
+ qrtr_log_resume_tx(node_id, 0, RTX_UNREG_NODE);
+}
+EXPORT_SYMBOL(qrtr_log_resume_tx_node_erase);
+
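+/* Create the record for @key on first use, otherwise refresh its state and
+ * timestamp; a record already flagged RTX_REMOVE_RECORD is erased instead.
+ */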
+static void qrtr_update_record(unsigned long key, u8 state)
+{
+ struct qrtr_rtx_record *record;
+
+ mutex_lock(&rtx_records_lock);
+ record = xa_load(&rtx_records, key);
+ if (!record) {
+ record = kzalloc(sizeof(*record), GFP_KERNEL);
+ if (!record) {
+ mutex_unlock(&rtx_records_lock);
+ return;
+ }
+
+ record->key = key;
+ record->state = state;
+ ktime_get_ts64(&record->time);
+ xa_store(&rtx_records, record->key, record, GFP_KERNEL);
+ mutex_unlock(&rtx_records_lock);
+ return;
+ }
+
+ if (record->state == RTX_REMOVE_RECORD) {
+ xa_erase(&rtx_records, record->key);
+ mutex_unlock(&rtx_records_lock);
+ kfree(record);
+ return;
+ }
+
+ record->state = state;
+ ktime_get_ts64(&record->time);
+ mutex_unlock(&rtx_records_lock);
+}
+
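+/* Worker: drain the pending packet list into the record xarray. The list
+ * lock is dropped around each update because qrtr_update_record() can
+ * sleep.
+ */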
+static void qrtr_rtx_work(struct work_struct *work)
+{
+ struct qrtr_rtx_pkt *pkt, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtx_pkt_list_lock, flags);
+ list_for_each_entry_safe(pkt, tmp, &rtx_pkt_list, item) {
+ list_del(&pkt->item);
+ spin_unlock_irqrestore(&rtx_pkt_list_lock, flags);
+ qrtr_update_record(pkt->key, pkt->state);
+ kfree(pkt);
+ spin_lock_irqsave(&rtx_pkt_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&rtx_pkt_list_lock, flags);
+}
+
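+/* Log a resume-tx event from any context: queue a small entry under a
+ * spinlock and defer the sleeping xarray update to the worker.
+ */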
+int qrtr_log_resume_tx(unsigned int node_id,
+ unsigned int port_id, u8 state)
+{
+ struct qrtr_rtx_pkt *pkt;
+ unsigned long flags;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_ATOMIC);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->state = state;
+ pkt->key = ((u64)node_id << 32 | port_id);
+
+ spin_lock_irqsave(&rtx_pkt_list_lock, flags);
+ list_add(&pkt->item, &rtx_pkt_list);
+ spin_unlock_irqrestore(&rtx_pkt_list_lock, flags);
+
+ schedule_work(&rtx_work);
+ return 0;
+}
+EXPORT_SYMBOL(qrtr_log_resume_tx);
+
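+/* On skb allocation failure, parse the raw wire header (v1 or v2) and, if
+ * the sender asked for confirm_rx, record that the ack could not be sent.
+ */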
+void qrtr_log_skb_failure(const void *data, size_t len)
+{
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ bool confirm_rx = false;
+ unsigned int node_id;
+ unsigned int port_id;
+ unsigned int ver;
+
+ ver = *(u8 *)data;
+ if (ver == QRTR_PROTO_VER_1 && len > sizeof(*v1)) {
+ v1 = data;
+ if (v1->confirm_rx) {
+ node_id = v1->src_node_id;
+ port_id = v1->src_port_id;
+ confirm_rx = true;
+ }
+ } else if (ver == QRTR_PROTO_VER_2 && len > sizeof(*v2)) {
+ v2 = data;
+ if (v2->flags & QRTR_FLAGS_CONFIRM_RX) {
+ node_id = v2->src_node_id;
+ port_id = v2->src_port_id;
+ confirm_rx = true;
+ }
+ } else {
+ pr_err("%s: Invalid version %d\n", __func__, ver);
+ }
+
+ if (confirm_rx)
+ qrtr_log_resume_tx(node_id, port_id,
+ RTX_SKB_ALLOC_FAIL);
+}
+EXPORT_SYMBOL(qrtr_log_skb_failure);
+
+void qrtr_debug_init(void)
+{
+ INIT_WORK(&rtx_work, qrtr_rtx_work);
+}
+EXPORT_SYMBOL(qrtr_debug_init);
+
+void qrtr_debug_remove(void)
+{
+ cancel_work_sync(&rtx_work);
+}
+EXPORT_SYMBOL(qrtr_debug_remove);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QRTR debug");
+MODULE_LICENSE("GPL v2");
diff --git a/qrtr/debug.h b/qrtr/debug.h
new file mode 100644
index 0000000..2ca5c71
--- /dev/null
+++ b/qrtr/debug.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QRTR_DEBUG_H_
+#define __QRTR_DEBUG_H_
+
+#include <linux/types.h>
+
+enum {
+ RTX_REMOVE_RECORD = 0xFF,
+ RTX_SKB_ALLOC_FAIL = 0xAA,
+ RTX_SKB_ALLOC_SUCC = 0xBB,
+ RTX_SENT_ACK = 0xCC,
+ RTX_CTRL_SKB_ALLOC_FAIL = 0xDD,
+ RTX_UNREG_NODE = 0xEE,
+};
+
+#if IS_ENABLED(CONFIG_QRTR_DEBUG)
+
+void qrtr_debug_init(void);
+
+void qrtr_debug_remove(void);
+
+void qrtr_log_resume_tx_node_erase(unsigned int node_id);
+
+int qrtr_log_resume_tx(unsigned int node_id,
+ unsigned int port_id, u8 state);
+
+void qrtr_log_skb_failure(const void *data, size_t len);
+
+#else
+
+static inline void qrtr_debug_init(void) { }
+
+static inline void qrtr_debug_remove(void) { }
+
+static inline void qrtr_log_resume_tx_node_erase(unsigned int node_id) { }
+
+static inline int qrtr_log_resume_tx(unsigned int node_id,
+ unsigned int port_id, u8 state)
+{
+ return 0;
+}
+
+static inline void qrtr_log_skb_failure(const void *data, size_t len) { }
+
+#endif
+
+#endif