summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlex Hong <rurumihong@google.com>2020-02-03 16:14:11 +0800
committerAlex Hong <rurumihong@google.com>2020-02-03 16:14:16 +0800
commitd7cab489e6199dde86b59e9c8daaad2149311557 (patch)
treebb17fbbdd277a38f9da33c5915276be972d7c219
parenta92a4f004da43807b3c6d38032764bc843541167 (diff)
parenta407b46367aa6c80435b8ccafd941d4222e333a9 (diff)
downloaddata-kernel-d7cab489e6199dde86b59e9c8daaad2149311557.tar.gz
Merge branch 'LA.UM.8.1.C9.09.00.00.518.406' via branch 'qcom-msm-4.14' into android-msm-sunfish-4.14android-10.0.0_r0.87android-10.0.0_r0.85android-10.0.0_r0.83android-msm-sunfish-4.14-android10-d4
Bug: 148752159 Change-Id: I3d2e69488d7684d528d12f3c950f45601f74dc34 Signed-off-by: Alex Hong <rurumihong@google.com>
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c13
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c12
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c101
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c21
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c4
-rw-r--r--drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h7
-rw-r--r--drivers/rmnet/shs/rmnet_shs.h4
-rw-r--r--drivers/rmnet/shs/rmnet_shs_config.c6
-rw-r--r--drivers/rmnet/shs/rmnet_shs_config.h3
-rwxr-xr-xdrivers/rmnet/shs/rmnet_shs_main.c39
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq.c48
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq.h7
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_genl.c4
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_genl.h3
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_mem.c403
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_mem.h29
16 files changed, 569 insertions, 135 deletions
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
index 7ce0a31..b262b23 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
@@ -3126,7 +3126,7 @@ static INT set_promiscuous_mode(void)
static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
{
- ULONG RETRYCOUNT = 1000;
+ ULONG RETRYCOUNT = 5000;
ULONG vy_count;
volatile ULONG VARMAC_GMIIAR;
@@ -3139,7 +3139,7 @@ static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
return -Y_FAILURE;
vy_count++;
- mdelay(1);
+ udelay(200);
MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
if (GET_VALUE(
@@ -3173,7 +3173,7 @@ static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
return -Y_FAILURE;
vy_count++;
- mdelay(1);
+ udelay(200);
MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
if (GET_VALUE(
@@ -3197,7 +3197,7 @@ static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data)
{
- ULONG RETRYCOUNT = 1000;
+ ULONG RETRYCOUNT = 5000;
ULONG vy_count;
volatile ULONG VARMAC_GMIIAR;
ULONG VARMAC_GMIIDR;
@@ -3211,8 +3211,7 @@ static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data)
return -Y_FAILURE;
vy_count++;
- mdelay(1);
-
+ udelay(200);
MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
if (GET_VALUE(
VARMAC_GMIIAR, MAC_GMIIAR_GB_LPOS,
@@ -3243,7 +3242,7 @@ static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data)
return -Y_FAILURE;
vy_count++;
- mdelay(1);
+ udelay(200);
MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
if (GET_VALUE(
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
index fda72a8..9fc79e7 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
@@ -4961,11 +4961,11 @@ static VOID DWC_ETH_QOS_config_timer_registers(
DBGPR("-->DWC_ETH_QOS_config_timer_registers\n");
- pdata->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ pdata->ptpclk_freq = pdata->default_ptp_clock;
/* program default addend */
- hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+ hw_if->config_default_addend(pdata, pdata->default_ptp_clock);
/* program Sub Second Increment Reg */
- hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+ hw_if->config_sub_second_increment(pdata->default_ptp_clock);
/* initialize system time */
getnstimeofday(&now);
hw_if->init_systime(now.tv_sec, now.tv_nsec);
@@ -5269,7 +5269,7 @@ int ETH_PPSOUT_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct
will change & We will not see 19.2Mhz for PPS0.
*/
if (pdata->res_data->pps_lpass_conn_en ) {
- eth_pps_cfg->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ eth_pps_cfg->ptpclk_freq = pdata->default_ptp_clock;
EMACDBG("using default ptp clock \n");
}
@@ -6040,10 +6040,10 @@ static int DWC_ETH_QOS_handle_hwtstamp_ioctl(struct DWC_ETH_QOS_prv_data *pdata,
hw_if->config_hw_time_stamping(VARMAC_TCR);
/* program default addend */
- hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+ hw_if->config_default_addend(pdata, pdata->default_ptp_clock);
/* program Sub Second Increment Reg */
- hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+ hw_if->config_sub_second_increment(pdata->default_ptp_clock);
/* initialize system time */
getnstimeofday(&now);
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
index faac49e..defeef8 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
@@ -445,6 +445,43 @@ void dump_phy_registers(struct DWC_ETH_QOS_prv_data *pdata)
pr_alert("\n****************************************************\n");
}
+static void DWC_ETH_QOS_request_phy_wol(struct DWC_ETH_QOS_prv_data *pdata)
+{
+ pdata->phy_wol_supported = 0;
+ pdata->phy_wol_wolopts = 0;
+
+ /* Check if phydev is valid*/
+ /* Check and enable Wake-on-LAN functionality in PHY*/
+ if (pdata->phydev) {
+ struct ethtool_wolinfo wol = {.cmd = ETHTOOL_GWOL};
+ wol.supported = 0;
+ wol.wolopts= 0;
+
+ phy_ethtool_get_wol(pdata->phydev, &wol);
+ pdata->phy_wol_supported = wol.supported;
+
+ /* Try to enable supported Wake-on-LAN features in PHY*/
+ if (wol.supported) {
+
+ device_set_wakeup_capable(&pdata->pdev->dev, 1);
+
+ wol.cmd = ETHTOOL_SWOL;
+ wol.wolopts = wol.supported;
+
+ if (!phy_ethtool_set_wol(pdata->phydev, &wol)){
+ pdata->phy_wol_wolopts = wol.wolopts;
+
+ enable_irq_wake(pdata->phy_irq);
+
+ device_set_wakeup_enable(&pdata->pdev->dev, 1);
+ EMACDBG("Enabled WoL[0x%x] in %s\n", wol.wolopts,
+ pdata->phydev->drv->name);
+ pdata->wol_enabled = 1;
+ }
+ }
+ }
+}
+
/*!
* \brief API to enable or disable PHY hibernation mode
*
@@ -1007,6 +1044,9 @@ void DWC_ETH_QOS_adjust_link(struct net_device *dev)
}
#endif
+ if (pdata->phy_intr_en && !pdata->wol_enabled)
+ DWC_ETH_QOS_request_phy_wol(pdata);
+
if (pdata->ipa_enabled && netif_running(dev)) {
if (phydev->link == 1)
DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_PHY_LINK_UP);
@@ -1030,42 +1070,6 @@ void DWC_ETH_QOS_adjust_link(struct net_device *dev)
DBGPR_MDIO("<--DWC_ETH_QOS_adjust_link\n");
}
-static void DWC_ETH_QOS_request_phy_wol(struct DWC_ETH_QOS_prv_data *pdata)
-{
- pdata->phy_wol_supported = 0;
- pdata->phy_wol_wolopts = 0;
-
- /* Check if phydev is valid*/
- /* Check and enable Wake-on-LAN functionality in PHY*/
- if (pdata->phydev) {
- struct ethtool_wolinfo wol = {.cmd = ETHTOOL_GWOL};
- wol.supported = 0;
- wol.wolopts= 0;
-
- phy_ethtool_get_wol(pdata->phydev, &wol);
- pdata->phy_wol_supported = wol.supported;
-
- /* Try to enable supported Wake-on-LAN features in PHY*/
- if (wol.supported) {
-
- device_set_wakeup_capable(&pdata->pdev->dev, 1);
-
- wol.cmd = ETHTOOL_SWOL;
- wol.wolopts = wol.supported;
-
- if (!phy_ethtool_set_wol(pdata->phydev, &wol)){
- pdata->phy_wol_wolopts = wol.wolopts;
-
- enable_irq_wake(pdata->phy_irq);
-
- device_set_wakeup_enable(&pdata->pdev->dev, 1);
- EMACDBG("Enabled WoL[0x%x] in %s\n", wol.wolopts,
- pdata->phydev->drv->name);
- }
- }
- }
-}
-
bool DWC_ETH_QOS_is_phy_link_up(struct DWC_ETH_QOS_prv_data *pdata)
{
/* PHY driver initializes phydev->link=1.
@@ -1194,10 +1198,8 @@ static int DWC_ETH_QOS_init_phy(struct net_device *dev)
phydev->irq = PHY_IGNORE_INTERRUPT;
phydev->interrupts = PHY_INTERRUPT_ENABLED;
- if (phydev->drv->config_intr &&
- !phydev->drv->config_intr(phydev)){
- DWC_ETH_QOS_request_phy_wol(pdata);
- } else {
+ if (!(phydev->drv->config_intr &&
+ !phydev->drv->config_intr(phydev))){
EMACERR("Failed to configure PHY interrupts");
BUG();
}
@@ -1250,6 +1252,21 @@ int DWC_ETH_QOS_mdio_register(struct net_device *dev)
DBGPR_MDIO("-->DWC_ETH_QOS_mdio_register\n");
+ if (pdata->res_data->phy_addr != -1) {
+ phy_reg_read_status =
+ DWC_ETH_QOS_mdio_read_direct(pdata, pdata->res_data->phy_addr, MII_BMSR,
+ &mii_status);
+ if (phy_reg_read_status == 0) {
+ if (mii_status != 0x0000 && mii_status != 0xffff) {
+ phy_detected = 1;
+ phyaddr = pdata->res_data->phy_addr;
+ EMACINFO("skip_phy_detection (phyaddr)%d\n", phyaddr);
+ goto skip_phy_detection;
+ } else
+ EMACERR("Invlaid phy address specified in device tree\n");
+ }
+ }
+
/* find the phy ID or phy address which is connected to our MAC */
for (phyaddr = 0; phyaddr < 32; phyaddr++) {
@@ -1276,6 +1293,8 @@ int DWC_ETH_QOS_mdio_register(struct net_device *dev)
return -ENOLINK;
}
+ skip_phy_detection:
+
pdata->phyaddr = phyaddr;
pdata->bus_id = 0x1;
pdata->phy_intr_en = false;
@@ -1307,7 +1326,7 @@ int DWC_ETH_QOS_mdio_register(struct net_device *dev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name,
pdata->bus_id);
new_bus->priv = dev;
- new_bus->phy_mask = 0;
+ new_bus->phy_mask = ~(1 << phyaddr);
new_bus->parent = &pdata->pdev->dev;
ret = mdiobus_register(new_bus);
if (ret != 0) {
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
index 9102457..877a703 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
@@ -899,6 +899,16 @@ static int DWC_ETH_QOS_get_dts_config(struct platform_device *pdev)
dwc_eth_qos_res_data.is_pinctrl_names = true;
EMACDBG("qcom,pinctrl-names present\n");
}
+ dwc_eth_qos_res_data.phy_addr = -1;
+ if (of_property_read_bool(pdev->dev.of_node, "emac-phy-addr")) {
+ ret = of_property_read_u32(pdev->dev.of_node, "emac-phy-addr",
+ &dwc_eth_qos_res_data.phy_addr);
+ if (ret) {
+ EMACINFO("phy_addr not specified, using dynamic phy detection\n");
+ dwc_eth_qos_res_data.phy_addr = -1;
+ }
+ EMACINFO("phy_addr = %d\n", dwc_eth_qos_res_data.phy_addr);
+ }
return ret;
@@ -1470,6 +1480,8 @@ static int DWC_ETH_QOS_init_gpios(struct device *dev)
gpio_set_value(dwc_eth_qos_res_data.gpio_phy_reset, PHY_RESET_GPIO_HIGH);
EMACDBG("PHY is out of reset successfully\n");
+ /* Add a 50ms delay so that the PHY gets sufficient time to come out of reset */
+ mdelay(50);
}
return ret;
@@ -1829,10 +1841,17 @@ static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
DWC_ETH_QOS_init_rx_coalesce(pdata);
+ if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_2_0 )
+ pdata->default_ptp_clock = DWC_ETH_QOS_PTP_CLOCK_57_6;
+ else if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2 || dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1)
+ pdata->default_ptp_clock = DWC_ETH_QOS_PTP_CLOCK_96;
+ else if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_2 )
+ pdata->default_ptp_clock = DWC_ETH_QOS_PTP_CLOCK_62_5;
+
#ifdef DWC_ETH_QOS_CONFIG_PTP
DWC_ETH_QOS_ptp_init(pdata);
/*default ptp clock frequency set to 50Mhz*/
- pdata->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ pdata->ptpclk_freq = pdata->default_ptp_clock;
#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
#endif /* end of DWC_ETH_QOS_CONFIG_PGTEST */
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
index 1bcead3..2f40f55 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -318,7 +318,7 @@ int DWC_ETH_QOS_ptp_init(struct DWC_ETH_QOS_prv_data *pdata)
if (pdata->res_data->pps_lpass_conn_en) {
/*Configuring PPS0 PPS output frequency to defualt 19.2 Mhz*/
eth_pps_cfg.ppsout_ch = 0;
- eth_pps_cfg.ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ eth_pps_cfg.ptpclk_freq = pdata->default_ptp_clock;
eth_pps_cfg.ppsout_freq = DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY;
eth_pps_cfg.ppsout_start = 1;
eth_pps_cfg.ppsout_duty = 50;
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
index ee29121..22ad45a 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
@@ -427,7 +427,9 @@ extern void *ipc_emac_log_ctxt;
#define DWC_ETH_QOS_SYSCLOCK 250000000 /* System clock is 250MHz */
#define DWC_ETH_QOS_SYSTIMEPERIOD 4 /* System time period is 4ns */
-#define DWC_ETH_QOS_DEFAULT_PTP_CLOCK 96000000
+#define DWC_ETH_QOS_PTP_CLOCK_57_6 57600000
+#define DWC_ETH_QOS_PTP_CLOCK_62_5 62500000
+#define DWC_ETH_QOS_PTP_CLOCK_96 96000000
#define DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY 19200000
#define DWC_ETH_QOS_TX_QUEUE_CNT (pdata->tx_queue_cnt)
@@ -1584,6 +1586,7 @@ struct DWC_ETH_QOS_res_data {
unsigned int emac_hw_version_type;
bool early_eth_en;
bool pps_lpass_conn_en;
+ int phy_addr;
};
struct DWC_ETH_QOS_prv_ipa_data {
@@ -1878,6 +1881,8 @@ struct DWC_ETH_QOS_prv_data {
struct class* avb_class_b_class;
struct delayed_work ipv6_addr_assign_wq;
bool print_kpi;
+ unsigned long default_ptp_clock;
+ bool wol_enabled;
};
struct ip_params {
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index f6ce09e..b7bf773 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -299,7 +299,7 @@ extern int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
int rmnet_shs_is_lpwr_cpu(u16 cpu);
void rmnet_shs_cancel_table(void);
void rmnet_shs_rx_wq_init(void);
-void rmnet_shs_rx_wq_exit(void);
+unsigned int rmnet_shs_rx_wq_exit(void);
int rmnet_shs_get_mask_len(u8 mask);
int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
@@ -314,7 +314,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_shs_flush_table(u8 is_force_flush, u8 ctxt);
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node);
void rmnet_shs_init(struct net_device *dev, struct net_device *vnd);
-void rmnet_shs_exit(void);
+void rmnet_shs_exit(unsigned int cpu_switch);
void rmnet_shs_ps_on_hdlr(void *port);
void rmnet_shs_ps_off_hdlr(void *port);
void rmnet_shs_update_cpu_proc_q_all_cpus(void);
diff --git a/drivers/rmnet/shs/rmnet_shs_config.c b/drivers/rmnet/shs/rmnet_shs_config.c
index e6b4002..6a628dc 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.c
+++ b/drivers/rmnet/shs/rmnet_shs_config.c
@@ -99,14 +99,16 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
* phy_dev is going down.
*/
if (!rmnet_vnd_total && rmnet_shs_cfg.rmnet_shs_init_complete) {
+ unsigned int cpu_switch;
+
pr_info("rmnet_shs deinit %s going down ", dev->name);
RCU_INIT_POINTER(rmnet_shs_skb_entry, NULL);
qmi_rmnet_ps_ind_deregister(rmnet_shs_cfg.port,
&rmnet_shs_cfg.rmnet_idl_ind_cb);
rmnet_shs_cancel_table();
- rmnet_shs_rx_wq_exit();
+ cpu_switch = rmnet_shs_rx_wq_exit();
rmnet_shs_wq_exit();
- rmnet_shs_exit();
+ rmnet_shs_exit(cpu_switch);
trace_rmnet_shs_high(RMNET_SHS_MODULE,
RMNET_SHS_MODULE_INIT_WQ,
0xDEF, 0xDEF, 0xDEF,
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index dc385e4..e55f5f8 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@ enum rmnet_shs_crit_err_e {
RMNET_SHS_WQ_INVALID_PTR_ERR,
RMNET_SHS_WQ_NODE_MALLOC_ERR,
RMNET_SHS_WQ_NL_SOCKET_ERR,
+ RMNET_SHS_CPU_FLOWS_BNDS_ERR,
RMNET_SHS_CRIT_ERR_MAX
};
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index ae66460..2df4330 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,8 @@
#define GET_CTIMER(CPU) rmnet_shs_cfg.core_flush[CPU].core_timer
#define SKB_FLUSH 0
+#define INCREMENT 1
+#define DECREMENT 0
/* Local Definitions and Declarations */
DEFINE_SPINLOCK(rmnet_shs_ht_splock);
DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
@@ -114,13 +116,21 @@ unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_coresum, uint, 0, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen of each core");
+static void rmnet_shs_change_cpu_num_flows(u16 map_cpu, bool inc)
+{
+ if (map_cpu < MAX_CPUS)
+ (inc) ? cpu_num_flows[map_cpu]++: cpu_num_flows[map_cpu]--;
+ else
+ rmnet_shs_crit_err[RMNET_SHS_CPU_FLOWS_BNDS_ERR]++;
+}
+
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node)
{
SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_REMOVE,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_del_init(&node->node_id);
- cpu_num_flows[node->map_cpu]--;
+ rmnet_shs_change_cpu_num_flows(node->map_cpu, DECREMENT);
}
@@ -131,7 +141,7 @@ void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_add(&node->node_id, hd);
- cpu_num_flows[node->map_cpu]++;
+ rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
}
void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
@@ -141,8 +151,8 @@ void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_move(&node->node_id, hd);
- cpu_num_flows[node->map_cpu]++;
- cpu_num_flows[oldcpu]--;
+ rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
+ rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
}
/* Evaluates the incoming transport protocol of the incoming skb. Determines
@@ -1359,14 +1369,25 @@ void rmnet_shs_rx_wq_init(void)
INIT_WORK(&shs_rx_work.work, rmnet_flush_buffered);
}
-void rmnet_shs_rx_wq_exit(void)
+unsigned int rmnet_shs_rx_wq_exit(void)
{
+ unsigned int cpu_switch = rmnet_shs_inst_rate_switch;
int i;
- for (i = 0; i < MAX_CPUS; i++)
+ /* Disable any further core_flush timer starts until cleanup
+ * is complete.
+ */
+ rmnet_shs_inst_rate_switch = 0;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ hrtimer_cancel(&GET_CTIMER(i));
+
cancel_work_sync(&rmnet_shs_cfg.core_flush[i].work);
+ }
cancel_work_sync(&shs_rx_work.work);
+
+ return cpu_switch;
}
void rmnet_shs_ps_on_hdlr(void *port)
@@ -1724,7 +1745,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
/* Cancels the flushing timer if it has been armed
* Deregisters DL marker indications
*/
-void rmnet_shs_exit(void)
+void rmnet_shs_exit(unsigned int cpu_switch)
{
rmnet_shs_freq_exit();
rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler = NULL;
@@ -1738,5 +1759,5 @@ void rmnet_shs_exit(void)
memset(&rmnet_shs_cfg, 0, sizeof(rmnet_shs_cfg));
rmnet_shs_cfg.port = NULL;
rmnet_shs_cfg.rmnet_shs_init_complete = 0;
-
+ rmnet_shs_inst_rate_switch = cpu_switch;
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 4c69b57..7df6059 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -177,8 +177,7 @@ static struct rmnet_shs_wq_rx_flow_s rmnet_shs_rx_flow_tbl;
static struct list_head rmnet_shs_wq_hstat_tbl =
LIST_HEAD_INIT(rmnet_shs_wq_hstat_tbl);
static int rmnet_shs_flow_dbg_stats_idx_cnt;
-static struct list_head rmnet_shs_wq_ep_tbl =
- LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
+struct list_head rmnet_shs_wq_ep_tbl = LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
/* Helper functions to add and remove entries to the table
* that maintains a list of all endpoints (vnd's) available on this device.
@@ -538,6 +537,17 @@ void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
hstat_p->rps_config_msk = ep->rps_config_msk;
hstat_p->def_core_msk = ep->default_core_msk;
hstat_p->pri_core_msk = ep->pri_core_msk;
+
+ /* Update ep tput stats while we're here */
+ if (hstat_p->skb_tport_proto == IPPROTO_TCP) {
+ rm_err("SHS_UDP: adding TCP bps %lu to ep_total %lu ep name %s",
+ hstat_p->rx_bps, ep->tcp_rx_bps, node_p->dev->name);
+ ep->tcp_rx_bps += hstat_p->rx_bps;
+ } else if (hstat_p->skb_tport_proto == IPPROTO_UDP) {
+ rm_err("SHS_UDP: adding UDP rx_bps %lu to ep_total %lu ep name %s",
+ hstat_p->rx_bps, ep->udp_rx_bps, node_p->dev->name);
+ ep->udp_rx_bps += hstat_p->rx_bps;
+ }
break;
}
}
@@ -1234,6 +1244,7 @@ int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
u32 sugg_type)
{
+ unsigned long flags;
struct rmnet_shs_wq_ep_s *ep;
if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
@@ -1245,6 +1256,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
* on it if is online, rps mask, isolation, etc. then make
* suggestion to change the cpu for the flow by passing its hash
*/
+ spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep)
continue;
@@ -1266,9 +1278,13 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
" move from cpu[%d] to cpu[%d] sugg_type [%d]",
hash_to_move, cur_cpu, dest_cpu, sugg_type);
+
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return 1;
}
}
+
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return 0;
}
@@ -1277,8 +1293,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
{
struct rmnet_shs_skbn_s *node_p;
struct rmnet_shs_wq_hstat_s *hstat_p;
+ unsigned long ht_flags;
u16 bkt;
+ spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
if (!node_p)
continue;
@@ -1300,8 +1318,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
0xDEF, 0xDEF, hstat_p, NULL);
node_p->hstats->segment_enable = seg_enable;
+ spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
return 1;
}
+ spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
hash_to_set, seg_enable);
@@ -1446,6 +1466,7 @@ void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+ rmnet_shs_wq_mem_update_cached_netdevs();
rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
@@ -1608,12 +1629,14 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
int cpu_assigned = -1;
u8 is_match_found = 0;
struct rmnet_shs_wq_ep_s *ep = NULL;
+ unsigned long flags;
if (!dev) {
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
return cpu_assigned;
}
+ spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep)
continue;
@@ -1629,6 +1652,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
if (!is_match_found) {
rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return cpu_assigned;
}
@@ -1646,6 +1670,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
/* Increment CPU assignment idx to be ready for next flow assignment*/
if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return cpu_assigned;
}
@@ -1657,12 +1682,14 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
u8 hi_idx;
u8 hi_max;
u8 is_match_found = 0;
+ unsigned long flags;
if (!dev) {
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
return cpu_assigned;
}
+ spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep)
continue;
@@ -1678,6 +1705,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
if (!is_match_found) {
rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return cpu_assigned;
}
@@ -1694,6 +1722,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
/* Increment CPU assignment idx to be ready for next flow assignment*/
if (cpu_assigned >= 0)
ep->new_hi_idx = ((hi_idx + 1) % hi_max);
+ spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
return cpu_assigned;
}
@@ -1868,6 +1897,11 @@ void rmnet_shs_wq_refresh_ep_masks(void)
if (!ep->is_ep_active)
continue;
rmnet_shs_wq_update_ep_rps_msk(ep);
+
+ /* These tput totals get re-added as we go through each flow */
+ ep->udp_rx_bps = 0;
+ ep->tcp_rx_bps = 0;
+
}
}
@@ -1941,9 +1975,6 @@ void rmnet_shs_wq_update_stats(void)
}
rmnet_shs_wq_refresh_new_flow_list();
- /*Invoke after both the locks are released*/
- rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
- rmnet_shs_wq_debug_print_flows();
}
void rmnet_shs_wq_process_wq(struct work_struct *work)
@@ -1958,6 +1989,10 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
rmnet_shs_wq_update_stats();
spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+ /*Invoke after both the locks are released*/
+ rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+ rmnet_shs_wq_debug_print_flows();
+
queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
rmnet_shs_wq_frequency);
@@ -1993,6 +2028,7 @@ void rmnet_shs_wq_exit(void)
return;
rmnet_shs_wq_mem_deinit();
+ rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 0d86200..446fa17 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,9 +36,13 @@
extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;
+extern struct list_head rmnet_shs_wq_ep_tbl;
+
/* stores wq and end point details */
struct rmnet_shs_wq_ep_s {
+ u64 tcp_rx_bps;
+ u64 udp_rx_bps;
struct list_head ep_list_id;
struct net_device *ep;
int new_lo_core[MAX_CPUS];
@@ -161,6 +165,7 @@ struct rmnet_shs_wq_cpu_cap_s {
struct list_head cpu_cap_list;
u64 pps_capacity;
u64 avg_pps_capacity;
+ u64 bps;
u16 cpu_num;
};
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
index b28f0c2..2dff48a 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -349,6 +349,8 @@ int rmnet_shs_wq_genl_deinit(void)
{
int ret;
+ rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
+
ret = genl_unregister_family(&rmnet_shs_genl_family);
if(ret != 0){
rm_err("SHS_GNL: unregister family failed: %i\n",ret);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
index 333de48..9901d38 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#define RMNET_SHS_GENL_VERSION 1
#define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
#define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+#define RMNET_SHS_SYNC_WQ_EXIT 42
extern int rmnet_shs_userspace_connected;
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
index 1675517..062edb7 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
#include "rmnet_shs_wq_mem.h"
#include <linux/proc_fs.h>
+#include <linux/refcount.h>
MODULE_LICENSE("GPL v2");
@@ -24,6 +25,7 @@ struct proc_dir_entry *shs_proc_dir;
struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_netdev_usr_s rmnet_shs_wq_netdev_usr[RMNET_SHS_MAX_NETDEVS];
struct list_head gflows = LIST_HEAD_INIT(gflows); /* gold flows */
struct list_head ssflows = LIST_HEAD_INIT(ssflows); /* slow start flows */
@@ -32,6 +34,7 @@ struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
struct rmnet_shs_mmap_info *cap_shared;
struct rmnet_shs_mmap_info *gflow_shared;
struct rmnet_shs_mmap_info *ssflow_shared;
+struct rmnet_shs_mmap_info *netdev_shared;
/* Static Functions and Definitions */
static void rmnet_shs_vm_open(struct vm_area_struct *vma)
@@ -44,32 +47,163 @@ static void rmnet_shs_vm_close(struct vm_area_struct *vma)
return;
}
-static int rmnet_shs_vm_fault(struct vm_fault *vmf)
+static int rmnet_shs_vm_fault_caps(struct vm_fault *vmf)
{
struct page *page = NULL;
struct rmnet_shs_mmap_info *info;
rmnet_shs_wq_ep_lock_bh();
- info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
- if (info->data) {
- page = virt_to_page(info->data);
- get_page(page);
- vmf->page = page;
+ if (cap_shared) {
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+
+static int rmnet_shs_vm_fault_g_flows(struct vm_fault *vmf)
+{
+ struct page *page = NULL;
+ struct rmnet_shs_mmap_info *info;
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (gflow_shared) {
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static int rmnet_shs_vm_fault_ss_flows(struct vm_fault *vmf)
+{
+ struct page *page = NULL;
+ struct rmnet_shs_mmap_info *info;
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (ssflow_shared) {
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
}
rmnet_shs_wq_ep_unlock_bh();
return 0;
}
-static const struct vm_operations_struct rmnet_shs_vm_ops = {
+static int rmnet_shs_vm_fault_netdev(struct vm_fault *vmf)
+{
+ struct page *page = NULL;
+ struct rmnet_shs_mmap_info *info;
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (netdev_shared) {
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_caps = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault_caps,
+};
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_g_flows = {
.close = rmnet_shs_vm_close,
.open = rmnet_shs_vm_open,
- .fault = rmnet_shs_vm_fault,
+ .fault = rmnet_shs_vm_fault_g_flows,
};
-static int rmnet_shs_mmap(struct file *filp, struct vm_area_struct *vma)
+static const struct vm_operations_struct rmnet_shs_vm_ops_ss_flows = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault_ss_flows,
+};
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_netdev = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault_netdev,
+};
+
+static int rmnet_shs_mmap_caps(struct file *filp, struct vm_area_struct *vma)
{
- vma->vm_ops = &rmnet_shs_vm_ops;
+ vma->vm_ops = &rmnet_shs_vm_ops_caps;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
+static int rmnet_shs_mmap_g_flows(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &rmnet_shs_vm_ops_g_flows;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
+static int rmnet_shs_mmap_ss_flows(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &rmnet_shs_vm_ops_ss_flows;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
+static int rmnet_shs_mmap_netdev(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &rmnet_shs_vm_ops_netdev;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
@@ -95,9 +229,12 @@ static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
}
cap_shared = info;
+ refcount_set(&cap_shared->refcnt, 1);
rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
(unsigned long long)virt_to_phys((void *)info),
(unsigned long long)virt_to_phys((void *)cap_shared));
+ } else {
+ refcount_inc(&cap_shared->refcnt);
}
filp->private_data = cap_shared;
@@ -132,10 +269,14 @@ static int rmnet_shs_open_g_flows(struct inode *inode, struct file *filp)
}
gflow_shared = info;
+ refcount_set(&gflow_shared->refcnt, 1);
rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
(unsigned long long)virt_to_phys((void *)info),
(unsigned long long)virt_to_phys((void *)gflow_shared));
+ } else {
+ refcount_inc(&gflow_shared->refcnt);
}
+
filp->private_data = gflow_shared;
rmnet_shs_wq_ep_unlock_bh();
@@ -166,10 +307,14 @@ static int rmnet_shs_open_ss_flows(struct inode *inode, struct file *filp)
}
ssflow_shared = info;
+ refcount_set(&ssflow_shared->refcnt, 1);
rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
(unsigned long long)virt_to_phys((void *)info),
(unsigned long long)virt_to_phys((void *)ssflow_shared));
+ } else {
+ refcount_inc(&ssflow_shared->refcnt);
}
+
filp->private_data = ssflow_shared;
rmnet_shs_wq_ep_unlock_bh();
@@ -181,40 +326,59 @@ fail:
return -ENOMEM;
}
-static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+static int rmnet_shs_open_netdev(struct inode *inode, struct file *filp)
{
struct rmnet_shs_mmap_info *info;
- int ret = 0;
- rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+ rm_err("%s", "SHS_MEM: rmnet_shs_open netdev - entry\n");
rmnet_shs_wq_ep_lock_bh();
- info = filp->private_data;
- ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
- if (copy_to_user(buf, info->data, ret))
- ret = -EFAULT;
- rmnet_shs_wq_ep_unlock_bh();
+ if (!netdev_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+ if (!info)
+ goto fail;
- return ret;
-}
+ info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!info->data) {
+ kfree(info);
+ goto fail;
+ }
-static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
-{
- struct rmnet_shs_mmap_info *info;
- int ret;
+ netdev_shared = info;
+ refcount_set(&netdev_shared->refcnt, 1);
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx netdev_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)netdev_shared));
+ } else {
+ refcount_inc(&netdev_shared->refcnt);
+ }
- rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
+ filp->private_data = netdev_shared;
+ rmnet_shs_wq_ep_unlock_bh();
- rmnet_shs_wq_ep_lock_bh();
- info = filp->private_data;
- ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
- if (copy_from_user(info->data, buf, ret))
- ret = -EFAULT;
- else
- ret = len;
+ return 0;
+
+fail:
rmnet_shs_wq_ep_unlock_bh();
+ return -ENOMEM;
+}
- return ret;
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+ /*
+ * Decline to expose file value and simply return benign value
+ */
+ return RMNET_SHS_READ_VAL;
+}
+
+static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
+{
+ /*
+ * Returning zero here would result in echo commands hanging
+ * Instead return len and simply decline to allow echo'd values to
+ * take effect
+ */
+ return len;
}
static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
@@ -226,10 +390,14 @@ static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
rmnet_shs_wq_ep_lock_bh();
if (cap_shared) {
info = filp->private_data;
- cap_shared = NULL;
- free_page((unsigned long)info->data);
- kfree(info);
- filp->private_data = NULL;
+ if (refcount_read(&info->refcnt) <= 1) {
+ free_page((unsigned long)info->data);
+ kfree(info);
+ cap_shared = NULL;
+ filp->private_data = NULL;
+ } else {
+ refcount_dec(&info->refcnt);
+ }
}
rmnet_shs_wq_ep_unlock_bh();
@@ -245,10 +413,14 @@ static int rmnet_shs_release_g_flows(struct inode *inode, struct file *filp)
rmnet_shs_wq_ep_lock_bh();
if (gflow_shared) {
info = filp->private_data;
- gflow_shared = NULL;
- free_page((unsigned long)info->data);
- kfree(info);
- filp->private_data = NULL;
+ if (refcount_read(&info->refcnt) <= 1) {
+ free_page((unsigned long)info->data);
+ kfree(info);
+ gflow_shared = NULL;
+ filp->private_data = NULL;
+ } else {
+ refcount_dec(&info->refcnt);
+ }
}
rmnet_shs_wq_ep_unlock_bh();
@@ -264,10 +436,37 @@ static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
rmnet_shs_wq_ep_lock_bh();
if (ssflow_shared) {
info = filp->private_data;
- ssflow_shared = NULL;
- free_page((unsigned long)info->data);
- kfree(info);
- filp->private_data = NULL;
+ if (refcount_read(&info->refcnt) <= 1) {
+ free_page((unsigned long)info->data);
+ kfree(info);
+ ssflow_shared = NULL;
+ filp->private_data = NULL;
+ } else {
+ refcount_dec(&info->refcnt);
+ }
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static int rmnet_shs_release_netdev(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release netdev - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (netdev_shared) {
+ info = filp->private_data;
+ if (refcount_read(&info->refcnt) <= 1) {
+ free_page((unsigned long)info->data);
+ kfree(info);
+ netdev_shared = NULL;
+ filp->private_data = NULL;
+ } else {
+ refcount_dec(&info->refcnt);
+ }
}
rmnet_shs_wq_ep_unlock_bh();
@@ -276,7 +475,7 @@ static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
static const struct file_operations rmnet_shs_caps_fops = {
.owner = THIS_MODULE,
- .mmap = rmnet_shs_mmap,
+ .mmap = rmnet_shs_mmap_caps,
.open = rmnet_shs_open_caps,
.release = rmnet_shs_release_caps,
.read = rmnet_shs_read,
@@ -285,7 +484,7 @@ static const struct file_operations rmnet_shs_caps_fops = {
static const struct file_operations rmnet_shs_g_flows_fops = {
.owner = THIS_MODULE,
- .mmap = rmnet_shs_mmap,
+ .mmap = rmnet_shs_mmap_g_flows,
.open = rmnet_shs_open_g_flows,
.release = rmnet_shs_release_g_flows,
.read = rmnet_shs_read,
@@ -294,13 +493,21 @@ static const struct file_operations rmnet_shs_g_flows_fops = {
static const struct file_operations rmnet_shs_ss_flows_fops = {
.owner = THIS_MODULE,
- .mmap = rmnet_shs_mmap,
+ .mmap = rmnet_shs_mmap_ss_flows,
.open = rmnet_shs_open_ss_flows,
.release = rmnet_shs_release_ss_flows,
.read = rmnet_shs_read,
.write = rmnet_shs_write,
};
+static const struct file_operations rmnet_shs_netdev_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap_netdev,
+ .open = rmnet_shs_open_netdev,
+ .release = rmnet_shs_release_netdev,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
/* Global Functions */
/* Add a flow to the slow start flow list */
@@ -432,6 +639,7 @@ void rmnet_shs_wq_cpu_caps_list_add(
if (flows <= 0) {
cap_node->pps_capacity = pps_uthresh;
cap_node->avg_pps_capacity = pps_uthresh;
+ cap_node->bps = 0;
list_add(&cap_node->cpu_cap_list, cpu_caps);
return;
}
@@ -452,6 +660,8 @@ void rmnet_shs_wq_cpu_caps_list_add(
cap_node->avg_pps_capacity = 0;
}
+ cap_node->bps = cpu_node->rx_bps;
+
list_add(&cap_node->cpu_cap_list, cpu_caps);
}
@@ -503,12 +713,13 @@ void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
break;
rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
- "avg pps cap = %llu",
+ "avg pps cap = %llu bps = %llu",
cap_node->cpu_num, cap_node->pps_capacity,
- cap_node->avg_pps_capacity);
+ cap_node->avg_pps_capacity, cap_node->bps);
rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].bps = cap_node->bps;
rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
idx += 1;
}
@@ -650,13 +861,97 @@ void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
rm_err("SHS_SLOW: num ss flows = %u\n", idx);
/* Copy num ss flows into first 2 bytes,
- then copy in the cached gold flow array */
+ then copy in the cached ss flow array */
memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
(void *) &rmnet_shs_wq_ssflows_usr[0],
sizeof(rmnet_shs_wq_ssflows_usr));
}
+
+/* Extract info required from the rmnet_port array then memcpy to shared mem.
+ * > Add number of active netdevices/endpoints at the start.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The netdev is formatted like this:
+ * | num_netdevs | data_format | {rmnet_data0,ip_miss,rx_pkts} | ... |
+ * | 16 bits | 32 bits | |
+ */
+void rmnet_shs_wq_mem_update_cached_netdevs(void)
+{
+ struct rmnet_priv *priv;
+ struct rmnet_shs_wq_ep_s *ep = NULL;
+ u16 idx = 0;
+ u16 count = 0;
+
+ rm_err("SHS_NETDEV: function enter %u\n", idx);
+ list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+ count += 1;
+ rm_err("SHS_NETDEV: function enter ep %u\n", count);
+ if (!ep)
+ continue;
+
+ if (!ep->is_ep_active) {
+ rm_err("SHS_NETDEV: ep %u is NOT active\n", count);
+ continue;
+ }
+
+ rm_err("SHS_NETDEV: ep %u is active and not null\n", count);
+ if (idx >= RMNET_SHS_MAX_NETDEVS) {
+ break;
+ }
+
+ priv = netdev_priv(ep->ep);
+ if (!priv) {
+ rm_err("SHS_NETDEV: priv for ep %u is null\n", count);
+ continue;
+ }
+
+ rm_err("SHS_NETDEV: ep %u has name = %s \n", count,
+ ep->ep->name);
+ rm_err("SHS_NETDEV: ep %u has mux_id = %u \n", count,
+ priv->mux_id);
+ rm_err("SHS_NETDEV: ep %u has ip_miss = %lu \n", count,
+ priv->stats.coal.close.ip_miss);
+ rm_err("SHS_NETDEV: ep %u has coal_rx_pkts = %lu \n", count,
+ priv->stats.coal.coal_pkts);
+ rm_err("SHS_NETDEV: ep %u has udp_rx_bps = %lu \n", count,
+ ep->udp_rx_bps);
+ rm_err("SHS_NETDEV: ep %u has tcp_rx_bps = %lu \n", count,
+ ep->tcp_rx_bps);
+
+ /* Set netdev name and ip mismatch count */
+ rmnet_shs_wq_netdev_usr[idx].coal_ip_miss = priv->stats.coal.close.ip_miss;
+ rmnet_shs_wq_netdev_usr[idx].hw_evict = priv->stats.coal.close.hw_evict;
+ rmnet_shs_wq_netdev_usr[idx].coal_tcp = priv->stats.coal.coal_tcp;
+ rmnet_shs_wq_netdev_usr[idx].coal_tcp_bytes = priv->stats.coal.coal_tcp_bytes;
+ rmnet_shs_wq_netdev_usr[idx].coal_udp = priv->stats.coal.coal_udp;
+ rmnet_shs_wq_netdev_usr[idx].coal_udp_bytes = priv->stats.coal.coal_udp_bytes;
+ rmnet_shs_wq_netdev_usr[idx].mux_id = priv->mux_id;
+ strlcpy(rmnet_shs_wq_netdev_usr[idx].name,
+ ep->ep->name,
+ sizeof(rmnet_shs_wq_netdev_usr[idx].name));
+
+ /* Set rx pkt from netdev stats */
+ rmnet_shs_wq_netdev_usr[idx].coal_rx_pkts = priv->stats.coal.coal_pkts;
+ rmnet_shs_wq_netdev_usr[idx].tcp_rx_bps = ep->tcp_rx_bps;
+ rmnet_shs_wq_netdev_usr[idx].udp_rx_bps = ep->udp_rx_bps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: netdev_shared = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)netdev_shared), netdev_shared);
+ if (!netdev_shared) {
+ rm_err("%s", "SHS_WRITE: netdev_shared is NULL");
+ return;
+ }
+
+ memcpy(((char *)netdev_shared->data), &idx, sizeof(idx));
+ memcpy(((char *)netdev_shared->data + sizeof(uint16_t)),
+ (void *) &rmnet_shs_wq_netdev_usr[0],
+ sizeof(rmnet_shs_wq_netdev_usr));
+}
+
/* Creates the proc folder and files for shs shared memory */
void rmnet_shs_wq_mem_init(void)
{
@@ -665,11 +960,13 @@ void rmnet_shs_wq_mem_init(void)
proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+ proc_create(RMNET_SHS_PROC_NETDEV, 0644, shs_proc_dir, &rmnet_shs_netdev_fops);
rmnet_shs_wq_ep_lock_bh();
cap_shared = NULL;
gflow_shared = NULL;
ssflow_shared = NULL;
+ netdev_shared = NULL;
rmnet_shs_wq_ep_unlock_bh();
}
@@ -679,11 +976,13 @@ void rmnet_shs_wq_mem_deinit(void)
remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_NETDEV, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
rmnet_shs_wq_ep_lock_bh();
cap_shared = NULL;
gflow_shared = NULL;
ssflow_shared = NULL;
+ netdev_shared = NULL;
rmnet_shs_wq_ep_unlock_bh();
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
index 2e5e889..e955606 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,16 +23,23 @@
#define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
#define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+#define RMNET_SHS_PROC_NETDEV "rmnet_shs_netdev"
#define RMNET_SHS_MAX_USRFLOWS (128)
+#define RMNET_SHS_MAX_NETDEVS (40)
+#define RMNET_SHS_IFNAMSIZ (16)
+#define RMNET_SHS_READ_VAL (0)
+/* NOTE: Make sure these structs fit in one page */
+/* 26 bytes * 8 max cpus = 208 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
u64 pps_capacity;
u64 avg_pps_capacity;
- u64 bps_capacity;
+ u64 bps;
u16 cpu_num;
};
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
u64 rx_pps;
u64 avg_pps;
@@ -41,6 +48,7 @@ struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
u16 cpu_num;
};
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
u64 rx_pps;
u64 avg_pps;
@@ -49,6 +57,21 @@ struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
u16 cpu_num;
};
+/* 16 + 8*9 + 1 = 89 bytes, 89*40 netdev = 3560 bytes < 4096 */
+struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
+ char name[RMNET_SHS_IFNAMSIZ];
+ u64 coal_ip_miss;
+ u64 hw_evict;
+ u64 coal_rx_pkts;
+ u64 coal_tcp;
+ u64 coal_tcp_bytes;
+ u64 coal_udp;
+ u64 coal_udp_bytes;
+ u64 udp_rx_bps;
+ u64 tcp_rx_bps;
+ u8 mux_id;
+};
+
extern struct list_head gflows;
extern struct list_head ssflows;
extern struct list_head cpu_caps;
@@ -58,6 +81,7 @@ enum {RMNET_SHS_BUFFER_SIZE = 4096};
struct rmnet_shs_mmap_info {
char *data;
+ refcount_t refcnt;
};
/* Function Definitions */
@@ -81,6 +105,7 @@ void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+void rmnet_shs_wq_mem_update_cached_netdevs(void);
void rmnet_shs_wq_mem_init(void);