author    Linux Build Service Account <lnxbuild@localhost>  2019-12-18 07:19:21 -0800
committer Linux Build Service Account <lnxbuild@localhost>  2019-12-18 07:19:21 -0800
commit    8844d1d9538a8952841f7feb3aa13a66d42efd0f (patch)
tree      89d82958d32863060dadaf626251657bbf05d975
parent    5d1f94bfe36ac0e1cb889067d0638c40f26aec1f (diff)
parent    4eeda91ff12a1b561a65fa7d0d1a28d0b92d624b (diff)
Merge 4eeda91ff12a1b561a65fa7d0d1a28d0b92d624b on remote branch
Change-Id: I6feb6a89d841735476ea9e7cac86874bc0b55978
-rw-r--r--  drivers/emac-dwc-eqos/Android.mk                   |   9
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c           |   4
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c            |  73
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c            | 336
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c            |   6
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c           |  24
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c       | 202
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c   |   5
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c            |   7
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_rgmii_io_macro.c |  23
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h        |   2
-rw-r--r--  drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h        |  12
-rw-r--r--  drivers/emac-dwc-eqos/Makefile.builtin             |   2
-rw-r--r--  drivers/emac-dwc-eqos/emac_perf_settings.sh        |  22
-rw-r--r--  drivers/rmnet/perf/rmnet_perf_config.c             |   7
-rw-r--r--  drivers/rmnet/perf/rmnet_perf_opt.c                |  10
-rw-r--r--  drivers/rmnet/shs/Android.mk                       |   2
-rw-r--r--  drivers/rmnet/shs/Kbuild                           |   2
-rw-r--r--  drivers/rmnet/shs/rmnet_shs.h                      |  19
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.c               |  11
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.h               |   6
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_freq.c                 |   4
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c                 | 170
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c                   | 638
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.h                   |  71
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.c              | 358
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.h              |  76
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.c               | 689
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.h               |  89
29 files changed, 2539 insertions(+), 340 deletions(-)
diff --git a/drivers/emac-dwc-eqos/Android.mk b/drivers/emac-dwc-eqos/Android.mk
index a50d64f..cff761a 100644
--- a/drivers/emac-dwc-eqos/Android.mk
+++ b/drivers/emac-dwc-eqos/Android.mk
@@ -22,6 +22,15 @@ KBUILD_OPTIONS += DCONFIG_DEBUGFS_OBJ=1
LOCAL_MODULE := emac_dwc_eqos.ko
LOCAL_MODULE_TAGS := optional
include $(DLKM_DIR)/AndroidKernelModule.mk
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := emac_perf_settings.sh
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA)/emac
+LOCAL_SRC_FILES := emac_perf_settings.sh
+include $(BUILD_PREBUILT)
+
endif
endif
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
index ac56b19..ab9b06f 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
@@ -1440,7 +1440,7 @@ static int DWC_ETH_QOS_map_page_buffs(struct DWC_ETH_QOS_prv_data *pdata,
DBGPR("-->DWC_ETH_QOS_map_page_buffs\n");
if (size > DWC_ETH_QOS_MAX_DATA_PER_TX_BUF) {
- if (!prev_buffer->dma2) {
+ if (prev_buffer && !prev_buffer->dma2) {
DBGPR("prev_buffer->dma2 is empty\n");
/* fill the first buffer pointer in pre_buffer->dma2 */
prev_buffer->dma2 =
@@ -1505,7 +1505,7 @@ static int DWC_ETH_QOS_map_page_buffs(struct DWC_ETH_QOS_prv_data *pdata,
buffer->buf2_mapped_as_page = Y_TRUE;
}
} else {
- if (!prev_buffer->dma2) {
+ if (prev_buffer && !prev_buffer->dma2) {
DBGPR("prev_buffer->dma2 is empty\n");
/* fill the first buffer pointer in pre_buffer->dma2 */
prev_buffer->dma2 = dma_map_page(GET_MEM_PDEV_DEV,
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
index 4d5d51f..7ce0a31 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
@@ -1136,8 +1136,9 @@ static INT drop_tx_status_enabled(void)
static INT config_sub_second_increment(ULONG ptp_clock)
{
- ULONG val;
ULONG VARMAC_TCR;
+ double ss_inc = 0;
+ double sns_inc = 0;
MAC_TCR_RGRD(VARMAC_TCR);
@@ -1145,30 +1146,69 @@ static INT config_sub_second_increment(ULONG ptp_clock)
/* formula is : ((1/ptp_clock) * 1000000000) */
/* where, ptp_clock = 50MHz if FINE correction */
/* and ptp_clock = DWC_ETH_QOS_SYSCLOCK if COARSE correction */
-#ifdef CONFIG_PPS_OUTPUT
if (GET_VALUE(VARMAC_TCR, MAC_TCR_TSCFUPDT_LPOS, MAC_TCR_TSCFUPDT_HPOS) == 1) {
EMACDBG("Using PTP clock %ld MHz\n", ptp_clock);
- val = ((1 * 1000000000ull) / ptp_clock);
+ ss_inc = (double)1000000000.0 / (double)ptp_clock;
}
else {
EMACDBG("Using SYSCLOCK for coarse correction\n");
- val = ((1 * 1000000000ull) / DWC_ETH_QOS_SYSCLOCK );
+ ss_inc = (double)1000000000.0 / (double)DWC_ETH_QOS_SYSCLOCK;
}
-#else
- if (GET_VALUE(VARMAC_TCR, MAC_TCR_TSCFUPDT_LPOS, MAC_TCR_TSCFUPDT_HPOS) == 1) {
- val = ((1 * 1000000000ull) / 50000000);
- }
- else {
- val = ((1 * 1000000000ull) / ptp_clock);
- }
-#endif
- /* 0.465ns accurecy */
+
+ /* 0.465ns accuracy */
if (GET_VALUE(
VARMAC_TCR, MAC_TCR_TSCTRLSSR_LPOS,
- MAC_TCR_TSCTRLSSR_HPOS) == 0)
- val = (val * 1000) / 465;
+ MAC_TCR_TSCTRLSSR_HPOS) == 0) {
+ EMACDBG("using 0.465 ns accuracy");
+ ss_inc /= 0.465;
+ }
- MAC_SSIR_SSINC_UDFWR(val);
+ sns_inc = ss_inc - (int)ss_inc; // take remainder
+ sns_inc *= 256.0; // sns_inc needs to be multiplied by 2^8, per spec.
+ sns_inc += 0.5; // round to nearest int value.
+
+ MAC_SSIR_SSINC_UDFWR((int)ss_inc);
+ MAC_SSIR_SNSINC_UDFWR((int)sns_inc);
+ EMACDBG("ss_inc = %d, sns_inc = %d\n", (int)ss_inc, (int)sns_inc);
+
+ return Y_SUCCESS;
+ }
+/*!
+ * \brief
+ * \param[in]
+ * \return Success or Failure
+ * \retval 0 Success
+ * \retval -1 Failure
+ */
+
+static INT config_default_addend(struct DWC_ETH_QOS_prv_data *pdata, ULONG ptp_clock)
+{
+ struct hw_if_struct *hw_if = &pdata->hw_if;
+ u64 temp;
+
+ /* formula is :
+ * addend = 2^32/freq_div_ratio;
+ *
+ * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
+ *
+ * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
+ *
+ * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
+ * achieve 20ns accuracy.
+ *
+ * 2^x * y == (y << x), hence
+ * 2^32 * 50000000 ==> (50000000 << 32)
+ */
+ if (ptp_clock == DWC_ETH_QOS_SYSCLOCK) {
+ // If PTP_CLOCK == SYS_CLOCK, best we can do is 2^32 - 1
+ pdata->default_addend = 0xFFFFFFFF;
+ } else {
+ temp = (u64)((u64)ptp_clock << 32);
+ pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
+ }
+ hw_if->config_addend(pdata->default_addend);
+ EMACDBG("PPS: PTPCLK_Config: freq=%dHz, addend_reg=0x%x\n",
+ ptp_clock, (unsigned int)pdata->default_addend);
return Y_SUCCESS;
}
@@ -5114,6 +5154,7 @@ void DWC_ETH_QOS_init_function_ptrs_dev(struct hw_if_struct *hw_if)
/* for hw time stamping */
hw_if->config_hw_time_stamping = config_hw_time_stamping;
hw_if->config_sub_second_increment = config_sub_second_increment;
+ hw_if->config_default_addend = config_default_addend;
hw_if->init_systime = init_systime;
hw_if->config_addend = config_addend;
hw_if->adjust_systime = adjust_systime;
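
The two timestamping helpers above are the core of this change: config_sub_second_increment() converts the PTP clock into the MAC's SSINC/SNSINC register pair (whole nanoseconds per tick, plus a 1/256-ns fraction), and config_default_addend() programs addend = 2^32 * ptp_clock / DWC_ETH_QOS_SYSCLOCK. Below is a minimal fixed-point sketch of the same arithmetic, assuming the usual kernel constraint of avoiding floating point (the double-based version above is unusual for kernel code); the helper names are illustrative, not the driver's.

#include <linux/math64.h>

/* Sketch: SSINC/SNSINC for a given PTP clock, in 8.8 fixed point.
 * ns_x256 is (1e9 / ptp_clock_hz) scaled by 2^8; the high bits are
 * whole nanoseconds (SSINC) and the low 8 bits the fraction in
 * 1/256-ns units (SNSINC). Truncates where the driver rounds.
 */
static void compute_ssinc(unsigned long ptp_clock_hz, u32 *ss, u32 *sns)
{
        u64 ns_x256 = div_u64(1000000000ULL << 8, ptp_clock_hz);

        *ss = ns_x256 >> 8;
        *sns = ns_x256 & 0xff;
}

/* Sketch: addend = 2^32 * ptp_clock / sysclock, saturated at 2^32 - 1. */
static u32 compute_addend(unsigned long ptp_clock_hz, unsigned long sysclock_hz)
{
        if (ptp_clock_hz >= sysclock_hz)
                return 0xFFFFFFFFU;
        return (u32)div_u64((u64)ptp_clock_hz << 32, sysclock_hz);
}

For the new 96MHz default PTP clock this yields SSINC = 10 ns and SNSINC = 106 (about 0.414 ns), i.e. 10.414 ns per tick, matching 1/96MHz.
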
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
index 730d9fc..fda72a8 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
@@ -784,30 +784,50 @@ void DWC_ETH_QOS_handle_phy_interrupt(struct DWC_ETH_QOS_prv_data *pdata)
int micrel_intr_status = 0;
EMACDBG("Enter\n");
- DWC_ETH_QOS_mdio_read_direct(
- pdata, pdata->phyaddr, DWC_ETH_QOS_BASIC_STATUS, &phy_intr_status);
- EMACDBG(
- "Basic Status Reg (%#x) = %#x\n", DWC_ETH_QOS_BASIC_STATUS, phy_intr_status);
-
- DWC_ETH_QOS_mdio_read_direct(
- pdata, pdata->phyaddr, DWC_ETH_QOS_MICREL_PHY_INTCS, &micrel_intr_status);
- EMACDBG(
- "MICREL PHY Intr EN Reg (%#x) = %#x\n", DWC_ETH_QOS_MICREL_PHY_INTCS, micrel_intr_status);
-
- /* Call ack interrupt to clear the WOL interrupt status fields */
- if (pdata->phydev->drv->ack_interrupt)
- pdata->phydev->drv->ack_interrupt(pdata->phydev);
-
- /* Interrupt received for link state change */
- if (phy_intr_status & LINK_STATE_MASK) {
- EMACDBG("Interrupt received for link UP state\n");
- phy_mac_interrupt(pdata->phydev, LINK_UP);
- } else if (!(phy_intr_status & LINK_STATE_MASK)) {
- EMACDBG("Interrupt received for link DOWN state\n");
- phy_mac_interrupt(pdata->phydev, LINK_DOWN);
- } else if (!(phy_intr_status & AUTONEG_STATE_MASK)) {
- EMACDBG("Interrupt received for link down with"
+ if ((pdata->phydev->phy_id & pdata->phydev->drv->phy_id_mask) == MICREL_PHY_ID) {
+ DWC_ETH_QOS_mdio_read_direct(
+ pdata, pdata->phyaddr, DWC_ETH_QOS_BASIC_STATUS, &phy_intr_status);
+ EMACDBG(
+ "Basic Status Reg (%#x) = %#x\n", DWC_ETH_QOS_BASIC_STATUS, phy_intr_status);
+
+ DWC_ETH_QOS_mdio_read_direct(
+ pdata, pdata->phyaddr, DWC_ETH_QOS_MICREL_PHY_INTCS, &micrel_intr_status);
+ EMACDBG(
+ "MICREL PHY Intr EN Reg (%#x) = %#x\n", DWC_ETH_QOS_MICREL_PHY_INTCS, micrel_intr_status);
+
+ /* Call ack interrupt to clear the WOL interrupt status fields */
+ if (pdata->phydev->drv->ack_interrupt)
+ pdata->phydev->drv->ack_interrupt(pdata->phydev);
+
+ /* Interrupt received for link state change */
+ if (phy_intr_status & LINK_STATE_MASK) {
+ EMACDBG("Interrupt received for link UP state\n");
+ phy_mac_interrupt(pdata->phydev, LINK_UP);
+ } else if (!(phy_intr_status & LINK_STATE_MASK)) {
+ EMACDBG("Interrupt received for link DOWN state\n");
+ phy_mac_interrupt(pdata->phydev, LINK_DOWN);
+ } else if (!(phy_intr_status & AUTONEG_STATE_MASK)) {
+ EMACDBG("Interrupt received for link down with"
+ " auto-negotiation error\n");
+ }
+ } else {
+ DWC_ETH_QOS_mdio_read_direct(
+ pdata, pdata->phyaddr, DWC_ETH_QOS_PHY_INTR_STATUS, &phy_intr_status);
+ EMACDBG("Phy Interrupt status Reg at offset 0x13 = %#x\n", phy_intr_status);
+ /* Interrupt received for link state change */
+ if (phy_intr_status & LINK_UP_STATE) {
+ pdata->hw_if.stop_mac_tx_rx();
+ EMACDBG("Interrupt received for link UP state\n");
+ phy_mac_interrupt(pdata->phydev, LINK_UP);
+ } else if (phy_intr_status & LINK_DOWN_STATE) {
+ EMACDBG("Interrupt received for link DOWN state\n");
+ phy_mac_interrupt(pdata->phydev, LINK_DOWN);
+ } else if (phy_intr_status & AUTO_NEG_ERROR) {
+ EMACDBG("Interrupt received for link down with"
" auto-negotiation error\n");
+ } else if (phy_intr_status & PHY_WOL) {
+ EMACDBG("Interrupt received for WoL packet\n");
+ }
}
EMACDBG("Exit\n");
@@ -2429,6 +2449,7 @@ inline UINT DWC_ETH_QOS_cal_int_mod(struct sk_buff *skb, UINT eth_type,
struct DWC_ETH_QOS_prv_data *pdata)
{
UINT ret = DEFAULT_INT_MOD;
+ bool is_udp;
#ifdef DWC_ETH_QOS_CONFIG_PTP
if (eth_type == ETH_P_1588)
@@ -2439,8 +2460,11 @@ inline UINT DWC_ETH_QOS_cal_int_mod(struct sk_buff *skb, UINT eth_type,
ret = AVB_INT_MOD;
} else if (eth_type == ETH_P_IP || eth_type == ETH_P_IPV6) {
#ifdef DWC_ETH_QOS_CONFIG_PTP
- if (udp_hdr(skb)->dest == htons(PTP_UDP_EV_PORT)
- || udp_hdr(skb)->dest == htons(PTP_UDP_GEN_PORT)) {
+ is_udp = (eth_type == ETH_P_IP && ip_hdr(skb)->protocol == IPPROTO_UDP)
+ || (eth_type == ETH_P_IPV6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP);
+
+ if (is_udp && (udp_hdr(skb)->dest == htons(PTP_UDP_EV_PORT)
+ || udp_hdr(skb)->dest == htons(PTP_UDP_GEN_PORT))) {
ret = PTP_INT_MOD;
} else
#endif
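
The is_udp guard added above matters because udp_hdr() simply casts the transport-header offset; dereferencing it on a TCP or ICMP packet reads the wrong bytes. A self-contained sketch of the check, assuming the transport header has already been set by the stack (and, like the patch, ignoring IPv6 extension headers):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Sketch: true only when skb's L4 header is really UDP, so that a
 * following udp_hdr(skb) dereference is meaningful.
 */
static bool skb_l4_is_udp(const struct sk_buff *skb, unsigned int eth_type)
{
        if (eth_type == ETH_P_IP)
                return ip_hdr(skb)->protocol == IPPROTO_UDP;
        if (eth_type == ETH_P_IPV6)
                return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
        return false;
}
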
@@ -3113,10 +3137,11 @@ static void DWC_ETH_QOS_consume_page_split_hdr(
{
if (page2_used)
buffer->page2 = NULL;
-
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ if (skb != NULL) {
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ }
}
/* Receive Checksum Offload configuration */
@@ -3224,7 +3249,7 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
unsigned short payload_len = 0;
unsigned char intermediate_desc_cnt = 0;
unsigned char buf2_used = 0;
- int ret;
+ int ret = 0;
DBGPR("-->DWC_ETH_QOS_clean_split_hdr_rx_irq: qinx = %u, quota = %d\n",
qinx, quota);
@@ -3323,15 +3348,13 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
} else {
/* this is the middle of a chain */
payload_len = pdata->rx_buffer_len;
- skb_fill_page_desc(desc_data->skb_top,
- skb_shinfo(desc_data->skb_top)->nr_frags,
- buffer->page2, 0,
- payload_len);
-
+ if (desc_data->skb_top != NULL)
+ skb_fill_page_desc(desc_data->skb_top, skb_shinfo(desc_data->skb_top)->nr_frags, buffer->page2, 0, payload_len);
/* re-use this skb, as consumed only the page */
buffer->skb = skb;
}
- DWC_ETH_QOS_consume_page_split_hdr(buffer,
+ if (desc_data->skb_top != NULL)
+ DWC_ETH_QOS_consume_page_split_hdr(buffer,
desc_data->skb_top,
payload_len, buf2_used);
goto next_desc;
@@ -3348,17 +3371,15 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
(pdata->rx_buffer_len * intermediate_desc_cnt) -
buffer->rx_hdr_size);
}
-
- skb_fill_page_desc(desc_data->skb_top,
- skb_shinfo(desc_data->skb_top)->nr_frags,
- buffer->page2, 0,
- payload_len);
-
- /* re-use this skb, as consumed only the page */
- buffer->skb = skb;
- skb = desc_data->skb_top;
+ if (desc_data->skb_top != NULL) {
+ skb_fill_page_desc(desc_data->skb_top, skb_shinfo(desc_data->skb_top)->nr_frags, buffer->page2, 0, payload_len);
+ /* re-use this skb, as consumed only the page */
+ buffer->skb = skb;
+ skb = desc_data->skb_top;
+ }
desc_data->skb_top = NULL;
- DWC_ETH_QOS_consume_page_split_hdr(buffer, skb,
+ if (skb != NULL)
+ DWC_ETH_QOS_consume_page_split_hdr(buffer, skb,
payload_len, buf2_used);
} else {
/* no chain, got both FD + LD together */
@@ -3402,11 +3423,13 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
hdr_len = 0;
}
- DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
+ if (skb != NULL) {
+ DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
#ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG
- DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
+ DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
#endif
+ }
#ifdef YDEBUG_FILTER
DWC_ETH_QOS_check_rx_filter_status(RX_NORMAL_DESC);
@@ -3415,14 +3438,16 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
if ((pdata->hw_feat.tsstssel) && (pdata->hwts_rx_en)) {
/* get rx tstamp if available */
if (hw_if->rx_tstamp_available(RX_NORMAL_DESC)) {
- ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
+ if (skb != NULL)
+ ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
skb, desc_data, qinx);
if (ret == 0) {
/* device has not yet updated the CONTEXT desc to hold the
* time stamp, hence delay the packet reception
*/
buffer->skb = skb;
- buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
+ if (skb != NULL)
+ buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
pdata->rx_buffer_len, DMA_FROM_DEVICE);
if (dma_mapping_error(GET_MEM_PDEV_DEV, buffer->dma))
dev_alert(&pdata->pdev->dev, "failed to do the RX dma map\n");
@@ -3442,8 +3467,10 @@ static int DWC_ETH_QOS_clean_split_hdr_rx_irq(
#endif
/* update the statistics */
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
+ if (skb != NULL) {
+ dev->stats.rx_bytes += skb->len;
+ DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
+ }
received++;
next_desc:
desc_data->dirty_rx++;
@@ -3504,7 +3531,7 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
u16 pkt_len;
UCHAR intermediate_desc_cnt = 0;
unsigned int buf2_used;
- int ret;
+ int ret = 0;
DBGPR("-->DWC_ETH_QOS_clean_jumbo_rx_irq: qinx = %u, quota = %d\n",
qinx, quota);
@@ -3575,20 +3602,22 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
pdata->rx_buffer_len);
} else {
/* this is the middle of a chain */
- skb_fill_page_desc(desc_data->skb_top,
+ if (desc_data->skb_top != NULL) {
+ skb_fill_page_desc(desc_data->skb_top,
skb_shinfo(desc_data->skb_top)->nr_frags,
buffer->page, 0,
pdata->rx_buffer_len);
-
- DBGPR("RX: pkt in second buffer pointer\n");
- skb_fill_page_desc(desc_data->skb_top,
+ DBGPR("RX: pkt in second buffer pointer\n");
+ skb_fill_page_desc(desc_data->skb_top,
skb_shinfo(desc_data->skb_top)->nr_frags,
buffer->page2, 0,
pdata->rx_buffer_len);
+ }
/* re-use this skb, as consumed only the page */
buffer->skb = skb;
}
- DWC_ETH_QOS_consume_page(buffer,
+ if (desc_data->skb_top != NULL)
+ DWC_ETH_QOS_consume_page(buffer,
desc_data->skb_top,
(pdata->rx_buffer_len * 2),
buf2_used);
@@ -3599,19 +3628,21 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
pkt_len =
(pkt_len - (pdata->rx_buffer_len * intermediate_desc_cnt));
if (pkt_len > pdata->rx_buffer_len) {
- skb_fill_page_desc(desc_data->skb_top,
+ if (desc_data->skb_top != NULL) {
+ skb_fill_page_desc(desc_data->skb_top,
skb_shinfo(desc_data->skb_top)->nr_frags,
buffer->page, 0,
pdata->rx_buffer_len);
-
- DBGPR("RX: pkt in second buffer pointer\n");
- skb_fill_page_desc(desc_data->skb_top,
+ DBGPR("RX: pkt in second buffer pointer\n");
+ skb_fill_page_desc(desc_data->skb_top,
skb_shinfo(desc_data->skb_top)->nr_frags,
buffer->page2, 0,
(pkt_len - pdata->rx_buffer_len));
+ }
buf2_used = 1;
} else {
- skb_fill_page_desc(desc_data->skb_top,
+ if (desc_data->skb_top != NULL)
+ skb_fill_page_desc(desc_data->skb_top,
skb_shinfo(desc_data->skb_top)->nr_frags,
buffer->page, 0,
pkt_len);
@@ -3619,9 +3650,11 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
}
/* re-use this skb, as consumed only the page */
buffer->skb = skb;
- skb = desc_data->skb_top;
+ if (desc_data->skb_top != NULL)
+ skb = desc_data->skb_top;
desc_data->skb_top = NULL;
- DWC_ETH_QOS_consume_page(buffer, skb,
+ if (skb != NULL)
+ DWC_ETH_QOS_consume_page(buffer, skb,
pkt_len,
buf2_used);
} else {
@@ -3671,11 +3704,13 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
intermediate_desc_cnt = 0;
}
- DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
+ if (skb != NULL) {
+ DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
#ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG
- DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
+ DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
#endif
+ }
#ifdef YDEBUG_FILTER
DWC_ETH_QOS_check_rx_filter_status(RX_NORMAL_DESC);
@@ -3684,15 +3719,16 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
if ((pdata->hw_feat.tsstssel) && (pdata->hwts_rx_en)) {
/* get rx tstamp if available */
if (hw_if->rx_tstamp_available(RX_NORMAL_DESC)) {
- ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
- skb, desc_data, qinx);
+ if (skb != NULL)
+ ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata, skb, desc_data, qinx);
if (ret == 0) {
/* device has not yet updated the CONTEXT desc to hold the
* time stamp, hence delay the packet reception
*/
buffer->skb = skb;
- buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
- pdata->rx_buffer_len, DMA_FROM_DEVICE);
+ if (skb != NULL)
+ buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data, pdata->rx_buffer_len, DMA_FROM_DEVICE);
+
if (dma_mapping_error(GET_MEM_PDEV_DEV, buffer->dma))
dev_alert(&pdata->pdev->dev, "failed to do the RX dma map\n");
@@ -3712,16 +3748,16 @@ static int DWC_ETH_QOS_clean_jumbo_rx_irq(struct DWC_ETH_QOS_prv_data *pdata,
#endif
/* update the statistics */
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- dev_alert(&pdata->pdev->dev, "pskb_may_pull failed\n");
- dev_kfree_skb_any(skb);
- goto next_desc;
+ if (skb != NULL) {
+ dev->stats.rx_bytes += skb->len;
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ dev_alert(&pdata->pdev->dev, "pskb_may_pull failed\n");
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+ DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
}
-
- DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
received++;
next_desc:
desc_data->dirty_rx++;
@@ -4922,52 +4958,14 @@ static VOID DWC_ETH_QOS_config_timer_registers(
{
struct timespec now;
struct hw_if_struct *hw_if = &pdata->hw_if;
- u64 temp;
DBGPR("-->DWC_ETH_QOS_config_timer_registers\n");
+ pdata->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ /* program default addend */
+ hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
/* program Sub Second Increment Reg */
-#ifdef CONFIG_PPS_OUTPUT
- /* If default_addend is already programmed, then we expect that
- * sub_second_increment is also programmed already */
- if(pdata->default_addend == 0){
- hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK); // Using default 250MHz
- }
- else {
- u64 pclk;
- pclk = (u64) (pdata->default_addend) * DWC_ETH_QOS_SYSCLOCK;
- pclk += 0x8000000;
- pclk >>= 32;
- hw_if->config_sub_second_increment((u32)pclk);
- }
-#else
- hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK);
-#endif
- /* formula is :
- * addend = 2^32/freq_div_ratio;
- *
- * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
- *
- * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
- *
- * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
- * achive 20ns accuracy.
- *
- * 2^x * y == (y << x), hence
- * 2^32 * 50000000 ==> (50000000 << 32)
- */
-#ifdef CONFIG_PPS_OUTPUT
- if(pdata->default_addend == 0){
- temp = (u64)(50000000ULL << 32);
- pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
- EMACDBG("Using default PTP clock = 250MHz\n");
- }
-#else
- temp = (u64)(50000000ULL << 32);
- pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-#endif
- hw_if->config_addend(pdata->default_addend);
-
+ hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
/* initialize system time */
getnstimeofday(&now);
hw_if->init_systime(now.tv_sec, now.tv_nsec);
@@ -5105,7 +5103,6 @@ static int ETH_PTPCLK_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data
struct ETH_PPS_Config *eth_pps_cfg = (struct ETH_PPS_Config *)req->ptr;
struct hw_if_struct *hw_if = &pdata->hw_if;
int ret = 0;
- u64 val;
if ((eth_pps_cfg->ppsout_ch < 0) ||
(eth_pps_cfg->ppsout_ch >= pdata->hw_feat.pps_out_num))
@@ -5119,17 +5116,9 @@ static int ETH_PTPCLK_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data
eth_pps_cfg->ptpclk_freq );
return -1;
}
+
pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq;
- val = (u64)(1ULL << 32);
- val = val * (eth_pps_cfg->ptpclk_freq);
- val += (DWC_ETH_QOS_SYSCLOCK/2);
- val = div_u64(val, DWC_ETH_QOS_SYSCLOCK);
- if ( val > 0xFFFFFFFF) val = 0xFFFFFFFF;
- EMACDBG("PPS: PTPCLK_Config: freq=%dHz, addend_reg=0x%x\n",
- eth_pps_cfg->ptpclk_freq, (unsigned int)val);
-
- pdata->default_addend = val;
- ret = hw_if->config_addend((unsigned int)val);
+ ret = hw_if->config_default_addend(pdata, (ULONG)eth_pps_cfg->ptpclk_freq);
ret |= hw_if->config_sub_second_increment( (ULONG)eth_pps_cfg->ptpclk_freq);
return ret;
@@ -5212,15 +5201,8 @@ void DWC_ETH_QOS_pps_timer_init(struct ifr_data_struct *req)
/* Enable timestamping. This is required to start system time generator.*/
MAC_TCR_TSENA_UDFWR(0x1);
-
- /* Configure MAC Sub-second and Sub-nanosecond increment register based on PTP clock. */
- MAC_SSIR_SSINC_UDFWR(0x4); // Sub-second increment value for 250MHz and 230.4MHz ptp clock
-
- MAC_SSIR_SNSINC_UDFWR(0x0); // Sub-nanosecond increment value for 250 MHz ptp clock
- EMACDBG("250 clock\n");
-
MAC_TCR_TSUPDT_UDFWR(0x1);
- MAC_TCR_TSCFUPDT_UDFWR(0x0); // Coarse Timestamp Update method.
+ MAC_TCR_TSCFUPDT_UDFWR(0x1); // Fine Timestamp Update method.
/* Initialize MAC System Time Update register */
MAC_STSUR_TSS_UDFWR(0x0); // MAC system time in seconds
@@ -5279,12 +5261,16 @@ int ETH_PPSOUT_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct
struct ETH_PPS_Config *eth_pps_cfg = (struct ETH_PPS_Config *)req->ptr;
unsigned int val;
int interval, width;
- int interval_ns; /*interval in nano seconds*/
+ struct hw_if_struct *hw_if = &pdata->hw_if;
- if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1 &&
- eth_pps_cfg->ptpclk_freq <= 0) {
- /* Set PTP clock to default 250 */
+ /* LPASS needs a 19.2MHz PPS frequency on PPS0.
+ * If LPASS is enabled, don't allow the PTP clock to be changed,
+ * because changing it changes the addend and sub-second increment,
+ * and PPS0 would no longer run at 19.2MHz.
+ */
+ if (pdata->res_data->pps_lpass_conn_en) {
eth_pps_cfg->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+ EMACDBG("using default ptp clock \n");
}
if ((eth_pps_cfg->ppsout_ch < 0) ||
@@ -5303,6 +5289,12 @@ int ETH_PPSOUT_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct
eth_pps_cfg->ppsout_duty = 99;
}
+ /* Configure increment values */
+ hw_if->config_sub_second_increment(eth_pps_cfg->ptpclk_freq);
+
+ /* Configure addend value as the Fine Timestamp update method is used */
+ hw_if->config_default_addend(pdata, eth_pps_cfg->ptpclk_freq);
+
if(0 < eth_pps_cfg->ptpclk_freq) {
pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq;
interval = (eth_pps_cfg->ptpclk_freq + eth_pps_cfg->ppsout_freq/2)
@@ -5323,17 +5315,9 @@ int ETH_PPSOUT_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct
EMACDBG("PPS: PPSOut_Config: interval=%d, width=%d\n", interval, width);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
- //calculate interval & width
- interval_ns = (1000000000/eth_pps_cfg->ppsout_freq) ;
- interval = ((interval_ns)/4) - 1;
- width = ((interval_ns)/(2*4)) - 1;
- EMACDBG("pps_interval=%d,width=%d\n",interval,width);
- }
-
switch (eth_pps_cfg->ppsout_ch) {
case DWC_ETH_QOS_PPS_CH_0:
- if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
+ if (pdata->res_data->pps_lpass_conn_en) {
if (eth_pps_cfg->ppsout_start == DWC_ETH_QOS_PPS_START) {
MAC_PPSC_PPSEN0_UDFWR(0x1);
MAC_PPS_INTVAL_PPSINT0_UDFWR(DWC_ETH_QOS_PPS_CH_0, interval);
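
The interval/width arithmetic feeding these register writes is worth spelling out: the PPS block counts PTP-clock ticks, so interval = round(ptp_clock / ppsout_freq) and width is the duty-cycle share of that interval. With the new 96MHz default PTP clock and the 19.2MHz LPASS output, that is 96/19.2 = 5 ticks, width 2 at ~50% duty. A hedged sketch (illustrative helper; any N-1 register encoding required by the EQOS databook is left to the caller):

/* Sketch: PPS interval/width in PTP-clock ticks. */
static void pps_interval_width(unsigned int ptp_clk_hz, unsigned int pps_hz,
                               unsigned int duty_pct,
                               unsigned int *interval, unsigned int *width)
{
        *interval = (ptp_clk_hz + pps_hz / 2) / pps_hz; /* rounded ticks */
        *width = (*interval * duty_pct) / 100;          /* on-time ticks */
        if (!*width)
                *width = 1;                             /* keep a visible pulse */
}
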
@@ -5885,7 +5869,6 @@ static int DWC_ETH_QOS_handle_hwtstamp_ioctl(struct DWC_ETH_QOS_prv_data *pdata,
u32 ts_event_en = 0;
u32 av_8021asm_en = 0;
u32 VARMAC_TCR = 0;
- u64 temp = 0;
struct timespec now;
DBGPR_PTP("-->DWC_ETH_QOS_handle_hwtstamp_ioctl\n");
@@ -6056,46 +6039,11 @@ static int DWC_ETH_QOS_handle_hwtstamp_ioctl(struct DWC_ETH_QOS_prv_data *pdata,
hw_if->config_hw_time_stamping(VARMAC_TCR);
+ /* program default addend */
+ hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+
/* program Sub Second Increment Reg */
-#ifdef CONFIG_PPS_OUTPUT
- /* If default_addend is already programmed, then we expect that
- * sub_second_increment is also programmed already */
- if (pdata->default_addend == 0) {
- hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK); // Using default 250MHz
- } else {
- u64 pclk;
- pclk = (u64) (pdata->default_addend) * DWC_ETH_QOS_SYSCLOCK;
- pclk += 0x8000000;
- pclk >>= 32;
- hw_if->config_sub_second_increment((u32)pclk);
- }
-#else
- hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK);
-#endif
- /* formula is :
- * addend = 2^32/freq_div_ratio;
- *
- * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
- *
- * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
- *
- * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
- * achive 20ns accuracy.
- *
- * 2^x * y == (y << x), hence
- * 2^32 * 50000000 ==> (50000000 << 32)
- *
- */
-#ifdef CONFIG_PPS_OUTPUT
- if(pdata->default_addend == 0){
- temp = (u64)(50000000ULL << 32);
- pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
- EMACINFO("Using default PTP clock = 250MHz\n");
-#else
- temp = (u64)(50000000ULL << 32);
- pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-#endif
- hw_if->config_addend(pdata->default_addend);
+ hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
/* initialize system time */
getnstimeofday(&now);
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
index 92d165d..c09a6f5 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
@@ -525,7 +525,7 @@ static int DWC_ETH_QOS_ipa_offload_resume(struct DWC_ETH_QOS_prv_data *pdata)
static int DWC_ETH_QOS_ipa_ready(struct DWC_ETH_QOS_prv_data *pdata)
{
- int ret;
+ int ret = 0;
EMACDBG("Enter \n");
@@ -960,8 +960,8 @@ static int DWC_ETH_QOS_ipa_offload_connect(struct DWC_ETH_QOS_prv_data *pdata)
struct DWC_ETH_QOS_prv_ipa_data *ntn_ipa = &pdata->prv_ipa;
struct ipa_uc_offload_conn_in_params in;
struct ipa_uc_offload_conn_out_params out;
- struct ipa_ntn_setup_info rx_setup_info;
- struct ipa_ntn_setup_info tx_setup_info;
+ struct ipa_ntn_setup_info rx_setup_info = {0};
+ struct ipa_ntn_setup_info tx_setup_info = {0};
struct ipa_perf_profile profile;
int ret = 0;
int i = 0;
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
index 017fe03..faac49e 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
@@ -543,7 +543,8 @@ static void set_phy_rx_tx_delay(struct DWC_ETH_QOS_prv_data *pdata,
EMACDBG("Read 0x%x from offset 0x8\n",phydata);
phydata = 0;
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2) {
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2
+ || pdata->emac_hw_version_type == EMAC_HW_v2_1_1) {
u16 tx_clk = 0xE;
/* Provide TX_CLK delay of -0.06nsec */
DWC_ETH_QOS_mdio_mmd_register_read_direct(pdata, pdata->phyaddr,
@@ -562,7 +563,8 @@ static void set_phy_rx_tx_delay(struct DWC_ETH_QOS_prv_data *pdata,
DWC_ETH_QOS_mdio_mmd_register_read_direct(pdata, pdata->phyaddr,
DWC_ETH_QOS_MICREL_PHY_DEBUG_MMD_DEV_ADDR,0x5,&phydata);
phydata &= ~(0xFF);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
phydata |= ((0x2 << 12) | (0x2 << 8) | (0x2 << 4) | 0x2);
else
/* Default settings for EMAC_HW_v2_1_0 */
@@ -579,7 +581,8 @@ static void set_phy_rx_tx_delay(struct DWC_ETH_QOS_prv_data *pdata,
DWC_ETH_QOS_mdio_mmd_register_read_direct(pdata, pdata->phyaddr,
DWC_ETH_QOS_MICREL_PHY_DEBUG_MMD_DEV_ADDR,0x4,&phydata);
phydata &= ~(0xF << 4);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
phydata |= (0x2 << 4);
else
/* Default settings for EMAC_HW_v2_1_0 */
@@ -654,9 +657,10 @@ static void configure_phy_rx_tx_delay(struct DWC_ETH_QOS_prv_data *pdata)
set_phy_rx_tx_delay(pdata, ENABLE_RX_DELAY, ENABLE_TX_DELAY);
} else {
/* Settings for RGMII ID mode.
- Not applicable for EMAC core version 2.1.0 and 2.1.2 */
+ Not applicable for EMAC core version 2.1.0, 2.1.2 and 2.1.1 */
if (pdata->emac_hw_version_type != EMAC_HW_v2_1_0 &&
- pdata->emac_hw_version_type != EMAC_HW_v2_1_2)
+ pdata->emac_hw_version_type != EMAC_HW_v2_1_2 &&
+ pdata->emac_hw_version_type != EMAC_HW_v2_1_1)
set_phy_rx_tx_delay(pdata, DISABLE_RX_DELAY, DISABLE_TX_DELAY);
}
break;
@@ -675,9 +679,10 @@ static void configure_phy_rx_tx_delay(struct DWC_ETH_QOS_prv_data *pdata)
set_phy_rx_tx_delay(pdata, DISABLE_RX_DELAY, ENABLE_TX_DELAY);
} else {
/* Settings for RGMII ID mode */
- /* Not applicable for EMAC core version 2.1.0 and 2.1.2 */
+ /* Not applicable for EMAC core version 2.1.0, 2.1.2 and 2.1.1 */
if (pdata->emac_hw_version_type != EMAC_HW_v2_1_0 &&
- pdata->emac_hw_version_type != EMAC_HW_v2_1_2)
+ pdata->emac_hw_version_type != EMAC_HW_v2_1_2 &&
+ pdata->emac_hw_version_type != EMAC_HW_v2_1_1)
set_phy_rx_tx_delay(pdata, DISABLE_RX_DELAY, DISABLE_TX_DELAY);
}
}
@@ -799,7 +804,6 @@ static inline int DWC_ETH_QOS_configure_io_macro_dll_settings(
EMACDBG("Enter\n");
#ifndef DWC_ETH_QOS_EMULATION_PLATFORM
- if (pdata->emac_hw_version_type == EMAC_HW_v2_0_0 || pdata->emac_hw_version_type == EMAC_HW_v2_3_1)
DWC_ETH_QOS_rgmii_io_macro_dll_reset(pdata);
/* For RGMII ID mode with internal delay*/
@@ -1010,7 +1014,9 @@ void DWC_ETH_QOS_adjust_link(struct net_device *dev)
DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_PHY_LINK_DOWN);
}
- if (phydev->link == 0 && pdata->io_macro_phy_intf != RMII_MODE)
+ if (phydev->link == 1)
+ pdata->hw_if.start_mac_tx_rx();
+ else if (phydev->link == 0 && pdata->io_macro_phy_intf != RMII_MODE)
DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_10);
}
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
index 48200c2..50d1e55 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -526,8 +526,10 @@ static void DWC_ETH_QOS_configure_gpio_pins(struct platform_device *pdev)
return;
}
EMACDBG("get pinctrl succeed\n");
+ dwc_eth_qos_res_data.pinctrl = pinctrl;
if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_2_0 ||
+ dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2 ||
dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1) {
/* PPS0 pin */
emac_pps_0 = pinctrl_lookup_state(pinctrl, EMAC_PIN_PPS0);
@@ -719,6 +721,28 @@ static void DWC_ETH_QOS_configure_gpio_pins(struct platform_device *pdev)
else
EMACDBG("Set rgmii_rxc_state succeed\n");
+ dwc_eth_qos_res_data.rgmii_rxc_suspend_state =
+ pinctrl_lookup_state(pinctrl, EMAC_RGMII_RXC_SUSPEND);
+ if (IS_ERR_OR_NULL(dwc_eth_qos_res_data.rgmii_rxc_suspend_state)) {
+ ret = PTR_ERR(dwc_eth_qos_res_data.rgmii_rxc_suspend_state);
+ EMACERR("Failed to get rgmii_rxc_suspend_state, err = %d\n", ret);
+ dwc_eth_qos_res_data.rgmii_rxc_suspend_state = NULL;
+ }
+ else {
+ EMACDBG("Get rgmii_rxc_suspend_state succeed\n");
+ }
+
+ dwc_eth_qos_res_data.rgmii_rxc_resume_state =
+ pinctrl_lookup_state(pinctrl, EMAC_RGMII_RXC_RESUME);
+ if (IS_ERR_OR_NULL(dwc_eth_qos_res_data.rgmii_rxc_resume_state)) {
+ ret = PTR_ERR(dwc_eth_qos_res_data.rgmii_rxc_resume_state);
+ EMACERR("Failed to get rgmii_rxc_resume_state, err = %d\n", ret);
+ dwc_eth_qos_res_data.rgmii_rxc_resume_state = NULL;
+ }
+ else {
+ EMACDBG("Get rgmii_rxc_resume_state succeed\n");
+ }
+
rgmii_rx_ctl_state = pinctrl_lookup_state(pinctrl, EMAC_RGMII_RX_CTL);
if (IS_ERR_OR_NULL(rgmii_rx_ctl_state)) {
ret = PTR_ERR(rgmii_rx_ctl_state);
@@ -824,6 +848,10 @@ static int DWC_ETH_QOS_get_dts_config(struct platform_device *pdev)
}
EMACDBG(": emac_core_version = %d\n", dwc_eth_qos_res_data.emac_hw_version_type);
+ if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1 ||
+ dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2)
+ dwc_eth_qos_res_data.pps_lpass_conn_en = true;
+
if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1) {
resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
@@ -941,7 +969,7 @@ int DWC_ETH_QOS_qmp_mailbox_init(struct DWC_ETH_QOS_prv_data *pdata)
pdata->qmp_mbox_client = devm_kzalloc(
&pdata->pdev->dev, sizeof(*pdata->qmp_mbox_client), GFP_KERNEL);
- if (IS_ERR(pdata->qmp_mbox_client)){
+ if (pdata->qmp_mbox_client == NULL || IS_ERR(pdata->qmp_mbox_client)) {
EMACERR("qmp alloc client failed\n");
return -1;
}
@@ -1009,8 +1037,9 @@ int DWC_ETH_QOS_enable_ptp_clk(struct device *dev)
int ret;
const char* ptp_clock_name;
- if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_0 ||
- dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_0
+ || dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2
+ || dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_1)
ptp_clock_name = "emac_ptp_clk";
else
ptp_clock_name = "eth_ptp_clk";
@@ -1158,8 +1187,9 @@ static int DWC_ETH_QOS_get_clks(struct device *dev)
dwc_eth_qos_res_data.rgmii_clk = NULL;
dwc_eth_qos_res_data.ptp_clk = NULL;
- if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_0 ||
- (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2)) {
+ if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_0
+ || dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2
+ || dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_1) {
/* EMAC core version 2.1.0 clocks */
axi_clock_name = "emac_axi_clk";
ahb_clock_name = "emac_slv_ahb_clk";
@@ -2234,6 +2264,13 @@ int DWC_ETH_QOS_remove(struct platform_device *pdev)
static void DWC_ETH_QOS_shutdown(struct platform_device *pdev)
{
pr_info("qcom-emac-dwc-eqos: DWC_ETH_QOS_shutdown\n");
+#ifdef DWC_ETH_QOS_BUILTIN
+ if (gDWC_ETH_QOS_prv_data->dev->flags & IFF_UP) {
+ gDWC_ETH_QOS_prv_data->dev->netdev_ops->ndo_stop(gDWC_ETH_QOS_prv_data->dev);
+ gDWC_ETH_QOS_prv_data->dev->flags &= ~IFF_UP;
+ }
+ DWC_ETH_QOS_remove(pdev);
+#endif
}
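
The builtin shutdown path above calls ndo_stop() and clears IFF_UP by hand. A more conventional sketch of the same teardown uses dev_close() under rtnl_lock, which maintains IFF_UP and the notifier chain itself (assumption: no driver-specific reason here to bypass the core helper):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: take the interface down in .shutdown via the core helper. */
static void emac_shutdown_sketch(struct net_device *ndev)
{
        rtnl_lock();
        if (ndev->flags & IFF_UP)
                dev_close(ndev);        /* handles IFF_UP and notifiers */
        rtnl_unlock();
}
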
#ifdef CONFIG_PM
@@ -2260,10 +2297,10 @@ static void DWC_ETH_QOS_shutdown(struct platform_device *pdev)
* \retval 0
*/
-static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
+static INT DWC_ETH_QOS_suspend(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
+ struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+ struct net_device *net_dev = pdata->dev;
struct hw_if_struct *hw_if = &pdata->hw_if;
INT ret, pmt_flags = 0;
unsigned int rwk_filter_values[] = {
@@ -2299,7 +2336,7 @@ static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
EMACDBG("-->DWC_ETH_QOS_suspend\n");
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded")) {
EMACDBG("<--DWC_ETH_QOS_suspend smmu return\n");
return 0;
}
@@ -2311,7 +2348,7 @@ static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
- if (!dev || !netif_running(dev)) {
+ if (!net_dev || !netif_running(net_dev)) {
return -EINVAL;
}
@@ -2323,10 +2360,22 @@ static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
if (pdata->hw_feat.mgk_sel && (pdata->wolopts & WAKE_MAGIC))
pmt_flags |= DWC_ETH_QOS_MAGIC_WAKEUP;
- ret = DWC_ETH_QOS_powerdown(dev, pmt_flags, DWC_ETH_QOS_DRIVER_CONTEXT);
+ ret = DWC_ETH_QOS_powerdown(net_dev, pmt_flags, DWC_ETH_QOS_DRIVER_CONTEXT);
DWC_ETH_QOS_suspend_clks(pdata);
+ /* Suspend the PHY RXC clock. */
+ if (dwc_eth_qos_res_data.is_pinctrl_names &&
+ (dwc_eth_qos_res_data.rgmii_rxc_suspend_state != NULL)) {
+ /* Remove RXC clock source from Phy.*/
+ ret = pinctrl_select_state(dwc_eth_qos_res_data.pinctrl,
+ dwc_eth_qos_res_data.rgmii_rxc_suspend_state);
+ if (ret)
+ EMACERR("Unable to set rgmii_rxc_suspend_state state, err = %d\n", ret);
+ else
+ EMACDBG("Set rgmii_rxc_suspend_state succeed\n");
+ }
+
EMACDBG("<--DWC_ETH_QOS_suspend ret = %d\n", ret);
#ifdef CONFIG_MSM_BOOT_TIME_MARKER
pdata->print_kpi = 0;
@@ -2356,18 +2405,18 @@ static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
* \retval 0
*/
-static INT DWC_ETH_QOS_resume(struct platform_device *pdev)
+static INT DWC_ETH_QOS_resume(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
+ struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+ struct net_device *net_dev = pdata->dev;
INT ret;
EMACDBG("-->DWC_ETH_QOS_resume\n");
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded"))
+ if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
return 0;
- if (!dev || !netif_running(dev)) {
- EMACERR("<--DWC_ETH_QOS_dev_resume not possible\n");
+ if (!net_dev || !netif_running(net_dev)) {
+ EMACERR("<--DWC_ETH_QOS_dev_resume\n");
return -EINVAL;
}
@@ -2384,20 +2433,33 @@ static INT DWC_ETH_QOS_resume(struct platform_device *pdev)
/* Wakeup reason can be PHY link event or a RX packet */
/* Set a wakeup event to ensure enough time for processing */
- pm_wakeup_event(&pdev->dev, 5000);
+ pm_wakeup_event(dev, 5000);
return 0;
}
+ /* Resume the PHY RXC clock. */
+ if (dwc_eth_qos_res_data.is_pinctrl_names &&
+ (dwc_eth_qos_res_data.rgmii_rxc_resume_state != NULL)) {
+
+ /* Enable RXC clock source from Phy.*/
+ ret = pinctrl_select_state(dwc_eth_qos_res_data.pinctrl,
+ dwc_eth_qos_res_data.rgmii_rxc_resume_state);
+ if (ret)
+ EMACERR("Unable to set rgmii_rxc_resume_state state, err = %d\n", ret);
+ else
+ EMACDBG("Set rgmii_rxc_resume_state succeed\n");
+ }
+
DWC_ETH_QOS_resume_clks(pdata);
- ret = DWC_ETH_QOS_powerup(dev, DWC_ETH_QOS_DRIVER_CONTEXT);
+ ret = DWC_ETH_QOS_powerup(net_dev, DWC_ETH_QOS_DRIVER_CONTEXT);
if (pdata->ipa_enabled)
DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_DPM_RESUME);
/* Wakeup reason can be PHY link event or a RX packet */
/* Set a wakeup event to ensure enough time for processing */
- pm_wakeup_event(&pdev->dev, 5000);
+ pm_wakeup_event(dev, 5000);
EMACDBG("<--DWC_ETH_QOS_resume\n");
@@ -2406,18 +2468,108 @@ static INT DWC_ETH_QOS_resume(struct platform_device *pdev)
#endif /* CONFIG_PM */
-static struct platform_driver DWC_ETH_QOS_plat_drv = {
- .probe = DWC_ETH_QOS_probe,
- .remove = DWC_ETH_QOS_remove,
- .shutdown = DWC_ETH_QOS_shutdown,
+static int DWC_ETH_QOS_hib_restore(struct device *dev) {
+ struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+ int ret = 0;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
+ return 0;
+
+ EMACINFO(" start\n");
+
+ ret = DWC_ETH_QOS_init_regulators(dev);
+ if (ret)
+ return ret;
+
+ ret = DWC_ETH_QOS_init_gpios(dev);
+ if (ret)
+ return ret;
+
+ ret = DWC_ETH_QOS_get_clks(dev);
+ if (ret)
+ return ret;
+
+ DWC_ETH_QOS_set_clk_and_bus_config(pdata, pdata->speed);
+
+ DWC_ETH_QOS_set_rgmii_func_clk_en();
+
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+ DWC_ETH_QOS_ptp_init(pdata);
+#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
+
+ /* issue software reset to device */
+ pdata->hw_if.exit();
+
+ /* Bypass PHYLIB for TBI, RTBI and SGMII interface */
+ if (pdata->hw_feat.sma_sel == 1) {
+ ret = DWC_ETH_QOS_mdio_register(pdata->dev);
+ if (ret < 0) {
+ EMACERR("MDIO bus (id %d) registration failed\n",
+ pdata->bus_id);
+ return ret;
+ }
+ }
+
+ if (!(pdata->dev->flags & IFF_UP)) {
+ pdata->dev->netdev_ops->ndo_open(pdata->dev);
+ pdata->dev->flags |= IFF_UP;
+ }
+
+ EMACINFO("end\n");
+
+ return ret;
+}
+
+static int DWC_ETH_QOS_hib_freeze(struct device *dev) {
+ struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+ int ret = 0;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
+ return 0;
+
+ EMACINFO(" start\n");
+ if (pdata->dev->flags & IFF_UP) {
+ pdata->dev->netdev_ops->ndo_stop(pdata->dev);
+ pdata->dev->flags &= ~IFF_UP;
+ }
+
+ if (pdata->hw_feat.sma_sel == 1)
+ DWC_ETH_QOS_mdio_unregister(pdata->dev);
+
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+ DWC_ETH_QOS_ptp_remove(pdata);
+#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
+
+ DWC_ETH_QOS_disable_clks(dev);
+
+ DWC_ETH_QOS_disable_regulators();
+
+ DWC_ETH_QOS_free_gpios();
+
+ EMACINFO("end\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops DWC_ETH_QOS_pm_ops = {
+ .freeze = DWC_ETH_QOS_hib_freeze,
+ .restore = DWC_ETH_QOS_hib_restore,
+ .thaw = DWC_ETH_QOS_hib_restore,
#ifdef CONFIG_PM
.suspend = DWC_ETH_QOS_suspend,
.resume = DWC_ETH_QOS_resume,
#endif
+};
+
+static struct platform_driver DWC_ETH_QOS_plat_drv = {
+ .probe = DWC_ETH_QOS_probe,
+ .remove = DWC_ETH_QOS_remove,
+ .shutdown = DWC_ETH_QOS_shutdown,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = DWC_ETH_QOS_plat_drv_match,
+ .pm = &DWC_ETH_QOS_pm_ops,
},
};
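
The suspend/resume hunks above gate the PHY's RXC clock by switching pinctrl states. The pattern condenses to: look the named states up once, then select them from the PM callbacks. A sketch using the state names added in DWC_ETH_QOS_yheader.h (the emac_pins container and function names are illustrative):

#include <linux/device.h>
#include <linux/pinctrl/consumer.h>

struct emac_pins {                      /* illustrative container */
        struct pinctrl *pinctrl;
        struct pinctrl_state *rxc_suspend;
        struct pinctrl_state *rxc_resume;
};

static int emac_pins_setup(struct device *dev, struct emac_pins *p)
{
        p->pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(p->pinctrl))
                return PTR_ERR(p->pinctrl);

        p->rxc_suspend = pinctrl_lookup_state(p->pinctrl,
                        "dev-emac-rgmii_rxc_suspend_state");
        p->rxc_resume = pinctrl_lookup_state(p->pinctrl,
                        "dev-emac-rgmii_rxc_resume_state");
        if (IS_ERR(p->rxc_suspend) || IS_ERR(p->rxc_resume))
                return -ENODEV;
        return 0;
}

/* Called from the PM suspend callback; the resume side mirrors it. */
static int emac_rxc_suspend(struct emac_pins *p)
{
        return pinctrl_select_state(p->pinctrl, p->rxc_suspend);
}
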
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
index 5962c15..1e2c998 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
@@ -36,7 +36,7 @@ static ssize_t pps_fops_read(struct file *filp, char __user *buf,
unsigned int len = 0, buf_len = 5000;
char* temp_buf;
- ssize_t ret_cnt;
+ ssize_t ret_cnt = 0;
struct pps_info *info;
info = filp->private_data;
@@ -54,7 +54,8 @@ static ssize_t pps_fops_read(struct file *filp, char __user *buf,
ret_cnt = simple_read_from_buffer(buf, count, f_pos, temp_buf, len);
kfree(temp_buf);
- EMACERR("poll pps2intr info=%d sent by kernel\n", gDWC_ETH_QOS_prv_data->avb_class_a_intr_cnt);
+ if (gDWC_ETH_QOS_prv_data)
+ EMACERR("poll pps2intr info=%d sent by kernel\n", gDWC_ETH_QOS_prv_data->avb_class_a_intr_cnt);
} else if (info->channel_no == AVB_CLASS_B_CHANNEL_NUM ) {
temp_buf = kzalloc(buf_len, GFP_KERNEL);
if (!temp_buf)
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
index 0a286ae..1bcead3 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
@@ -315,12 +315,13 @@ int DWC_ETH_QOS_ptp_init(struct DWC_ETH_QOS_prv_data *pdata)
}
#ifdef CONFIG_PPS_OUTPUT
- if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
- /*Configuaring PPS0 PPS output frequency to defualt 19.2 Mhz*/
+ if (pdata->res_data->pps_lpass_conn_en) {
+ /* Configuring PPS0 PPS output frequency to default 19.2 MHz */
eth_pps_cfg.ppsout_ch = 0;
eth_pps_cfg.ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
- eth_pps_cfg.ppsout_freq = 19200000;
+ eth_pps_cfg.ppsout_freq = DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY;
eth_pps_cfg.ppsout_start = 1;
+ eth_pps_cfg.ppsout_duty = 50;
req.ptr = (void*)&eth_pps_cfg;
DWC_ETH_QOS_pps_timer_init(&req);
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_rgmii_io_macro.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_rgmii_io_macro.c
index ad6736f..ebd302c 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_rgmii_io_macro.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_rgmii_io_macro.c
@@ -364,7 +364,8 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
uint rgmii_data_divide_clk;
ULONG data;
- if (pdata->emac_hw_version_type == EMAC_HW_v2_3_0 || (pdata->emac_hw_version_type == EMAC_HW_v2_3_1)) {
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_3_0 || (pdata->emac_hw_version_type == EMAC_HW_v2_3_1)
+ || (pdata->emac_hw_version_type == EMAC_HW_v2_1_1)) {
if(pdata->io_macro_phy_intf == RGMII_MODE)
loopback_mode_en = 0x1;
rgmii_data_divide_clk = 0x0;
@@ -403,7 +404,8 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
RGMII_LOOPBACK_EN_UDFWR(loopback_mode_en);
if (pdata->emac_hw_version_type == EMAC_HW_v2_1_0 ||
pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
- (pdata->emac_hw_version_type == EMAC_HW_v2_3_1))
+ (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_TX_CLK_PHASE_SHIFT_EN_UDFWR(0x1);
} else {
/* Enable DDR mode*/
@@ -429,6 +431,8 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
SDCC_HC_PRG_RCLK_DLY_UDFWR(52);
else if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1)
SDCC_HC_PRG_RCLK_DLY_UDFWR(104);
+ else if (pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
+ SDCC_HC_PRG_RCLK_DLY_UDFWR(130);
else { /* Program PRG_RCLK_DLY to 57 for a required delay of 1.8 ns */
SDCC_HC_PRG_RCLK_DLY_UDFWR(57);
}
@@ -459,9 +463,11 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
RGMII_LOOPBACK_EN_UDFWR(loopback_mode_en);
if (pdata->emac_hw_version_type == EMAC_HW_v2_1_0 ||
pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
- (pdata->emac_hw_version_type == EMAC_HW_v2_3_1))
+ (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_RX_PROG_SWAP_UDFWR(0x1);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_TX_CLK_PHASE_SHIFT_EN_UDFWR(0x1);
} else{
RGMII_DDR_MODE_UDFWR(0x1);
@@ -506,9 +512,11 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
RGMII_LOOPBACK_EN_UDFWR(loopback_mode_en);
if (pdata->emac_hw_version_type == EMAC_HW_v2_1_0 ||
pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
- (pdata->emac_hw_version_type == EMAC_HW_v2_3_1))
+ (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_RX_PROG_SWAP_UDFWR(0x1);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_TX_CLK_PHASE_SHIFT_EN_UDFWR(0x1);
} else{
RGMII_DDR_MODE_UDFWR(0x1);
@@ -570,7 +578,8 @@ int DWC_ETH_QOS_rgmii_io_macro_init(struct DWC_ETH_QOS_prv_data *pdata)
RGMII_CONFIG_2_DATA_DIVIDE_CLK_SEL_UDFWR(0x1);
RGMII_CONFIG_2_TX_CLK_PHASE_SHIFT_EN_UDFWR(0x0);
RGMII_CONFIG_2_RERVED_CONFIG_16_EN_UDFWR(0x1);
- if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2)
+ if (pdata->emac_hw_version_type == EMAC_HW_v2_1_2 ||
+ pdata->emac_hw_version_type == EMAC_HW_v2_1_1)
RGMII_CONFIG_2_TX_CLK_PHASE_SHIFT_EN_UDFWR(0x1);
if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1)
RGMII_LOOPBACK_EN_UDFWR(0x1);
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
index 6af15b7..1c4980d 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
@@ -50,7 +50,7 @@
#define DWC_ETH_QOS_MAX_TX_QUEUE_CNT 8
#define DWC_ETH_QOS_MAX_RX_QUEUE_CNT 8
-//#define CONFIG_PPS_OUTPUT // for PPS Output
+#define CONFIG_PPS_OUTPUT // for PPS Output
/* Private IOCTL for handling device specific task */
#define DWC_ETH_QOS_PRV_IOCTL SIOCDEVPRIVATE
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
index e1d5923..ee29121 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -427,7 +427,8 @@ extern void *ipc_emac_log_ctxt;
#define DWC_ETH_QOS_SYSCLOCK 250000000 /* System clock is 250MHz */
#define DWC_ETH_QOS_SYSTIMEPERIOD 4 /* System time period is 4ns */
-#define DWC_ETH_QOS_DEFAULT_PTP_CLOCK 250000000
+#define DWC_ETH_QOS_DEFAULT_PTP_CLOCK 96000000
+#define DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY 19200000
#define DWC_ETH_QOS_TX_QUEUE_CNT (pdata->tx_queue_cnt)
#define DWC_ETH_QOS_RX_QUEUE_CNT (pdata->rx_queue_cnt)
@@ -1005,6 +1006,7 @@ struct hw_if_struct {
/* for hw time stamping */
INT(*config_hw_time_stamping)(UINT);
INT(*config_sub_second_increment)(unsigned long ptp_clock);
+ INT(*config_default_addend)(struct DWC_ETH_QOS_prv_data *pdata, unsigned long ptp_clock);
INT(*init_systime)(UINT, UINT);
INT(*config_addend)(UINT);
INT(*adjust_systime)(UINT, UINT, INT, bool);
@@ -1564,6 +1566,9 @@ struct DWC_ETH_QOS_res_data {
bool is_pinctrl_names;
int gpio_phy_intr_redirect;
int gpio_phy_reset;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *rgmii_rxc_suspend_state;
+ struct pinctrl_state *rgmii_rxc_resume_state;
/* Regulators */
struct regulator *gdsc_emac;
@@ -1578,6 +1583,7 @@ struct DWC_ETH_QOS_res_data {
struct clk *ptp_clk;
unsigned int emac_hw_version_type;
bool early_eth_en;
+ bool pps_lpass_conn_en;
};
struct DWC_ETH_QOS_prv_ipa_data {
@@ -2036,6 +2042,8 @@ void DWC_ETH_QOS_set_clk_and_bus_config(struct DWC_ETH_QOS_prv_data *pdata, int
#define EMAC_PHY_RESET "dev-emac-phy_reset_state"
#define EMAC_PHY_INTR "dev-emac-phy_intr"
#define EMAC_PIN_PPS0 "dev-emac_pin_pps_0"
+#define EMAC_RGMII_RXC_SUSPEND "dev-emac-rgmii_rxc_suspend_state"
+#define EMAC_RGMII_RXC_RESUME "dev-emac-rgmii_rxc_resume_state"
#ifdef PER_CH_INT
void DWC_ETH_QOS_handle_DMA_Int(struct DWC_ETH_QOS_prv_data *pdata, int chinx, bool);
diff --git a/drivers/emac-dwc-eqos/Makefile.builtin b/drivers/emac-dwc-eqos/Makefile.builtin
index 520cb8d..f6a5573 100644
--- a/drivers/emac-dwc-eqos/Makefile.builtin
+++ b/drivers/emac-dwc-eqos/Makefile.builtin
@@ -13,7 +13,7 @@ EXTRA_CFLAGS+=-DCONFIG_PTPSUPPORT_OBJ
obj-$(CONFIG_EMAC_DWC_EQOS) += DWC_ETH_QOS_ptp.o
endif
-ifeq ($(CONFIG_IPA3), y)
+ifeq ($(CONFIG_IPA_OFFLOAD), y)
KBUILD_CFLAGS += -DDWC_ETH_QOS_ENABLE_IPA
obj-$(CONFIG_EMAC_DWC_EQOS) += DWC_ETH_QOS_ipa.o
endif
diff --git a/drivers/emac-dwc-eqos/emac_perf_settings.sh b/drivers/emac-dwc-eqos/emac_perf_settings.sh
new file mode 100644
index 0000000..508e69d
--- /dev/null
+++ b/drivers/emac-dwc-eqos/emac_perf_settings.sh
@@ -0,0 +1,22 @@
+#!/vendor/bin/sh
+#Copyright (c) 2019, The Linux Foundation. All rights reserved.
+#
+#This program is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License version 2 and
+#only version 2 as published by the Free Software Foundation.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+#
+#
+echo 12582912 > /proc/sys/net/core/wmem_max;
+echo 12582912 > /proc/sys/net/core/rmem_max;
+echo 10240 87380 12582912 > /proc/sys/net/ipv4/tcp_rmem;
+echo 10240 87380 12582912 > /proc/sys/net/ipv4/tcp_wmem;
+echo 12582912 > /proc/sys/net/ipv4/udp_rmem_min;
+echo 12582912 > /proc/sys/net/ipv4/udp_wmem_min;
+echo 1 > /proc/sys/net/ipv4/tcp_window_scaling;
+echo 18 > /sys/class/net/eth0/queues/rx-0/rps_cpus;
+
diff --git a/drivers/rmnet/perf/rmnet_perf_config.c b/drivers/rmnet/perf/rmnet_perf_config.c
index 8a5f50e..be245d3 100644
--- a/drivers/rmnet/perf/rmnet_perf_config.c
+++ b/drivers/rmnet/perf/rmnet_perf_config.c
@@ -397,7 +397,9 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
switch (event) {
case NETDEV_UNREGISTER:
- if (rmnet_is_real_dev_registered(dev) &&
+ pr_info("%s(): rmnet_perf netdevice unregister, name = %s\n",
+ __func__, dev->name);
+ if (perf && rmnet_is_real_dev_registered(dev) &&
rmnet_perf_config_hook_registered() &&
(!strncmp(dev->name, "rmnet_ipa0", 10) ||
!strncmp(dev->name, "rmnet_mhi0", 10))) {
@@ -413,6 +415,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
RCU_INIT_POINTER(rmnet_perf_desc_entry, NULL);
RCU_INIT_POINTER(rmnet_perf_chain_end, NULL);
+ perf = NULL;
}
break;
case NETDEV_REGISTER:
@@ -421,7 +424,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
/* Check prevents us from allocating resources for every
* interface
*/
- if (!rmnet_perf_config_hook_registered() &&
+ if (!perf && !rmnet_perf_config_hook_registered() &&
strncmp(dev->name, "rmnet_data", 10) == 0) {
struct rmnet_priv *priv = netdev_priv(dev);
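
The added perf checks make the notifier idempotent: state is allocated only on the first matching NETDEV_REGISTER, and the module-level pointer is cleared on NETDEV_UNREGISTER so a later registration starts clean. A skeleton of that pattern (names and the allocation size are illustrative, not rmnet_perf's):

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *perf_state;        /* module-level handle, NULL when torn down */

static int perf_notify_cb(struct notifier_block *nb,
                          unsigned long event, void *data)
{
        struct net_device *dev = netdev_notifier_info_to_dev(data);

        switch (event) {
        case NETDEV_REGISTER:
                if (!perf_state && !strncmp(dev->name, "rmnet_data", 10))
                        perf_state = kzalloc(128, GFP_KERNEL);
                break;
        case NETDEV_UNREGISTER:
                if (perf_state && !strncmp(dev->name, "rmnet_ipa0", 10)) {
                        kfree(perf_state);
                        perf_state = NULL;      /* allow a clean re-init */
                }
                break;
        }
        return NOTIFY_DONE;
}
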
diff --git a/drivers/rmnet/perf/rmnet_perf_opt.c b/drivers/rmnet/perf/rmnet_perf_opt.c
index b411483..d6b21f7 100644
--- a/drivers/rmnet/perf/rmnet_perf_opt.c
+++ b/drivers/rmnet/perf/rmnet_perf_opt.c
@@ -318,10 +318,12 @@ rmnet_perf_opt_add_flow_subfrags(struct rmnet_perf_opt_flow_node *flow_node)
new_frag = pkt_list[i].frag_desc;
/* Pull headers if they're there */
- if (new_frag->hdr_ptr == rmnet_frag_data_ptr(new_frag))
- rmnet_frag_pull(new_frag, perf->rmnet_port,
- flow_node->ip_len +
- flow_node->trans_len);
+ if (new_frag->hdr_ptr == rmnet_frag_data_ptr(new_frag)) {
+ if (!rmnet_frag_pull(new_frag, perf->rmnet_port,
+ flow_node->ip_len +
+ flow_node->trans_len))
+ continue;
+ }
/* Move the fragment onto the subfrags list */
list_move_tail(&new_frag->list, &head_frag->sub_frags);
diff --git a/drivers/rmnet/shs/Android.mk b/drivers/rmnet/shs/Android.mk
index 08215a0..b150417 100644
--- a/drivers/rmnet/shs/Android.mk
+++ b/drivers/rmnet/shs/Android.mk
@@ -14,7 +14,7 @@ LOCAL_CLANG :=true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_shs.ko
-LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c rmnet_shs_freq.c
+LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c rmnet_shs_freq.c rmnet_shs_wq_mem.c rmnet_shs_wq_genl.c
RMNET_SHS_BLD_DIR := ../../vendor/qcom/opensource/data-kernel/drivers/rmnet/shs
DLKM_DIR := ./device/qcom/common/dlkm
diff --git a/drivers/rmnet/shs/Kbuild b/drivers/rmnet/shs/Kbuild
index 1593101..196d128 100644
--- a/drivers/rmnet/shs/Kbuild
+++ b/drivers/rmnet/shs/Kbuild
@@ -1,2 +1,2 @@
obj-m += rmnet_shs.o
-rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o rmnet_shs_freq.o
+rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o rmnet_shs_freq.o rmnet_shs_wq_mem.o rmnet_shs_wq_genl.o
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index 9f12e5d..f6ce09e 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -54,14 +54,14 @@
//#define RMNET_SHS_UDP_PPS_SILVER_CORE_UPPER_THRESH 90000
//#define RMNET_SHS_TCP_PPS_SILVER_CORE_UPPER_THRESH 90000
-#define SHS_TRACE_ERR(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_err(__VA_ARGS__)
+#define SHS_TRACE_ERR(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_err(__VA_ARGS__); } while (0)
-#define SHS_TRACE_HIGH(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_high(__VA_ARGS__)
+#define SHS_TRACE_HIGH(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_high(__VA_ARGS__); } while (0)
-#define SHS_TRACE_LOW(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_low(__VA_ARGS__)
+#define SHS_TRACE_LOW(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_low(__VA_ARGS__); } while (0)
#define RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY 204800
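
The do { ... } while (0) rewrite of the SHS_TRACE_* macros above is the standard fix for statement-like macros: the old form expanded to a bare if, which silently re-pairs a following else. A minimal illustration (not driver code):

#define BAD_TRACE(x)  if (debug) trace(x)
#define GOOD_TRACE(x) do { if (debug) trace(x); } while (0)

/* With the old form, the else below binds to the macro's internal if,
 * not to the outer condition:
 *
 *     if (pkt_ok)
 *             BAD_TRACE(pkt);   // expands to: if (debug) trace(pkt);
 *     else
 *             drop(pkt);        // else pairs with "if (debug)"!
 *
 * GOOD_TRACE() behaves as a single statement and requires the trailing
 * semicolon, so the dangling-else hazard disappears.
 */
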
@@ -77,6 +77,9 @@
#define RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH 40000
#define RMNET_SHS_TCP_PPS_PERF_CPU_LTHRESH (40000*RMNET_SHS_TCP_COALESCING_RATIO)
+#define RMNET_SHS_UDP_PPS_HEADROOM 20000
+#define RMNET_SHS_GOLD_BALANCING_THRESH (RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH / 2)
+
struct core_flush_s {
struct hrtimer core_timer;
struct work_struct work;
@@ -92,8 +95,8 @@ struct rmnet_shs_cfg_s {
struct rmnet_port *port;
struct core_flush_s core_flush[MAX_CPUS];
u64 core_skbs[MAX_CPUS];
- long int num_bytes_parked;
- long int num_pkts_parked;
+ long num_bytes_parked;
+ long num_pkts_parked;
u32 is_reg_dl_mrk_ind;
u16 num_flows;
u8 is_pkt_parked;
diff --git a/drivers/rmnet/shs/rmnet_shs_config.c b/drivers/rmnet/shs/rmnet_shs_config.c
index 4112e1a..e6b4002 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.c
+++ b/drivers/rmnet/shs/rmnet_shs_config.c
@@ -20,6 +20,7 @@
#include "rmnet_shs_config.h"
#include "rmnet_shs.h"
#include "rmnet_shs_wq.h"
+#include "rmnet_shs_wq_genl.h"
MODULE_LICENSE("GPL v2");
@@ -32,7 +33,7 @@ unsigned int rmnet_shs_stats_enabled __read_mostly = 1;
module_param(rmnet_shs_stats_enabled, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_stats_enabled, "Enable Disable stats collection");
-unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
module_param_array(rmnet_shs_crit_err, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_crit_err, "rmnet shs critical error type");
@@ -51,6 +52,11 @@ int __init rmnet_shs_module_init(void)
pr_info("%s(): Starting rmnet SHS module\n", __func__);
trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_INIT,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+ if (rmnet_shs_wq_genl_init()) {
+ rm_err("%s", "SHS_GNL: Failed to init generic netlink");
+ }
+
return register_netdevice_notifier(&rmnet_shs_dev_notifier);
}
@@ -60,6 +66,9 @@ void __exit rmnet_shs_module_exit(void)
trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_EXIT,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
unregister_netdevice_notifier(&rmnet_shs_dev_notifier);
+
+ rmnet_shs_wq_genl_deinit();
+
pr_info("%s(): Exiting rmnet SHS module\n", __func__);
}
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index d033723..dc385e4 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -42,12 +42,16 @@ enum rmnet_shs_crit_err_e {
RMNET_SHS_CPU_PKTLEN_ERR,
RMNET_SHS_NULL_SKB_HEAD,
RMNET_SHS_RPS_MASK_CHANGE,
+ RMNET_SHS_WQ_INVALID_CPU_ERR,
+ RMNET_SHS_WQ_INVALID_PTR_ERR,
+ RMNET_SHS_WQ_NODE_MALLOC_ERR,
+ RMNET_SHS_WQ_NL_SOCKET_ERR,
RMNET_SHS_CRIT_ERR_MAX
};
extern unsigned int rmnet_shs_debug;
extern unsigned int rmnet_shs_stats_enabled;
-extern unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+extern unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
extern struct rmnet_shs_cfg_s rmnet_shs_cfg;
extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
diff --git a/drivers/rmnet/shs/rmnet_shs_freq.c b/drivers/rmnet/shs/rmnet_shs_freq.c
index 0a2fd8e..c6123c6 100644
--- a/drivers/rmnet/shs/rmnet_shs_freq.c
+++ b/drivers/rmnet/shs/rmnet_shs_freq.c
@@ -88,7 +88,7 @@ void rmnet_shs_reset_freq(void)
}
}
-void rmnet_shs_boost_cpus()
+void rmnet_shs_boost_cpus(void)
{
struct cpu_freq *boost;
int i;
@@ -110,7 +110,7 @@ void rmnet_shs_boost_cpus()
queue_work(shs_boost_wq, &boost_cpu);
}
-void rmnet_shs_reset_cpus()
+void rmnet_shs_reset_cpus(void)
{
struct cpu_freq *boost;
int i;
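The (void) additions above are worth spelling out: in C, an empty parameter list leaves the parameters unspecified rather than declaring the function as taking none. A two-line illustration:

void f();      /* old-style: parameters unspecified; f(1, 2) still compiles */
void g(void);  /* prototype: takes no arguments; g(1) is a compile-time error */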
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 2ea09dc..ae66460 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -57,11 +57,11 @@ struct rmnet_shs_cfg_s rmnet_shs_cfg;
struct rmnet_shs_flush_work shs_rx_work;
/* Delayed workqueue that will be used to flush parked packets*/
-unsigned long int rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
+unsigned long rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
module_param_array(rmnet_shs_switch_reason, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_switch_reason, "rmnet shs skb core switch type");
-unsigned long int rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
+unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
@@ -161,16 +161,36 @@ int rmnet_shs_is_skb_stamping_reqd(struct sk_buff *skb)
case htons(ETH_P_IP):
if (!ip_is_fragment(ip_hdr(skb)) &&
((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP)))
+ (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
ret_val = 1;
-
+ break;
+ }
+ /* A zero hash with sw_hash set makes RPS skip steering, so
+ * the packet is processed on the same CPU it arrived on.
+ */
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+ skb->hash = 0;
+ skb->sw_hash = 1;
+ }
break;
case htons(ETH_P_IPV6):
if (!(ipv6_hdr(skb)->nexthdr == NEXTHDR_FRAGMENT) &&
((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ||
- (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)))
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))) {
ret_val = 1;
+ break;
+ }
+
+ /* A zero hash with sw_hash set makes RPS skip steering, so
+ * the packet is processed on the same CPU it arrived on.
+ */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ skb->hash = 0;
+ skb->sw_hash = 1;
+ }
break;
@@ -187,6 +207,15 @@ int rmnet_shs_is_skb_stamping_reqd(struct sk_buff *skb)
(ip4h->protocol == IPPROTO_TCP ||
ip4h->protocol == IPPROTO_UDP)) {
ret_val = 1;
+ break;
+ }
+ /* A zero hash with sw_hash set makes RPS skip steering, so
+ * the packet is processed on the same CPU it arrived on.
+ */
+ if (ip4h->protocol == IPPROTO_ICMP) {
+ skb->hash = 0;
+ skb->sw_hash = 1;
}
break;
@@ -196,8 +225,18 @@ int rmnet_shs_is_skb_stamping_reqd(struct sk_buff *skb)
if (!(ip6h->nexthdr == NEXTHDR_FRAGMENT) &&
((ip6h->nexthdr == IPPROTO_TCP) ||
- (ip6h->nexthdr == IPPROTO_UDP)))
+ (ip6h->nexthdr == IPPROTO_UDP))) {
ret_val = 1;
+ break;
+ }
+ /* A zero hash with sw_hash set makes RPS skip steering, so
+ * the packet is processed on the same CPU it arrived on.
+ */
+ if (ip6h->nexthdr == IPPROTO_ICMPV6) {
+ skb->hash = 0;
+ skb->sw_hash = 1;
+ }
break;
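All four protocol branches above lean on the same upstream RPS behavior: a software-set hash of zero aborts CPU selection. A paraphrased sketch of the relevant core logic (condensed from skb_get_hash()/get_rps_cpu() in net/core; not verbatim kernel source):

/* sw_hash set => the stack trusts skb->hash as-is, never recomputes it */
static inline __u32 skb_get_hash_sketch(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);
	return skb->hash;
}

/* In get_rps_cpu(): a zero hash means "no steering", so the ICMP
 * packet stays on the CPU it arrived on.
 */
	hash = skb_get_hash_sketch(skb);
	if (!hash)
		goto done;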
@@ -218,7 +257,7 @@ static void rmnet_shs_update_core_load(int cpu, int burst)
struct timespec time1;
struct timespec *time2;
- long int curinterval;
+ long curinterval;
int maxinterval = (rmnet_shs_inst_rate_interval < MIN_MS) ? MIN_MS :
rmnet_shs_inst_rate_interval;
@@ -305,7 +344,8 @@ static void rmnet_shs_deliver_skb(struct sk_buff *skb)
0xDEF, 0xDEF, 0xDEF, 0xDEF, skb, NULL);
if (rmnet_shs_check_skb_can_gro(skb)) {
- if ((napi = get_current_napi_context())) {
+ napi = get_current_napi_context();
+ if (napi) {
napi_gro_receive(napi, skb);
} else {
priv = netdev_priv(skb->dev);
@@ -327,6 +367,48 @@ static void rmnet_shs_deliver_skb_wq(struct sk_buff *skb)
gro_cells_receive(&priv->gro_cells, skb);
}
+/* Delivers skbs after segmenting, directly to network stack */
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *nxt_skb = NULL;
+ struct sk_buff *segs = NULL;
+ int count = 0;
+
+ SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
+ 0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
+
+ segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segs))) {
+ if (ctext == RMNET_RX_CTXT)
+ netif_receive_skb(in_skb);
+ else
+ netif_rx(in_skb);
+
+ return;
+ }
+
+ /* Send segmented skbs */
+ for (skb = segs; skb != NULL; skb = nxt_skb) {
+ nxt_skb = skb->next;
+
+ skb->hash = in_skb->hash;
+ skb->dev = in_skb->dev;
+ skb->next = NULL;
+
+ if (ctext == RMNET_RX_CTXT)
+ netif_receive_skb(skb);
+ else
+ netif_rx(skb);
+
+ count += 1;
+ }
+
+ consume_skb(in_skb);
+
+ return;
+}
+
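A usage note on the function above, read directly from its code: __skb_gso_segment() returns a linked list of freshly allocated skbs, or an error/NULL value, in which case the driver falls back to delivering the original skb unsegmented. On the success path the caller still owns in_skb, which is why consume_skb() runs only after every segment has been handed to the stack.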
int rmnet_shs_flow_num_perf_cores(struct rmnet_shs_skbn_s *node_p)
{
int ret = 0;
@@ -388,9 +470,9 @@ u8 rmnet_shs_mask_from_map(struct rps_map *map)
u8 mask = 0;
u8 i;
- for (i = 0; i < map->len; i++) {
+ for (i = 0; i < map->len; i++)
mask |= 1 << map->cpus[i];
- }
+
return mask;
}
@@ -417,15 +499,14 @@ int rmnet_shs_get_core_prio_flow(u8 mask)
*/
for (i = 0; i < MAX_CPUS; i++) {
- if (!(mask & (1 <<i)))
+ if (!(mask & (1 << i)))
continue;
if (mask & (1 << i))
curr_idx++;
- if (list_empty(&rmnet_shs_cpu_node_tbl[i].node_list_id)) {
+ if (list_empty(&rmnet_shs_cpu_node_tbl[i].node_list_id))
return i;
- }
if (cpu_num_flows[i] <= least_flows ||
least_flows == INVALID_CPU) {
@@ -479,7 +560,7 @@ int rmnet_shs_idx_from_cpu(u8 cpu, u8 mask)
ret = idx;
break;
}
- if(mask & (1 << i))
+ if (mask & (1 << i))
idx++;
}
return ret;
@@ -532,14 +613,14 @@ int rmnet_shs_get_suggested_cpu(struct rmnet_shs_skbn_s *node)
int rmnet_shs_get_hash_map_idx_to_stamp(struct rmnet_shs_skbn_s *node)
{
int cpu, idx = INVALID_CPU;
- cpu = rmnet_shs_get_suggested_cpu(node);
+ cpu = rmnet_shs_get_suggested_cpu(node);
idx = rmnet_shs_idx_from_cpu(cpu, rmnet_shs_cfg.map_mask);
- /* If suggested CPU is no longer in mask. Try using current.*/
- if (unlikely(idx < 0))
- idx = rmnet_shs_idx_from_cpu(node->map_cpu,
- rmnet_shs_cfg.map_mask);
+ /* If the suggested CPU is no longer in the mask, try the current one. */
+ if (unlikely(idx < 0))
+ idx = rmnet_shs_idx_from_cpu(node->map_cpu,
+ rmnet_shs_cfg.map_mask);
SHS_TRACE_LOW(RMNET_SHS_HASH_MAP,
RMNET_SHS_HASH_MAP_IDX_TO_STAMP,
@@ -661,7 +742,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
break;
}
node->is_shs_enabled = 1;
- if (!map){
+ if (!map) {
node->is_shs_enabled = 0;
ret = 1;
break;
@@ -682,12 +763,12 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
(force_flush)) {
if (rmnet_shs_switch_cores) {
- /* Move the amount parked to other core's count
- * Update old core's parked to not include diverted
- * packets and update new core's packets
- */
- new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
- rmnet_shs_cfg.map_mask);
+ /* Move the amount parked to other core's count
+ * Update old core's parked to not include diverted
+ * packets and update new core's packets
+ */
+ new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
+ rmnet_shs_cfg.map_mask);
if (new_cpu < 0) {
ret = 1;
break;
@@ -700,7 +781,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
if (cur_cpu_qhead < node_qhead) {
rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_SWITCH]++;
- rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL]+=
+ rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
(node_qhead -
cur_cpu_qhead);
}
@@ -814,6 +895,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
u32 skb_bytes_delivered = 0;
u32 hash2stamp = 0; /* the default value of skb->hash*/
u8 map = 0, maplen = 0;
+ u8 segment_enable = 0;
if (!node->skb_list.head)
return;
@@ -835,6 +917,8 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
node->skb_list.num_parked_bytes,
node, node->skb_list.head);
+ segment_enable = node->hstats->segment_enable;
+
for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
nxt_skb = skb->next;
@@ -844,11 +928,15 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
skb->next = NULL;
skbs_delivered += 1;
skb_bytes_delivered += skb->len;
- if (ctext == RMNET_RX_CTXT)
- rmnet_shs_deliver_skb(skb);
- else
- rmnet_shs_deliver_skb_wq(skb);
+ if (segment_enable) {
+ rmnet_shs_deliver_skb_segmented(skb, ctext);
+ } else {
+ if (ctext == RMNET_RX_CTXT)
+ rmnet_shs_deliver_skb(skb);
+ else
+ rmnet_shs_deliver_skb_wq(skb);
+ }
}
node->skb_list.num_parked_skbs = 0;
@@ -916,14 +1004,14 @@ int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
RMNET_SHS_FLUSH_CHK_AND_FLUSH_NODE_START,
- force_flush, 0xDEF, 0xDEF, 0xDEF,
+ force_flush, ctxt, 0xDEF, 0xDEF,
node, NULL);
/* Return saved cpu assignment if an entry found*/
if (rmnet_shs_cpu_from_idx(node->map_index, map) != node->map_cpu) {
/* Keep flow on the same core if possible
- * or put Orphaned flow on the default 1st core
- */
+ * or put Orphaned flow on the default 1st core
+ */
map_idx = rmnet_shs_idx_from_cpu(node->map_cpu,
map);
if (map_idx >= 0) {
@@ -1017,8 +1105,8 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_cpu_node_tbl[n->map_cpu].parkedlen -= num_pkts_flush;
n->skb_list.skb_load = 0;
if (n->map_cpu == cpu_num) {
- cpu_tail += num_pkts_flush;
- n->queue_head = cpu_tail;
+ cpu_tail += num_pkts_flush;
+ n->queue_head = cpu_tail;
}
}
@@ -1075,9 +1163,8 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_cfg.is_pkt_parked = 0;
rmnet_shs_cfg.force_flush_state = RMNET_SHS_FLUSH_DONE;
if (rmnet_shs_fall_back_timer) {
- if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+ if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
- }
}
}
@@ -1108,7 +1195,7 @@ void rmnet_shs_chain_to_skb_list(struct sk_buff *skb,
/* Early flush for TCP if PSH packet.
* Flush before parking PSH packet.
*/
- if (skb->cb[SKB_FLUSH]){
+ if (skb->cb[SKB_FLUSH]) {
rmnet_shs_flush_lock_table(0, RMNET_RX_CTXT);
rmnet_shs_flush_reason[RMNET_SHS_FLUSH_PSH_PKT_FLUSH]++;
napi_gro_flush(napi, false);
@@ -1182,9 +1269,8 @@ static void rmnet_flush_buffered(struct work_struct *work)
if (rmnet_shs_fall_back_timer &&
rmnet_shs_cfg.num_bytes_parked &&
rmnet_shs_cfg.num_pkts_parked){
- if(hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+ if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
- }
hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
@@ -1464,7 +1550,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
return;
}
- if ((unlikely(!map))|| !rmnet_shs_cfg.rmnet_shs_init_complete) {
+ if ((unlikely(!map)) || !rmnet_shs_cfg.rmnet_shs_init_complete) {
rmnet_shs_deliver_skb(skb);
SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
RMNET_SHS_ASSIGN_CRIT_ERROR_NO_SHS_REQD,
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 1ec35ec..4c69b57 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -14,8 +14,12 @@
*/
#include "rmnet_shs.h"
-#include <linux/module.h>
+#include "rmnet_shs_wq_genl.h"
+#include "rmnet_shs_wq_mem.h"
#include <linux/workqueue.h>
+#include <linux/list_sort.h>
+#include <net/sock.h>
+#include <linux/skbuff.h>
MODULE_LICENSE("GPL v2");
/* Local Macros */
@@ -149,6 +153,19 @@ unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
+/* Counters for suggestions made by wq */
+unsigned long long rmnet_shs_flow_silver_to_gold[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_silver_to_gold, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_silver_to_gold, "SHS Suggest Silver to Gold");
+
+unsigned long long rmnet_shs_flow_gold_to_silver[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_to_silver, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_to_silver, "SHS Suggest Gold to Silver");
+
+unsigned long long rmnet_shs_flow_gold_balance[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_balance, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_balance, "SHS Suggest Gold Balance");
+
static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
static DEFINE_SPINLOCK(rmnet_shs_ep_lock);
@@ -371,10 +388,16 @@ struct rmnet_shs_wq_hstat_s *rmnet_shs_wq_get_new_hstat_node(void)
return ret_node;
}
+
void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
{
struct timespec time;
+ if (!node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p->hstats = rmnet_shs_wq_get_new_hstat_node();
if (node_p->hstats != NULL) {
(void)getnstimeofday(&time);
@@ -383,6 +406,12 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p->hstats->skb_tport_proto = node_p->skb_tport_proto;
node_p->hstats->current_cpu = node_p->map_cpu;
node_p->hstats->suggested_cpu = node_p->map_cpu;
+
+ /* Start TCP flows with segmentation if userspace connected */
+ if (rmnet_shs_userspace_connected &&
+ node_p->hstats->skb_tport_proto == IPPROTO_TCP)
+ node_p->hstats->segment_enable = 1;
+
node_p->hstats->node = node_p;
node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
time.tv_nsec;
@@ -396,12 +425,110 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p, node_p->hstats);
}
+
+/* Compute the average pps for a flow based on tuning param
+ * Often when we decide to switch from a small cluster core,
+ * it is because of the heavy traffic on that core. In such
+ * circumstances, we want to switch to a big cluster
+ * core as soon as possible. Therefore, we will provide a
+ * greater weightage to the most recent sample compared to
+ * the previous samples.
+ *
+ * On the other hand, when a flow which is on a big cluster
+ * cpu suddenly starts to receive low traffic we move to a
+ * small cluster core after observing low traffic for some
+ * more samples. This approach avoids switching back and forth
+ * to small cluster cpus due to momentary decrease in data
+ * traffic.
+ */
+static u64 rmnet_shs_wq_get_flow_avg_pps(struct rmnet_shs_wq_hstat_s *hnode)
+{
+ u64 avg_pps, mov_avg_pps;
+ u16 new_weight, old_weight;
+
+ if (!hnode) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return 0;
+ }
+
+ if (rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+ /* More weight to current value */
+ new_weight = rmnet_shs_wq_tuning;
+ old_weight = 100 - rmnet_shs_wq_tuning;
+ } else {
+ old_weight = rmnet_shs_wq_tuning;
+ new_weight = 100 - rmnet_shs_wq_tuning;
+ }
+
+ /* Compute the weighted average per flow; if the flow has just
+ * started there are no past values, so use the current pps as
+ * the average.
+ */
+ if (hnode->last_pps == 0) {
+ avg_pps = hnode->rx_pps;
+ } else {
+ mov_avg_pps = (hnode->last_pps + hnode->avg_pps) / 2;
+ avg_pps = (((new_weight * hnode->rx_pps) +
+ (old_weight * mov_avg_pps)) /
+ (new_weight + old_weight));
+ }
+
+ return avg_pps;
+}
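A worked instance of the weighting above (illustrative numbers only; assumes rmnet_shs_wq_tuning == 80 and a flow currently on a low-power core):

/*
 *   new_weight = 80, old_weight = 20
 *   last_pps = 10000, avg_pps = 20000, rx_pps = 50000
 *
 *   mov_avg_pps = (10000 + 20000) / 2                   = 15000
 *   avg_pps     = (80 * 50000 + 20 * 15000) / (80 + 20) = 43000
 *
 * The fresh 50k-pps sample dominates, so a silver-core flow ramps up
 * quickly; on a gold core the weights flip (20/80) and the same burst
 * only moves the average to (20*50000 + 80*15000)/100 = 22000,
 * damping momentary spikes.
 */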
+
+static u64 rmnet_shs_wq_get_cpu_avg_pps(u16 cpu_num)
+{
+ u64 avg_pps, mov_avg_pps;
+ u16 new_weight, old_weight;
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+
+ if (cpu_num >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+
+ if (rmnet_shs_is_lpwr_cpu(cpu_num)) {
+ /* More weight to current value */
+ new_weight = rmnet_shs_wq_tuning;
+ old_weight = 100 - rmnet_shs_wq_tuning;
+ } else {
+ old_weight = rmnet_shs_wq_tuning;
+ new_weight = 100 - rmnet_shs_wq_tuning;
+ }
+
+ /* Compute the weighted average per cpu; if the cpu has no past
+ * pps values, use the current value as the average.
+ */
+ if (cpu_node->last_rx_pps == 0) {
+ avg_pps = cpu_node->avg_pps;
+ } else {
+ mov_avg_pps = (cpu_node->last_rx_pps + cpu_node->avg_pps) / 2;
+ avg_pps = (((new_weight * cpu_node->rx_pps) +
+ (old_weight * mov_avg_pps)) /
+ (new_weight + old_weight));
+ }
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
+ RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_EVAL_CPU,
+ cpu_num, cpu_node->rx_pps, cpu_node->last_rx_pps,
+ avg_pps, NULL, NULL);
+
+ return avg_pps;
+}
+
/* Refresh the RPS mask associated with this flow */
void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
{
struct rmnet_shs_skbn_s *node_p = NULL;
struct rmnet_shs_wq_ep_s *ep = NULL;
+ if (!hstat_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstat_p->node;
/*Map RPS mask from the endpoint associated with this flow*/
@@ -430,6 +557,11 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
if (!rmnet_shs_stats_enabled)
return;
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
if (hstats_p->stat_idx < 0) {
idx = idx % MAX_SUPPORTED_FLOWS_DEBUG;
hstats_p->stat_idx = idx;
@@ -447,6 +579,12 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
rmnet_shs_flow_cpu[hstats_p->stat_idx] = hstats_p->current_cpu;
rmnet_shs_flow_cpu_recommended[hstats_p->stat_idx] =
hstats_p->suggested_cpu;
+ rmnet_shs_flow_silver_to_gold[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD];
+ rmnet_shs_flow_gold_to_silver[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER];
+ rmnet_shs_flow_gold_balance[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_BALANCE];
}
@@ -456,6 +594,11 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
u8 rmnet_shs_wq_is_hash_rx_new_pkt(struct rmnet_shs_wq_hstat_s *hstats_p,
struct rmnet_shs_skbn_s *node_p)
{
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return 0;
+ }
+
if (node_p->num_skb == hstats_p->rx_skb)
return 0;
@@ -467,6 +610,11 @@ void rmnet_shs_wq_update_hash_tinactive(struct rmnet_shs_wq_hstat_s *hstats_p,
{
time_t tdiff;
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
tdiff = rmnet_shs_wq_tnsec - hstats_p->c_epoch;
hstats_p->inactive_duration = tdiff;
@@ -482,10 +630,16 @@ void rmnet_shs_wq_update_hash_stats(struct rmnet_shs_wq_hstat_s *hstats_p)
u64 skb_diff, bytes_diff;
struct rmnet_shs_skbn_s *node_p;
+ if (!hstats_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstats_p->node;
if (!rmnet_shs_wq_is_hash_rx_new_pkt(hstats_p, node_p)) {
hstats_p->rx_pps = 0;
+ hstats_p->avg_pps = 0;
hstats_p->rx_bps = 0;
rmnet_shs_wq_update_hash_tinactive(hstats_p, node_p);
rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
@@ -514,6 +668,8 @@ void rmnet_shs_wq_update_hash_stats(struct rmnet_shs_wq_hstat_s *hstats_p)
hstats_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(skb_diff)/(tdiff);
hstats_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(bytes_diff)/(tdiff);
hstats_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(hstats_p->rx_bps);
+ hstats_p->avg_pps = rmnet_shs_wq_get_flow_avg_pps(hstats_p);
+ hstats_p->last_pps = hstats_p->rx_pps;
rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
@@ -529,6 +685,16 @@ static void rmnet_shs_wq_refresh_cpu_rates_debug(u16 cpu,
if (!rmnet_shs_stats_enabled)
return;
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
+ if (!cpu_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
rmnet_shs_cpu_rx_bps[cpu] = cpu_p->rx_bps;
rmnet_shs_cpu_rx_pps[cpu] = cpu_p->rx_pps;
rmnet_shs_cpu_rx_flows[cpu] = cpu_p->flows;
@@ -597,15 +763,20 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_p;
time_t tdiff;
u64 new_skbs, new_bytes;
+ u64 last_rx_bps, last_rx_pps;
u32 new_qhead;
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
cpu_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu];
new_skbs = cpu_p->rx_skbs - cpu_p->last_rx_skbs;
new_qhead = rmnet_shs_get_cpu_qhead(cpu);
- if (cpu_p->qhead_start == 0) {
+ if (cpu_p->qhead_start == 0)
cpu_p->qhead_start = new_qhead;
- }
cpu_p->last_qhead = cpu_p->qhead;
cpu_p->qhead = new_qhead;
@@ -619,23 +790,37 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
cpu_p->l_epoch = rmnet_shs_wq_tnsec;
cpu_p->rx_bps = 0;
cpu_p->rx_pps = 0;
+ cpu_p->avg_pps = 0;
+ if (rmnet_shs_userspace_connected) {
+ rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+ cpu_p, &cpu_caps);
+ }
rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
return;
}
tdiff = rmnet_shs_wq_tnsec - cpu_p->l_epoch;
new_bytes = cpu_p->rx_bytes - cpu_p->last_rx_bytes;
- cpu_p->last_rx_bps = cpu_p->rx_bps;
- cpu_p->last_rx_pps = cpu_p->rx_pps;
+
+ last_rx_bps = cpu_p->rx_bps;
+ last_rx_pps = cpu_p->rx_pps;
cpu_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_skbs)/tdiff;
cpu_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_bytes)/tdiff;
cpu_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(cpu_p->rx_bps);
+ cpu_p->avg_pps = rmnet_shs_wq_get_cpu_avg_pps(cpu);
+ cpu_p->last_rx_bps = last_rx_bps;
+ cpu_p->last_rx_pps = last_rx_pps;
cpu_p->l_epoch = rmnet_shs_wq_tnsec;
cpu_p->last_rx_skbs = cpu_p->rx_skbs;
cpu_p->last_rx_bytes = cpu_p->rx_bytes;
cpu_p->rx_bps_est = cpu_p->rx_bps;
+ if (rmnet_shs_userspace_connected) {
+ rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+ cpu_p, &cpu_caps);
+ }
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
RMNET_SHS_WQ_CPU_STATS_UPDATE, cpu,
cpu_p->flows, cpu_p->rx_pps,
@@ -643,6 +828,7 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
}
+
static void rmnet_shs_wq_refresh_all_cpu_stats(void)
{
u16 cpu;
@@ -666,6 +852,11 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
u64 skb_diff, byte_diff;
u16 cpu_num;
+ if (!hstat_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstat_p->node;
if (hstat_p->inactive_duration > 0)
@@ -683,10 +874,18 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
if (hstat_p->is_new_flow) {
rmnet_shs_wq_cpu_list_add(hstat_p,
&tbl_p->cpu_list[cpu_num].hstat_id);
+ rm_err("SHS_FLOW: adding flow 0x%x on cpu[%d] "
+ "pps: %llu | avg_pps %llu",
+ hstat_p->hash, hstat_p->current_cpu,
+ hstat_p->rx_pps, hstat_p->avg_pps);
hstat_p->is_new_flow = 0;
}
/* check if the flow has switched to another CPU*/
if (cpu_num != hstat_p->current_cpu) {
+ rm_err("SHS_FLOW: moving flow 0x%x on cpu[%d] to cpu[%d] "
+ "pps: %llu | avg_pps %llu",
+ hstat_p->hash, hstat_p->current_cpu, cpu_num,
+ hstat_p->rx_pps, hstat_p->avg_pps);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_FLOW_STATS_UPDATE_NEW_CPU,
hstat_p->hash, hstat_p->current_cpu,
@@ -739,6 +938,85 @@ void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
}
}
+/* Increment the per-flow counter for suggestion type */
+static void rmnet_shs_wq_inc_sugg_type(u32 sugg_type,
+ struct rmnet_shs_wq_hstat_s *hstat_p)
+{
+ if (sugg_type >= RMNET_SHS_WQ_SUGG_MAX || hstat_p == NULL)
+ return;
+
+ hstat_p->rmnet_shs_wq_suggs[sugg_type] += 1;
+}
+
+/* Change suggested cpu, return 1 if suggestion was made, 0 otherwise */
+static int rmnet_shs_wq_chng_flow_cpu(u16 old_cpu, u16 new_cpu,
+ struct rmnet_shs_wq_ep_s *ep,
+ u32 hash_to_move, u32 sugg_type)
+{
+ struct rmnet_shs_skbn_s *node_p;
+ struct rmnet_shs_wq_hstat_s *hstat_p;
+ int rc = 0;
+ u16 bkt;
+
+ if (!ep) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ return 0;
+ }
+
+ if (old_cpu >= MAX_CPUS || new_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+ if (!node_p)
+ continue;
+
+ if (!node_p->hstats)
+ continue;
+
+ hstat_p = node_p->hstats;
+
+ if (hash_to_move != 0) {
+ /* If hash_to_move is given, only move that flow,
+ * otherwise move all the flows on that cpu
+ */
+ if (hstat_p->hash != hash_to_move)
+ continue;
+ }
+
+ rm_err("SHS_HT: >> sugg cpu %d | old cpu %d | new_cpu %d | "
+ "map_cpu = %d | flow 0x%x",
+ hstat_p->suggested_cpu, old_cpu, new_cpu,
+ node_p->map_cpu, hash_to_move);
+
+ if ((hstat_p->suggested_cpu == old_cpu) &&
+ (node_p->dev == ep->ep)) {
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+ RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
+ hstat_p->hash, hstat_p->suggested_cpu,
+ new_cpu, 0xDEF, hstat_p, NULL);
+
+ node_p->hstats->suggested_cpu = new_cpu;
+ rmnet_shs_wq_inc_sugg_type(sugg_type, hstat_p);
+ if (hash_to_move) { /* Stop after moving one flow */
+ rm_err("SHS_CHNG: moving single flow: flow 0x%x "
+ "sugg_cpu changed from %d to %d",
+ hstat_p->hash, old_cpu,
+ node_p->hstats->suggested_cpu);
+ return 1;
+ }
+ rm_err("SHS_CHNG: moving all flows: flow 0x%x "
+ "sugg_cpu changed from %d to %d",
+ hstat_p->hash, old_cpu,
+ node_p->hstats->suggested_cpu);
+ rc |= 1;
+ }
+ }
+ return rc;
+}
+
u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk)
{
int cpu_num;
@@ -849,9 +1127,8 @@ u16 rmnet_shs_wq_find_cpu_to_move_flows(u16 current_cpu,
* for a few ticks and reset it afterwards
*/
- if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio) {
+ if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio)
return current_cpu;
- }
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
@@ -910,6 +1187,273 @@ void rmnet_shs_wq_find_cpu_and_move_flows(u16 cur_cpu)
rmnet_shs_wq_chng_suggested_cpu(cur_cpu, new_cpu, ep);
}
}
+
+/* Return 1 if a flow can be moved to dest_cpu for this endpoint,
+ * otherwise return 0: check the rps mask, that the cpu is online,
+ * and that the dest cpu is not isolated.
+ */
+int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
+ struct rmnet_shs_wq_ep_s *ep)
+{
+ u16 cpu_in_rps_mask = 0;
+
+ if (!ep) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ return 0;
+ }
+
+ if (current_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ cpu_in_rps_mask = (1 << dest_cpu) & ep->rps_config_msk;
+
+ rm_err("SHS_MASK: cur cpu [%d] | dest_cpu [%d] | "
+ "cpu isolation_mask = 0x%x | ep_rps_mask = 0x%x | "
+ "cpu_online(dest) = %d cpu_in_rps_mask = %d | "
+ "cpu isolated(dest) = %d",
+ current_cpu, dest_cpu, __cpu_isolated_mask, ep->rps_config_msk,
+ cpu_online(dest_cpu), cpu_in_rps_mask, cpu_isolated(dest_cpu));
+
+ /* We cannot move to dest cpu if the cur cpu is the same,
+ * the dest cpu is offline, dest cpu is not in the rps mask,
+ * or if the dest cpu is isolated
+ */
+ if (current_cpu == dest_cpu || !cpu_online(dest_cpu) ||
+ !cpu_in_rps_mask || cpu_isolated(dest_cpu)) {
+ return 0;
+ }
+
+ return 1;
+}
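A concrete instance of the mask test above, with ep->rps_config_msk = 0x0b (CPUs 0, 1 and 3 allowed):

/*
 *   dest_cpu = 2: (1 << 2) & 0x0b == 0x04 & 0x0b == 0  -> rejected
 *   dest_cpu = 3: (1 << 3) & 0x0b == 0x08 & 0x0b == 8  -> allowed,
 *                 provided CPU 3 is also online and not isolated
 */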
+
+/* rmnet_shs_wq_try_to_move_flow - try to make a flow suggestion
+ * return 1 if flow move was suggested, otherwise return 0
+ */
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+ u32 sugg_type)
+{
+ struct rmnet_shs_wq_ep_s *ep;
+
+ if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ /* Traverse the endpoint list; check whether the cpu can be used
+ * (online, in the rps mask, not isolated, etc.), then suggest
+ * changing the flow's cpu by passing its hash.
+ */
+ list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+ if (!ep)
+ continue;
+
+ if (!ep->is_ep_active)
+ continue;
+
+ if (!rmnet_shs_wq_check_cpu_move_for_ep(cur_cpu,
+ dest_cpu,
+ ep)) {
+ rm_err("SHS_FDESC: >> Cannot move flow 0x%x on ep"
+ " from cpu[%d] to cpu[%d]",
+ hash_to_move, cur_cpu, dest_cpu);
+ continue;
+ }
+
+ if (rmnet_shs_wq_chng_flow_cpu(cur_cpu, dest_cpu, ep,
+ hash_to_move, sugg_type)) {
+ rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
+ " move from cpu[%d] to cpu[%d] sugg_type [%d]",
+ hash_to_move, cur_cpu, dest_cpu, sugg_type);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Change flow segmentation, return 1 if set, 0 otherwise */
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+{
+ struct rmnet_shs_skbn_s *node_p;
+ struct rmnet_shs_wq_hstat_s *hstat_p;
+ u16 bkt;
+
+ hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+ if (!node_p)
+ continue;
+
+ if (!node_p->hstats)
+ continue;
+
+ hstat_p = node_p->hstats;
+
+ if (hstat_p->hash != hash_to_set)
+ continue;
+
+ rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
+ hash_to_set, seg_enable);
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+ RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+ hstat_p->hash, seg_enable,
+ 0xDEF, 0xDEF, hstat_p, NULL);
+
+ node_p->hstats->segment_enable = seg_enable;
+ return 1;
+ }
+
+ rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
+ hash_to_set, seg_enable);
+ return 0;
+}
+
+
+/* Comparison function to sort gold flow loads - based on flow avg_pps
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_flow_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct rmnet_shs_wq_gold_flow_s *flow_a;
+ struct rmnet_shs_wq_gold_flow_s *flow_b;
+
+ if (!a || !b)
+ return 0;
+
+ flow_a = list_entry(a, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+ flow_b = list_entry(b, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+
+ if (flow_a->avg_pps > flow_b->avg_pps)
+ return -1;
+ else if (flow_a->avg_pps < flow_b->avg_pps)
+ return 1;
+
+ return 0;
+}
+
+/* Comparison function to sort cpu capacities - based on cpu avg_pps capacity
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_cpu_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cpu_a;
+ struct rmnet_shs_wq_cpu_cap_s *cpu_b;
+
+ if (!a || !b)
+ return 0;
+
+ cpu_a = list_entry(a, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+ cpu_b = list_entry(b, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+
+ if (cpu_a->avg_pps_capacity > cpu_b->avg_pps_capacity)
+ return -1;
+ else if (cpu_a->avg_pps_capacity < cpu_b->avg_pps_capacity)
+ return 1;
+
+ return 0;
+}
+
+
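Note the inverted convention in both comparators above: returning a negative value when the first element is larger makes list_sort() produce descending order, so the heads of the sorted lists are the busiest flows and the highest-capacity CPUs. A hedged usage sketch:

	LIST_HEAD(caps);
	/* ... populate caps with struct rmnet_shs_wq_cpu_cap_s entries ... */
	list_sort(NULL, &caps, &cmp_fn_cpu_pps);
	/* caps now begins with the largest avg_pps_capacity */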
+/* Prints cpu stats and flows to dmesg for debugging */
+void rmnet_shs_wq_debug_print_flows(void)
+{
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+ struct rmnet_shs_wq_hstat_s *hnode;
+ int flows, i;
+ u16 cpu_num = 0;
+
+ if (!RMNET_SHS_DEBUG)
+ return;
+
+ for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
+ cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+ flows = rx_flow_tbl_p->cpu_list[cpu_num].flows;
+
+ rm_err("SHS_CPU: cpu[%d]: flows=%d pps=%llu bps=%llu "
+ "qhead_diff %u qhead_total = %u qhead_start = %u "
+ "qhead = %u qhead_last = %u isolated = %d ",
+ cpu_num, flows, cpu_node->rx_pps, cpu_node->rx_bps,
+ cpu_node->qhead_diff, cpu_node->qhead_total,
+ cpu_node->qhead_start,
+ cpu_node->qhead, cpu_node->last_qhead,
+ cpu_isolated(cpu_num));
+
+ list_for_each_entry(hnode,
+ &rmnet_shs_wq_hstat_tbl,
+ hstat_node_id) {
+ if (!hnode)
+ continue;
+
+ if (hnode->in_use == 0)
+ continue;
+
+ if (hnode->node) {
+ if (hnode->current_cpu == cpu_num)
+ rm_err("SHS_CPU: > flow 0x%x "
+ "with pps %llu avg_pps %llu rx_bps %llu ",
+ hnode->hash, hnode->rx_pps,
+ hnode->avg_pps, hnode->rx_bps);
+ }
+ } /* loop per flow */
+
+ for (i = 0; i < 3 - flows; i++) {
+ rm_err("%s", "SHS_CPU: > ");
+ }
+ } /* loop per cpu */
+}
+
+/* Prints the sorted gold flow list to dmesg */
+void rmnet_shs_wq_debug_print_sorted_gold_flows(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+ if (!RMNET_SHS_DEBUG)
+ return;
+
+ if (!gold_flows) {
+ rm_err("%s", "SHS_GDMA: Gold Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_GDMA: List of sorted gold flows:");
+ list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+ if (!gflow_node)
+ continue;
+
+ rm_err("SHS_GDMA: > flow 0x%x with pps %llu on cpu[%d]",
+ gflow_node->hash, gflow_node->rx_pps,
+ gflow_node->cpu_num);
+ }
+}
+
+/* Userspace evaluation: we send userspace the response to the sync
+ * message after updating shared memory. shsusr will send a netlink
+ * message if flows should be moved around.
+ */
+void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
+ struct list_head *gold_flows,
+ struct list_head *ss_flows)
+{
+ if (!cpu_caps || !gold_flows || !ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_sort(NULL, cpu_caps, &cmp_fn_cpu_pps);
+ list_sort(NULL, gold_flows, &cmp_fn_flow_pps);
+
+ rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
+ rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
+ rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+
+ rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_END,
+ 0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+}
+
+/* Default wq evaluation logic; used when rmnet_shs_userspace_connected is 0 */
void rmnet_shs_wq_eval_suggested_cpu(void)
{
@@ -1100,7 +1644,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
}
/* Increment CPU assignment idx to be ready for next flow assignment*/
- if ((cpu_assigned >= 0)|| ((ep->new_lo_idx + 1) >= ep->new_lo_max))
+ if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
return cpu_assigned;
@@ -1209,6 +1753,10 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
hash_del_rcu(&node_p->list);
kfree(node_p);
}
+ rm_err("SHS_FLOW: removing flow 0x%x on cpu[%d] "
+ "pps: %llu avg_pps: %llu",
+ hnode->hash, hnode->current_cpu,
+ hnode->rx_pps, hnode->avg_pps);
rmnet_shs_wq_cpu_list_remove(hnode);
if (hnode->is_perm == 0 || force_clean) {
rmnet_shs_wq_hstat_tbl_remove(hnode);
@@ -1258,6 +1806,11 @@ void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
struct rmnet_shs_wq_ep_s *tmp = NULL;
unsigned long flags;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
list_for_each_entry_safe(ep, tmp, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep)
@@ -1279,6 +1832,11 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)
struct rmnet_shs_wq_ep_s *ep = NULL;
unsigned long flags;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
@@ -1352,15 +1910,40 @@ void rmnet_shs_wq_update_stats(void)
if (hnode->node) {
rmnet_shs_wq_update_hash_stats(hnode);
rmnet_shs_wq_update_cpu_rx_tbl(hnode);
+
+ if (rmnet_shs_userspace_connected) {
+ if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+ /* Add gold flows to list */
+ rmnet_shs_wq_gflow_list_add(hnode, &gflows);
+ }
+ if (hnode->skb_tport_proto == IPPROTO_TCP) {
+ rmnet_shs_wq_ssflow_list_add(hnode, &ssflows);
+ }
+ } else {
+ /* Disable segmentation if userspace gets disconnected */
+ hnode->node->hstats->segment_enable = 0;
+ }
}
}
rmnet_shs_wq_refresh_all_cpu_stats();
rmnet_shs_wq_refresh_total_stats();
rmnet_shs_wq_refresh_dl_mrkr_stats();
- rmnet_shs_wq_eval_suggested_cpu();
+
+ if (rmnet_shs_userspace_connected) {
+ rm_err("%s", "SHS_UPDATE: Userspace connected, relying on userspace evaluation");
+ rmnet_shs_wq_eval_cpus_caps_and_flows(&cpu_caps, &gflows, &ssflows);
+ rmnet_shs_wq_cleanup_gold_flow_list(&gflows);
+ rmnet_shs_wq_cleanup_ss_flow_list(&ssflows);
+ rmnet_shs_wq_cleanup_cpu_caps_list(&cpu_caps);
+ } else {
+ rm_err("%s", "SHS_UPDATE: shs userspace not connected, using default logic");
+ rmnet_shs_wq_eval_suggested_cpu();
+ }
+
rmnet_shs_wq_refresh_new_flow_list();
/*Invoke after both the locks are released*/
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+ rmnet_shs_wq_debug_print_flows();
}
void rmnet_shs_wq_process_wq(struct work_struct *work)
@@ -1409,6 +1992,8 @@ void rmnet_shs_wq_exit(void)
if (!rmnet_shs_wq || !rmnet_shs_delayed_wq)
return;
+ rmnet_shs_wq_mem_deinit();
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
@@ -1439,6 +2024,7 @@ void rmnet_shs_wq_init_cpu_rx_flow_tbl(void)
rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu_num];
INIT_LIST_HEAD(&rx_flow_tbl_p->hstat_id);
+ rx_flow_tbl_p->cpu_num = cpu_num;
}
}
@@ -1463,6 +2049,13 @@ void rmnet_shs_wq_init(struct net_device *dev)
if (rmnet_shs_wq)
return;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
+ rmnet_shs_wq_mem_init();
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
@@ -1490,6 +2083,7 @@ void rmnet_shs_wq_init(struct net_device *dev)
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_END,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
}
+
int rmnet_shs_wq_get_num_cpu_flows(u16 cpu)
{
int flows = -1;
@@ -1561,6 +2155,11 @@ int rmnet_shs_wq_get_max_flows_per_cluster(u16 cpu)
void rmnet_shs_wq_inc_cpu_flow(u16 cpu)
{
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows++;
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_STATS,
@@ -1571,6 +2170,11 @@ void rmnet_shs_wq_inc_cpu_flow(u16 cpu)
void rmnet_shs_wq_dec_cpu_flow(u16 cpu)
{
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
if (rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows > 0)
rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows--;
@@ -1582,5 +2186,21 @@ void rmnet_shs_wq_dec_cpu_flow(u16 cpu)
u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu)
{
+
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
return rmnet_shs_cpu_rx_max_pps_thresh[cpu];
}
+
+void rmnet_shs_wq_ep_lock_bh(void)
+{
+ spin_lock_bh(&rmnet_shs_ep_lock);
+}
+
+void rmnet_shs_wq_ep_unlock_bh(void)
+{
+ spin_unlock_bh(&rmnet_shs_ep_lock);
+}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 90d1604..0d86200 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,11 @@
#include "rmnet_shs_config.h"
#include "rmnet_shs.h"
+#define RMNET_SHS_DEBUG 0
+
+#define rm_err(fmt, ...) \
+ do { if (RMNET_SHS_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
+
#define MAX_SUPPORTED_FLOWS_DEBUG 16
#define RMNET_SHS_RX_BPNSEC_TO_BPSEC(x) ((x)*1000000000)
@@ -28,6 +33,9 @@
#define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
#define RMNET_SHS_WQ_DELAY_TICKS 10
+extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS] __read_mostly;
+extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS] __read_mostly;
+
/* stores wq and end point details */
struct rmnet_shs_wq_ep_s {
@@ -50,7 +58,17 @@ struct list_head ep_id;
struct rmnet_shs_wq_ep_s ep;
};
+/* Types of suggestions made by shs wq */
+enum rmnet_shs_wq_suggestion_type {
+ RMNET_SHS_WQ_SUGG_NONE,
+ RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD,
+ RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER,
+ RMNET_SHS_WQ_SUGG_GOLD_BALANCE,
+ RMNET_SHS_WQ_SUGG_MAX,
+};
+
struct rmnet_shs_wq_hstat_s {
+ unsigned long int rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_MAX];
struct list_head cpu_node_id;
struct list_head hstat_node_id;
struct rmnet_shs_skbn_s *node; //back pointer to node
@@ -61,6 +79,8 @@ struct rmnet_shs_wq_hstat_s {
u64 rx_bytes;
u64 rx_pps; /*pkts per second*/
u64 rx_bps; /*bits per second*/
+ u64 last_pps;
+ u64 avg_pps;
u64 last_rx_skb;
u64 last_rx_bytes;
u32 rps_config_msk; /*configured rps mask for net device*/
@@ -69,13 +89,14 @@ struct rmnet_shs_wq_hstat_s {
u32 pri_core_msk; /* priority cores availability mask*/
u32 available_core_msk; /* other available cores for this flow*/
u32 hash; /*skb hash*/
+ int stat_idx; /*internal used for datatop*/
u16 suggested_cpu; /* recommended CPU to stamp pkts*/
u16 current_cpu; /* core where the flow is being processed*/
u16 skb_tport_proto;
- int stat_idx; /*internal used for datatop*/
u8 in_use;
u8 is_perm;
u8 is_new_flow;
+ u8 segment_enable; /* segment coalesces packets */
};
struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -97,6 +118,7 @@ struct rmnet_shs_wq_cpu_rx_pkt_q_s {
u32 qhead_start; /* start mark of total pp*/
u32 qhead_total; /* end mark of total pp*/
int flows;
+ u16 cpu_num;
};
struct rmnet_shs_wq_rx_flow_s {
@@ -134,7 +156,32 @@ struct rmnet_shs_delay_wq_s {
struct delayed_work wq;
};
+/* Structures to be used for creating sorted versions of flow and cpu lists */
+struct rmnet_shs_wq_cpu_cap_s {
+ struct list_head cpu_cap_list;
+ u64 pps_capacity;
+ u64 avg_pps_capacity;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_gold_flow_s {
+ struct list_head gflow_list;
+ u64 rx_pps;
+ u64 avg_pps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_ss_flow_s {
+ struct list_head ssflow_list;
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+/* Tracing Definitions */
enum rmnet_shs_wq_trace_func {
RMNET_SHS_WQ_INIT,
RMNET_SHS_WQ_PROCESS_WQ,
@@ -145,6 +192,7 @@ enum rmnet_shs_wq_trace_func {
RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_CPU_STATS,
RMNET_SHS_WQ_TOTAL_STATS,
+ RMNET_SHS_WQ_SHSUSR,
};
enum rmnet_shs_wq_trace_evt {
@@ -201,8 +249,13 @@ enum rmnet_shs_wq_trace_evt {
RMNET_SHS_WQ_INIT_END,
RMNET_SHS_WQ_EXIT_START,
RMNET_SHS_WQ_EXIT_END,
-
-
+ RMNET_SHS_WQ_TRY_PASS,
+ RMNET_SHS_WQ_TRY_FAIL,
+ RMNET_SHS_WQ_SHSUSR_SYNC_START,
+ RMNET_SHS_WQ_SHSUSR_SYNC_END,
+ RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+ RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+ RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
};
extern struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
@@ -226,4 +279,14 @@ void rmnet_shs_hstat_tbl_delete(void);
void rmnet_shs_wq_set_ep_active(struct net_device *dev);
void rmnet_shs_wq_reset_ep_active(struct net_device *dev);
void rmnet_shs_wq_refresh_new_flow_list(void);
+
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+ u32 sugg_type);
+
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+
+void rmnet_shs_wq_ep_lock_bh(void);
+
+void rmnet_shs_wq_ep_unlock_bh(void);
+
#endif /*_RMNET_SHS_WQ_H_*/
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
new file mode 100644
index 0000000..b28f0c2
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue Generic Netlink Functions
+ *
+ */
+
+#include "rmnet_shs_wq_genl.h"
+#include <net/sock.h>
+#include <linux/skbuff.h>
+
+MODULE_LICENSE("GPL v2");
+
+static struct net *last_net;
+static u32 last_snd_portid;
+
+uint32_t rmnet_shs_genl_seqnum;
+int rmnet_shs_userspace_connected;
+
+/* Static Functions and Definitions */
+static struct nla_policy rmnet_shs_genl_attr_policy[RMNET_SHS_GENL_ATTR_MAX + 1] = {
+ [RMNET_SHS_GENL_ATTR_INT] = { .type = NLA_S32 },
+ [RMNET_SHS_GENL_ATTR_SUGG] = { .len = sizeof(struct rmnet_shs_wq_sugg_info) },
+ [RMNET_SHS_GENL_ATTR_SEG] = { .len = sizeof(struct rmnet_shs_wq_seg_info) },
+ [RMNET_SHS_GENL_ATTR_STR] = { .type = NLA_NUL_STRING },
+};
+
+#define RMNET_SHS_GENL_OP(_cmd, _func) \
+ { \
+ .cmd = _cmd, \
+ .policy = rmnet_shs_genl_attr_policy, \
+ .doit = _func, \
+ .dumpit = NULL, \
+ .flags = 0, \
+ }
+
+static const struct genl_ops rmnet_shs_genl_ops[] = {
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_INIT_DMA,
+ rmnet_shs_genl_dma_init),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+ rmnet_shs_genl_try_to_move_flow),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+ rmnet_shs_genl_set_flow_segmentation),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_MEM_SYNC,
+ rmnet_shs_genl_mem_sync),
+};
+
+struct genl_family rmnet_shs_genl_family = {
+ .hdrsize = 0,
+ .name = RMNET_SHS_GENL_FAMILY_NAME,
+ .version = RMNET_SHS_GENL_VERSION,
+ .maxattr = RMNET_SHS_GENL_ATTR_MAX,
+ .ops = rmnet_shs_genl_ops,
+ .n_ops = ARRAY_SIZE(rmnet_shs_genl_ops),
+};
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, info->snd_seq+1, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+ if (rc != 0)
+ goto out;
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ return -1;
+}
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+
+ if (last_net == NULL) {
+ rm_err("SHS_GNL: FAILED to send int %d - last_net is NULL\n",
+ val);
+ return -1;
+ }
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ rc = genlmsg_unicast(last_net, skb, last_snd_portid);
+ if (rc != 0)
+ goto out;
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ rmnet_shs_userspace_connected = 0;
+ return -1;
+}
+
+
+int rmnet_shs_genl_send_msg_to_userspace(void)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+ int val = rmnet_shs_genl_seqnum++;
+
+ rm_err("SHS_GNL: Trying to send msg %d\n", val);
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ genlmsg_multicast(&rmnet_shs_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ rmnet_shs_userspace_connected = 0;
+ return -1;
+}
+
+/* Currently unused - handles a message from userspace to initialize
+ * the shared memory; the memory is initialized by the kernel wq
+ * automatically.
+ */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info)
+{
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_dma_init");
+
+ if (info == NULL) {
+ rm_err("%s", "SHS_GNL: an error occured - info is null");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct rmnet_shs_wq_seg_info seg_info;
+ int rc = 0;
+
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_segmentation");
+
+ if (info == NULL) {
+ rm_err("%s", "SHS_GNL: an error occured - info is null");
+ return -1;
+ }
+
+ na = info->attrs[RMNET_SHS_GENL_ATTR_SEG];
+ if (na) {
+ if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
+ rm_err("SHS_GNL: recv segmentation req "
+ "hash_to_set = 0x%x segment_enable = %u",
+ seg_info.hash_to_set,
+ seg_info.segment_enable);
+
+ rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
+ seg_info.segment_enable);
+
+ if (rc == 1) {
+ rmnet_shs_genl_send_int_to_userspace(info, 0);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+ RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+ seg_info.hash_to_set, seg_info.segment_enable,
+ 0xDEF, 0xDEF, NULL, NULL);
+ } else {
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+ RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
+ seg_info.hash_to_set, seg_info.segment_enable,
+ 0xDEF, 0xDEF, NULL, NULL);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: nla_memcpy failed %d\n",
+ RMNET_SHS_GENL_ATTR_SEG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: no info->attrs %d\n",
+ RMNET_SHS_GENL_ATTR_SEG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+
+ return 0;
+}
+
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct rmnet_shs_wq_sugg_info sugg_info;
+ int rc = 0;
+
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_try_to_move_flow");
+
+ if (info == NULL) {
+ rm_err("%s", "SHS_GNL: an error occured - info is null");
+ return -1;
+ }
+
+ na = info->attrs[RMNET_SHS_GENL_ATTR_SUGG];
+ if (na) {
+ if (nla_memcpy(&sugg_info, na, sizeof(sugg_info)) > 0) {
+ rm_err("SHS_GNL: cur_cpu =%u dest_cpu = %u "
+ "hash_to_move = 0x%x sugg_type = %u",
+ sugg_info.cur_cpu,
+ sugg_info.dest_cpu,
+ sugg_info.hash_to_move,
+ sugg_info.sugg_type);
+ rc = rmnet_shs_wq_try_to_move_flow(sugg_info.cur_cpu,
+ sugg_info.dest_cpu,
+ sugg_info.hash_to_move,
+ sugg_info.sugg_type);
+ if (rc == 1) {
+ rmnet_shs_genl_send_int_to_userspace(info, 0);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
+ sugg_info.cur_cpu, sugg_info.dest_cpu,
+ sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+
+ } else {
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_FAIL,
+ sugg_info.cur_cpu, sugg_info.dest_cpu,
+ sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: nla_memcpy failed %d\n",
+ RMNET_SHS_GENL_ATTR_SUGG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: no info->attrs %d\n",
+ RMNET_SHS_GENL_ATTR_SUGG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+
+ return 0;
+}
+
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info)
+{
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_mem_sync");
+
+ if (!rmnet_shs_userspace_connected)
+ rmnet_shs_userspace_connected = 1;
+
+ /* TODO: detect when userspace disconnects. If we don't get a
+ * sync message within the next 2 wq ticks, assume we are
+ * disconnected.
+ */
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_START,
+ 0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+ if (info == NULL) {
+ rm_err("%s", "SHS_GNL: an error occured - info is null");
+ return -1;
+ }
+
+ last_net = genl_info_net(info);
+ last_snd_portid = info->snd_portid;
+ return 0;
+}
+
+/* register new generic netlink family */
+int rmnet_shs_wq_genl_init(void)
+{
+ int ret;
+
+ rmnet_shs_userspace_connected = 0;
+ ret = genl_register_family(&rmnet_shs_genl_family);
+ if (ret != 0) {
+ rm_err("SHS_GNL: register family failed: %i", ret);
+ genl_unregister_family(&rmnet_shs_genl_family);
+ return -1;
+ }
+
+ rm_err("SHS_GNL: successfully registered generic netlink familiy: %s",
+ RMNET_SHS_GENL_FAMILY_NAME);
+
+ return 0;
+}
+
+/* Unregister the generic netlink family */
+int rmnet_shs_wq_genl_deinit(void)
+{
+ int ret;
+
+ ret = genl_unregister_family(&rmnet_shs_genl_family);
+ if (ret != 0)
+ rm_err("SHS_GNL: unregister family failed: %i\n", ret);
+ rmnet_shs_userspace_connected = 0;
+ return 0;
+}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
new file mode 100644
index 0000000..333de48
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_GENL_H_
+#define _RMNET_SHS_WQ_GENL_H_
+
+#include "rmnet_shs.h"
+#include <net/genetlink.h>
+
+/* Generic Netlink Definitions */
+#define RMNET_SHS_GENL_VERSION 1
+#define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
+#define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+
+extern int rmnet_shs_userspace_connected;
+
+enum {
+ RMNET_SHS_GENL_CMD_UNSPEC,
+ RMNET_SHS_GENL_CMD_INIT_DMA,
+ RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+ RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+ RMNET_SHS_GENL_CMD_MEM_SYNC,
+ __RMNET_SHS_GENL_CMD_MAX,
+};
+
+enum {
+ RMNET_SHS_GENL_ATTR_UNSPEC,
+ RMNET_SHS_GENL_ATTR_STR,
+ RMNET_SHS_GENL_ATTR_INT,
+ RMNET_SHS_GENL_ATTR_SUGG,
+ RMNET_SHS_GENL_ATTR_SEG,
+ __RMNET_SHS_GENL_ATTR_MAX,
+};
+#define RMNET_SHS_GENL_ATTR_MAX (__RMNET_SHS_GENL_ATTR_MAX - 1)
+
+struct rmnet_shs_wq_sugg_info {
+ uint32_t hash_to_move;
+ uint32_t sugg_type;
+ uint16_t cur_cpu;
+ uint16_t dest_cpu;
+};
+
+struct rmnet_shs_wq_seg_info {
+ uint32_t hash_to_set;
+ uint32_t segment_enable;
+};
+
+/* Function Prototypes */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info);
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val);
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val);
+
+int rmnet_shs_genl_send_msg_to_userspace(void);
+
+int rmnet_shs_wq_genl_init(void);
+
+int rmnet_shs_wq_genl_deinit(void);
+
+#endif /*_RMNET_SHS_WQ_GENL_H_*/
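The sugg/seg structs above travel as raw binary attributes. A hedged sketch of userspace asking the kernel to move a flow, reusing the libnl-3 setup from the earlier MEM_SYNC example (the hash and CPU numbers are made-up illustrations):

struct rmnet_shs_wq_sugg_info sugg = {
	.hash_to_move = 0x1a2b3c4d, /* example flow hash */
	.sugg_type    = 0,
	.cur_cpu      = 1,          /* example source CPU */
	.dest_cpu     = 4,          /* example destination CPU */
};

genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
	    RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW, RMNET_SHS_GENL_VERSION);
nla_put(msg, RMNET_SHS_GENL_ATTR_SUGG, sizeof(sugg), &sugg);
nl_send_auto(sk, msg);

The handlers in rmnet_shs_wq_genl.c answer with rmnet_shs_genl_send_int_to_userspace() (at least on their error paths), so the daemon should expect an integer reply on the same socket.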
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
new file mode 100644
index 0000000..1675517
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -0,0 +1,689 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue Shared Memory Functions
+ *
+ */
+
+#include "rmnet_shs_wq_mem.h"
+#include <linux/proc_fs.h>
+
+MODULE_LICENSE("GPL v2");
+
+struct proc_dir_entry *shs_proc_dir;
+
+/* Fixed arrays to copy to userspace over netlink */
+struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
+struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+
+struct list_head gflows = LIST_HEAD_INIT(gflows); /* gold flows */
+struct list_head ssflows = LIST_HEAD_INIT(ssflows); /* slow start flows */
+struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
+
+struct rmnet_shs_mmap_info *cap_shared;
+struct rmnet_shs_mmap_info *gflow_shared;
+struct rmnet_shs_mmap_info *ssflow_shared;
+
+/* Static Functions and Definitions */
+static void rmnet_shs_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void rmnet_shs_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static int rmnet_shs_vm_fault(struct vm_fault *vmf)
+{
+ struct page *page = NULL;
+ struct rmnet_shs_mmap_info *info;
+
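+	/* All faults map the single shared page allocated at open time;
+	 * get_page() takes a reference for the mapping.
+	 */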
+ rmnet_shs_wq_ep_lock_bh();
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static const struct vm_operations_struct rmnet_shs_vm_ops = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault,
+};
+
+static int rmnet_shs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
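+	/* Pages are provided by the fault handler; forbid expanding the
+	 * mapping and exclude it from core dumps.
+	 */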
+ vma->vm_ops = &rmnet_shs_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
+static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (!cap_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+ if (!info)
+ goto fail;
+
+ info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!info->data) {
+ kfree(info);
+ goto fail;
+ }
+
+ cap_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)cap_shared));
+ }
+
+ filp->private_data = cap_shared;
+ rmnet_shs_wq_ep_unlock_bh();
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - OK\n");
+
+ return 0;
+
+fail:
+ rmnet_shs_wq_ep_unlock_bh();
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+}
+
+static int rmnet_shs_open_g_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open g_flows - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (!gflow_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+ if (!info)
+ goto fail;
+
+ info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!info->data) {
+ kfree(info);
+ goto fail;
+ }
+
+ gflow_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)gflow_shared));
+ }
+ filp->private_data = gflow_shared;
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+
+fail:
+ rmnet_shs_wq_ep_unlock_bh();
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+}
+
+static int rmnet_shs_open_ss_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open ss_flows - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (!ssflow_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+ if (!info)
+ goto fail;
+
+ info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!info->data) {
+ kfree(info);
+ goto fail;
+ }
+
+ ssflow_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)ssflow_shared));
+ }
+ filp->private_data = ssflow_shared;
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+
+fail:
+ rmnet_shs_wq_ep_unlock_bh();
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+}
+
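The three open handlers above differ only in which shared pointer they populate. A possible consolidation (hypothetical helper, not part of this patch) keeps the locking and failure handling in one place:

static int rmnet_shs_open_shared(struct rmnet_shs_mmap_info **shared,
				 struct file *filp)
{
	struct rmnet_shs_mmap_info *info;

	rmnet_shs_wq_ep_lock_bh();
	if (!*shared) {
		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info)
			goto fail;

		info->data = (char *)get_zeroed_page(GFP_ATOMIC);
		if (!info->data) {
			kfree(info);
			goto fail;
		}
		*shared = info;
	}
	filp->private_data = *shared;
	rmnet_shs_wq_ep_unlock_bh();
	return 0;

fail:
	rmnet_shs_wq_ep_unlock_bh();
	return -ENOMEM;
}

With that, rmnet_shs_open_caps() would reduce to return rmnet_shs_open_shared(&cap_shared, filp); and likewise for the other two.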
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+	ssize_t ret;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+
+	rmnet_shs_wq_ep_lock_bh();
+	info = filp->private_data;
+	if (!info) {
+		rmnet_shs_wq_ep_unlock_bh();
+		return -EINVAL;
+	}
+	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
+	if (copy_to_user(buf, info->data, ret))
+		ret = -EFAULT;
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return ret;
+}
+
+static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+	ssize_t ret;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
+
+	rmnet_shs_wq_ep_lock_bh();
+	info = filp->private_data;
+	if (!info) {
+		rmnet_shs_wq_ep_unlock_bh();
+		return -EINVAL;
+	}
+	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
+	if (copy_from_user(info->data, buf, ret))
+		ret = -EFAULT;
+	else
+		ret = len;
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return ret;
+}
+
+static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (cap_shared) {
+ info = filp->private_data;
+ cap_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static int rmnet_shs_release_g_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (gflow_shared) {
+ info = filp->private_data;
+ gflow_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (ssflow_shared) {
+ info = filp->private_data;
+ ssflow_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+static const struct file_operations rmnet_shs_caps_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_caps,
+ .release = rmnet_shs_release_caps,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_g_flows_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_g_flows,
+ .release = rmnet_shs_release_g_flows,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_ss_flows_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_ss_flows,
+ .release = rmnet_shs_release_ss_flows,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
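These fops are wired to proc entries under /proc/shs/ by rmnet_shs_wq_mem_init() at the bottom of this file. A minimal userspace sketch of mapping the CPU capacity page read-only (the 4096 matches RMNET_SHS_BUFFER_SIZE and the single get_zeroed_page() backing each file):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	uint64_t pps_capacity;
	const char *buf;
	int fd = open("/proc/shs/rmnet_shs_caps", O_RDONLY);

	if (fd < 0)
		return 1;

	buf = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* First field of the first packed entry is u64 pps_capacity
	 * (layout per rmnet_shs_wq_mem.h) */
	memcpy(&pps_capacity, buf, sizeof(pps_capacity));
	printf("cpu cap[0] pps_capacity = %llu\n",
	       (unsigned long long)pps_capacity);

	munmap((void *)buf, 4096);
	close(fd);
	return 0;
}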
+
+
+/* Global Functions */
+/* Add a flow to the slow start flow list */
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+
+ if (!hnode || !ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ ssflow_node = kzalloc(sizeof(*ssflow_node), GFP_ATOMIC);
+ if (ssflow_node != NULL) {
+ ssflow_node->avg_pps = hnode->avg_pps;
+ ssflow_node->cpu_num = hnode->current_cpu;
+ ssflow_node->hash = hnode->hash;
+ ssflow_node->rx_pps = hnode->rx_pps;
+ ssflow_node->rx_bps = hnode->rx_bps;
+
+ list_add(&ssflow_node->ssflow_list, ss_flows);
+ } else {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ }
+}
+
+/* Clean up slow start flow list */
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+ struct list_head *ptr, *next;
+
+ if (!ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, ss_flows) {
+ ssflow_node = list_entry(ptr,
+ struct rmnet_shs_wq_ss_flow_s,
+ ssflow_list);
+ if (!ssflow_node)
+ continue;
+
+ list_del_init(&ssflow_node->ssflow_list);
+ kfree(ssflow_node);
+ }
+}
+
+/* Add a flow to the gold flow list */
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+ if (!hnode || !gold_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+ gflow_node = kzalloc(sizeof(*gflow_node), GFP_ATOMIC);
+ if (gflow_node != NULL) {
+ gflow_node->avg_pps = hnode->avg_pps;
+ gflow_node->cpu_num = hnode->current_cpu;
+ gflow_node->hash = hnode->hash;
+ gflow_node->rx_pps = hnode->rx_pps;
+
+ list_add(&gflow_node->gflow_list, gold_flows);
+ } else {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ }
+ }
+}
+
+/* Clean up gold flow list */
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+ struct list_head *ptr, *next;
+
+ if (!gold_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, gold_flows) {
+ gflow_node = list_entry(ptr,
+ struct rmnet_shs_wq_gold_flow_s,
+ gflow_list);
+ if (!gflow_node)
+ continue;
+
+ list_del_init(&gflow_node->gflow_list);
+ kfree(gflow_node);
+ }
+}
+
+/* Add a cpu to the cpu capacities list */
+void rmnet_shs_wq_cpu_caps_list_add(
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+ struct list_head *cpu_caps)
+{
+	u64 pps_uthresh, pps_lthresh;
+	struct rmnet_shs_wq_cpu_cap_s *cap_node;
+	int flows = 0;
+
+	if (!rx_flow_tbl_p || !cpu_node || !cpu_caps) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	flows = rx_flow_tbl_p->cpu_list[cpu_node->cpu_num].flows;
+
+ pps_uthresh = rmnet_shs_cpu_rx_max_pps_thresh[cpu_node->cpu_num];
+ pps_lthresh = rmnet_shs_cpu_rx_min_pps_thresh[cpu_node->cpu_num];
+
+ cap_node = kzalloc(sizeof(*cap_node), GFP_ATOMIC);
+ if (cap_node == NULL) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ return;
+ }
+
+ cap_node->cpu_num = cpu_node->cpu_num;
+
+ /* No flows means capacity is upper threshold */
+ if (flows <= 0) {
+ cap_node->pps_capacity = pps_uthresh;
+ cap_node->avg_pps_capacity = pps_uthresh;
+ list_add(&cap_node->cpu_cap_list, cpu_caps);
+ return;
+ }
+
+ /* Instantaneous PPS capacity */
+ if (cpu_node->rx_pps < pps_uthresh) {
+ cap_node->pps_capacity =
+ pps_uthresh - cpu_node->rx_pps;
+ } else {
+ cap_node->pps_capacity = 0;
+ }
+
+ /* Average PPS capacity */
+ if (cpu_node->avg_pps < pps_uthresh) {
+ cap_node->avg_pps_capacity =
+ pps_uthresh - cpu_node->avg_pps;
+ } else {
+ cap_node->avg_pps_capacity = 0;
+ }
+
+ list_add(&cap_node->cpu_cap_list, cpu_caps);
+}
+
+/* Clean up cpu capacities list */
+/* Can reuse this memory since the number of CPUs doesn't change */
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cap_node;
+ struct list_head *ptr, *next;
+
+ if (!cpu_caps) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, cpu_caps) {
+ cap_node = list_entry(ptr,
+ struct rmnet_shs_wq_cpu_cap_s,
+ cpu_cap_list);
+ if (!cap_node)
+ continue;
+
+ list_del_init(&cap_node->cpu_cap_list);
+ kfree(cap_node);
+ }
+}
+
+/* Convert the kernel linked list to an array, then memcpy it to shared mem.
+ * > The cpu capacity linked list is sorted: highest capacity first
+ * | cap_0 | cap_1 | cap_2 | ... | cap_7 |
+ */
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cap_node;
+
+ uint16_t idx = 0;
+
+ if (!cpu_caps) {
+ rm_err("%s", "SHS_SCAPS: CPU Capacities List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SCAPS: Sorted CPU Capacities:");
+ list_for_each_entry(cap_node, cpu_caps, cpu_cap_list) {
+ if (!cap_node)
+ continue;
+
+ if (idx >= MAX_CPUS)
+ break;
+
+ rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
+ "avg pps cap = %llu",
+ cap_node->cpu_num, cap_node->pps_capacity,
+ cap_node->avg_pps_capacity);
+
+ rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: cap_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)cap_shared), cap_shared);
+ if (!cap_shared) {
+ rm_err("%s", "SHS_WRITE: cap_shared is NULL");
+ return;
+ }
+ memcpy((char *) cap_shared->data,
+ (void *) &rmnet_shs_wq_cap_list_usr[0],
+ sizeof(rmnet_shs_wq_cap_list_usr));
+}
+
+/* Convert the kernel linked list of gold flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The gold flow list is sorted: heaviest gold flow is first
+ * | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ * | 16 bits | ... |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+ uint16_t idx = 0;
+ int num_gold_flows = 0;
+
+ if (!gold_flows) {
+ rm_err("%s", "SHS_SGOLD: Gold Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SGOLD: List of sorted gold flows:");
+ list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+ if (!gflow_node)
+ continue;
+
+		if (gflow_node->rx_pps == 0)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+		rm_err("SHS_SGOLD: > flow 0x%x with pps %llu on cpu[%d]",
+		       gflow_node->hash, gflow_node->rx_pps,
+		       gflow_node->cpu_num);
+		num_gold_flows += 1;
+
+ /* Update the cached gold flow list */
+ rmnet_shs_wq_gflows_usr[idx].cpu_num = gflow_node->cpu_num;
+ rmnet_shs_wq_gflows_usr[idx].hash = gflow_node->hash;
+ rmnet_shs_wq_gflows_usr[idx].avg_pps = gflow_node->avg_pps;
+ rmnet_shs_wq_gflows_usr[idx].rx_pps = gflow_node->rx_pps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: gflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)gflow_shared),
+ gflow_shared);
+
+ if (!gflow_shared) {
+ rm_err("%s", "SHS_WRITE: gflow_shared is NULL");
+ return;
+ }
+
+ rm_err("SHS_SGOLD: num gold flows = %u\n", idx);
+
+	/* Copy num gold flows into first 2 bytes,
+	 * then copy in the cached gold flow array
+	 */
+ memcpy(((char *)gflow_shared->data), &idx, sizeof(idx));
+ memcpy(((char *)gflow_shared->data + sizeof(uint16_t)),
+ (void *) &rmnet_shs_wq_gflows_usr[0],
+ sizeof(rmnet_shs_wq_gflows_usr));
+}
+
+/* Convert the kernel linked list of slow start TCP flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The ss flow list is sorted: heaviest ss flow is first
+ * | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ * | 16 bits | ... |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+ uint16_t idx = 0;
+ int num_ss_flows = 0;
+
+ if (!ss_flows) {
+ rm_err("%s", "SHS_SLOW: SS Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SLOW: List of sorted ss flows:");
+ list_for_each_entry(ssflow_node, ss_flows, ssflow_list) {
+ if (!ssflow_node)
+ continue;
+
+		if (ssflow_node->rx_pps == 0)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+ rm_err("SHS_SLOW: > flow 0x%x with pps %llu on cpu[%d]",
+ ssflow_node->hash, ssflow_node->rx_pps,
+ ssflow_node->cpu_num);
+ num_ss_flows += 1;
+
+ /* Update the cached ss flow list */
+ rmnet_shs_wq_ssflows_usr[idx].cpu_num = ssflow_node->cpu_num;
+ rmnet_shs_wq_ssflows_usr[idx].hash = ssflow_node->hash;
+ rmnet_shs_wq_ssflows_usr[idx].avg_pps = ssflow_node->avg_pps;
+ rmnet_shs_wq_ssflows_usr[idx].rx_pps = ssflow_node->rx_pps;
+ rmnet_shs_wq_ssflows_usr[idx].rx_bps = ssflow_node->rx_bps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: ssflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)ssflow_shared),
+ ssflow_shared);
+
+ if (!ssflow_shared) {
+ rm_err("%s", "SHS_WRITE: ssflow_shared is NULL");
+ return;
+ }
+
+ rm_err("SHS_SLOW: num ss flows = %u\n", idx);
+
+	/* Copy num ss flows into first 2 bytes,
+	 * then copy in the cached ss flow array
+	 */
+ memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
+ memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
+ (void *) &rmnet_shs_wq_ssflows_usr[0],
+ sizeof(rmnet_shs_wq_ssflows_usr));
+}
+
+/* Creates the proc folder and files for shs shared memory */
+void rmnet_shs_wq_mem_init(void)
+{
+	shs_proc_dir = proc_mkdir(RMNET_SHS_PROC_DIR, NULL);
+
+ proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
+ proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
+ proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+
+ rmnet_shs_wq_ep_lock_bh();
+ cap_shared = NULL;
+ gflow_shared = NULL;
+ ssflow_shared = NULL;
+ rmnet_shs_wq_ep_unlock_bh();
+}
+
+/* Remove shs files and folders from proc fs */
+void rmnet_shs_wq_mem_deinit(void)
+{
+ remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
+
+ rmnet_shs_wq_ep_lock_bh();
+ cap_shared = NULL;
+ gflow_shared = NULL;
+ ssflow_shared = NULL;
+ rmnet_shs_wq_ep_unlock_bh();
+}
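Given the | num_flows (u16) | packed entries | layout that the two update functions above write, a userspace reader of the gold flow page is a short parse loop. A sketch (the local struct mirrors rmnet_shs_wq_gflows_usr_s from rmnet_shs_wq_mem.h; names here are illustrative):

#include <stdint.h>
#include <string.h>

struct __attribute__((__packed__)) gflow_entry {
	uint64_t rx_pps;
	uint64_t avg_pps;
	uint64_t rx_bps;
	uint32_t hash;
	uint16_t cpu_num;
};

/* buf points at the mmap'd /proc/shs/rmnet_shs_flows page */
static void parse_gold_flows(const char *buf)
{
	struct gflow_entry flow;
	uint16_t num_flows;
	uint16_t i;

	memcpy(&num_flows, buf, sizeof(num_flows));
	buf += sizeof(num_flows);

	for (i = 0; i < num_flows; i++) {
		memcpy(&flow, buf + (size_t)i * sizeof(flow), sizeof(flow));
		/* flow.hash, flow.rx_pps, flow.cpu_num are now usable */
	}
}

The slow start page at /proc/shs/rmnet_shs_ss_flows parses identically, with the extra rx_bps field already present in both packed layouts.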
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
new file mode 100644
index 0000000..2e5e889
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_MEM_H_
+#define _RMNET_SHS_WQ_MEM_H_
+
+#include "rmnet_shs.h"
+
+/* Shared memory files */
+#define RMNET_SHS_PROC_DIR "shs"
+#define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
+#define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
+#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+
+#define RMNET_SHS_MAX_USRFLOWS (128)
+
+struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
+ u64 pps_capacity;
+ u64 avg_pps_capacity;
+ u64 bps_capacity;
+ u16 cpu_num;
+};
+
+struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+extern struct list_head gflows;
+extern struct list_head ssflows;
+extern struct list_head cpu_caps;
+
+/* Buffer size for read and write syscalls */
+enum {RMNET_SHS_BUFFER_SIZE = 4096};
+
+struct rmnet_shs_mmap_info {
+ char *data;
+};
+
+/* Function Definitions */
+
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *ss_flows);
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *gold_flows);
+
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows);
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows);
+
+void rmnet_shs_wq_cpu_caps_list_add(
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+ struct list_head *cpu_caps);
+
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+
+void rmnet_shs_wq_mem_init(void);
+
+void rmnet_shs_wq_mem_deinit(void);
+
+#endif /* _RMNET_SHS_WQ_MEM_H_ */
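A sanity check on the layout: each packed flow entry is 8+8+8+4+2 = 30 bytes, so the worst case is 2 + 128 * 30 = 3842 bytes, which fits RMNET_SHS_BUFFER_SIZE (4096) with room to spare. A kernel-side compile-time assertion (hypothetical, e.g. dropped into rmnet_shs_wq_mem_init()) would lock that in:

BUILD_BUG_ON(sizeof(uint16_t) +
	     RMNET_SHS_MAX_USRFLOWS *
	     sizeof(struct rmnet_shs_wq_gflows_usr_s) >
	     RMNET_SHS_BUFFER_SIZE);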