diff options
author    Himanshu Agarwal <himanaga@codeaurora.org>    2017-08-08 17:12:28 +0530
committer snandini <snandini@codeaurora.org>            2017-08-16 17:39:17 -0700
commit    c0144381b5ff2d8a72785db0e27b77594705aa54 (patch)
tree      eaefcde60663d3a1e8b3ea66e6b99965701246b3 /core
parent    9b402504ade573fb22394c9c84d7f903d8ef9e92 (diff)
download  qcacld-c0144381b5ff2d8a72785db0e27b77594705aa54.tar.gz
qcacld-3.0: Map/Unmap IPA RX buffers at driver load/unload time
Map/Unmap IPA RX buffers at driver load/unload time when WLAN
SMMU is enabled.
Change-Id: Ibe2c5d234cc67a18979aed9af273d2340beb124a
CRs-Fixed: 2088439
Diffstat (limited to 'core')
-rw-r--r--  core/cds/src/cds_api.c       8
-rw-r--r--  core/dp/htt/htt.c            5
-rw-r--r--  core/dp/htt/htt_h2t.c       10
-rw-r--r--  core/dp/htt/htt_internal.h  12
-rw-r--r--  core/dp/htt/htt_rx.c       114
-rw-r--r--  core/dp/htt/htt_tx.c        68
-rw-r--r--  core/dp/htt/htt_types.h      2
7 files changed, 136 insertions(+), 83 deletions(-)
diff --git a/core/cds/src/cds_api.c b/core/cds/src/cds_api.c index fccdbe8dd1..5059930fd6 100644 --- a/core/cds/src/cds_api.c +++ b/core/cds/src/cds_api.c @@ -2734,11 +2734,19 @@ void cds_smmu_mem_map_setup(qdf_device_t osdev) osdev->smmu_s1_enabled = true; } +#ifdef IPA_OFFLOAD int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr) { return hdd_ipa_uc_smmu_map(map, num_buf, buf_arr); } #else +int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr) +{ + return 0; +} +#endif + +#else void cds_smmu_mem_map_setup(qdf_device_t osdev) { osdev->smmu_s1_enabled = false; diff --git a/core/dp/htt/htt.c b/core/dp/htt/htt.c index 1a71143f80..34a09d0a48 100644 --- a/core/dp/htt/htt.c +++ b/core/dp/htt/htt.c @@ -452,6 +452,10 @@ htt_attach(struct htt_pdev_t *pdev, int desc_pool_size) int i; int ret = 0; + pdev->is_ipa_uc_enabled = false; + if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) + pdev->is_ipa_uc_enabled = true; + ret = htt_tx_attach(pdev, desc_pool_size); if (ret) goto fail1; @@ -782,7 +786,6 @@ int htt_ipa_uc_attach(struct htt_pdev_t *pdev) { int error; - pdev->uc_map_reqd = 0; /* TX resource attach */ error = htt_tx_ipa_uc_attach( pdev, diff --git a/core/dp/htt/htt_h2t.c b/core/dp/htt/htt_h2t.c index bf825f6d4c..df0b9b2639 100644 --- a/core/dp/htt/htt_h2t.c +++ b/core/dp/htt/htt_h2t.c @@ -1249,16 +1249,6 @@ int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && uc_active && !is_tx) { - if (htt_rx_ipa_uc_buf_pool_map(pdev)) { - qdf_print("%s: Unable to create mapping for IPA rx buffers\n", - __func__); - htt_htc_pkt_free(pdev, pkt); - return -A_NO_MEMORY; - } - pdev->uc_map_reqd = 1; - } - /* reserve room for HTC header */ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ), diff --git a/core/dp/htt/htt_internal.h b/core/dp/htt/htt_internal.h index 
9f7f513bdd..adf35add34 100644 --- a/core/dp/htt/htt_internal.h +++ b/core/dp/htt/htt_internal.h @@ -575,13 +575,6 @@ int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev); int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev); -/** - * htt_rx_ipa_uc_buf_pool_map() - create mappings for IPA rx buffers - * @pdev: htt context - * - * Return: 0 success - */ -int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev); #else /** * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource @@ -624,11 +617,6 @@ static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev) return 0; } -static inline int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev) -{ - return 0; -} - #endif /* IPA_OFFLOAD */ /* Maximum Outstanding Bus Download */ diff --git a/core/dp/htt/htt_rx.c b/core/dp/htt/htt_rx.c index cbbd06ae8d..5635c4b046 100644 --- a/core/dp/htt/htt_rx.c +++ b/core/dp/htt/htt_rx.c @@ -131,10 +131,23 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev) struct htt_rx_hash_entry *hash_entry; struct htt_rx_hash_bucket **hash_table; struct htt_list_node *list_iter = NULL; + qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL; + uint32_t num_unmapped = 0; if (NULL == pdev->rx_ring.hash_table) return; + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { + mem_map_table = qdf_mem_map_table_alloc( + pdev->rx_ring.fill_level); + if (!mem_map_table) { + qdf_print("%s: Failed to allocate memory for mem map table\n", + __func__); + return; + } + mem_info = mem_map_table; + } + qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock)); hash_table = pdev->rx_ring.hash_table; pdev->rx_ring.hash_table = NULL; @@ -149,6 +162,16 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev) pdev->rx_ring. 
listnode_offset); if (hash_entry->netbuf) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + pdev->is_ipa_uc_enabled) { + qdf_update_mem_map_table(pdev->osdev, + mem_info, + QDF_NBUF_CB_PADDR( + hash_entry->netbuf), + HTT_RX_BUF_SIZE); + mem_info++; + num_unmapped++; + } #ifdef DEBUG_DMA_DONE qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf, QDF_DMA_BIDIRECTIONAL); @@ -172,6 +195,12 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev) qdf_spinlock_destroy(&(pdev->rx_ring.rx_hash_lock)); + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { + if (num_unmapped) + cds_smmu_map_unmap(false, num_unmapped, + mem_map_table); + qdf_mem_free(mem_map_table); + } } #endif @@ -446,7 +475,7 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num) int num_alloc = 0; idx = *(pdev->rx_ring.alloc_idx.vaddr); - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { mem_map_table = qdf_mem_map_table_alloc(num); if (!mem_map_table) { qdf_print("%s: Failed to allocate memory for mem map table\n", @@ -543,7 +572,8 @@ moretofill: pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf; } - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + pdev->is_ipa_uc_enabled) { qdf_update_mem_map_table(pdev->osdev, mem_info, paddr, HTT_RX_BUF_SIZE); mem_info++; @@ -565,7 +595,7 @@ moretofill: } free_mem_map_table: - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { cds_smmu_map_unmap(true, num_alloc, mem_map_table); qdf_mem_free(mem_map_table); } @@ -702,8 +732,31 @@ void htt_rx_detach(struct htt_pdev_t *pdev) htt_rx_hash_deinit(pdev); } else { int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld; - + qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL; + uint32_t num_unmapped = 0; + + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + 
pdev->is_ipa_uc_enabled) { + mem_map_table = qdf_mem_map_table_alloc( + pdev->rx_ring.fill_level); + if (!mem_map_table) { + qdf_print("%s: Failed to allocate memory for mem map table\n", + __func__); + return; + } + mem_info = mem_map_table; + } while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + pdev->is_ipa_uc_enabled) { + qdf_update_mem_map_table(pdev->osdev, mem_info, + QDF_NBUF_CB_PADDR( + pdev->rx_ring.buf.netbufs_ring[ + sw_rd_idx]), + HTT_RX_BUF_SIZE); + mem_info++; + num_unmapped++; + } #ifdef DEBUG_DMA_DONE qdf_nbuf_unmap(pdev->osdev, pdev->rx_ring.buf. @@ -721,6 +774,14 @@ void htt_rx_detach(struct htt_pdev_t *pdev) sw_rd_idx &= pdev->rx_ring.size_mask; } qdf_mem_free(pdev->rx_ring.buf.netbufs_ring); + + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + pdev->is_ipa_uc_enabled) { + if (num_unmapped) + cds_smmu_map_unmap(false, num_unmapped, + mem_map_table); + qdf_mem_free(mem_map_table); + } } qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev, @@ -2301,7 +2362,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev, /* Get the total number of MSDUs */ msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1)); HTT_RX_CHECK_MSDU_COUNT(msdu_count); - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { mem_map_table = qdf_mem_map_table_alloc(msdu_count); if (!mem_map_table) { qdf_print("%s: Failed to allocate memory for mem map table\n", @@ -2334,7 +2395,8 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev, } while (msdu_count > 0) { - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && + pdev->is_ipa_uc_enabled) { qdf_update_mem_map_table(pdev->osdev, mem_info, QDF_NBUF_CB_PADDR(msdu), HTT_RX_BUF_SIZE); @@ -2479,7 +2541,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev, } free_mem_map_table: - if (qdf_mem_smmu_s1_enabled(pdev->osdev) && 
pdev->uc_map_reqd) { + if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) { if (num_unmapped) cds_smmu_map_unmap(false, num_unmapped, mem_map_table); @@ -3880,44 +3942,6 @@ int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev) htt_rx_ipa_uc_free_wdi2_rsc(pdev); return 0; } - -int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev) -{ - struct htt_rx_hash_entry *hash_entry; - struct htt_list_node *list_iter = NULL; - qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL; - uint32_t num_alloc = 0; - uint32_t i; - - mem_map_table = qdf_mem_map_table_alloc(HTT_RX_RING_SIZE_MAX); - if (!mem_map_table) { - qdf_print("%s: Failed to allocate memory for mem map table\n", - __func__); - return 1; - } - mem_info = mem_map_table; - for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) { - list_iter = pdev->rx_ring.hash_table[i]->listhead.next; - while (list_iter != &pdev->rx_ring.hash_table[i]->listhead) { - hash_entry = (struct htt_rx_hash_entry *)( - (char *)list_iter - - pdev->rx_ring.listnode_offset); - if (hash_entry->netbuf) { - qdf_update_mem_map_table(pdev->osdev, - mem_info, - QDF_NBUF_CB_PADDR(hash_entry->netbuf), - HTT_RX_BUF_SIZE); - mem_info++; - num_alloc++; - } - list_iter = list_iter->next; - } - } - cds_smmu_map_unmap(true, num_alloc, mem_map_table); - qdf_mem_free(mem_map_table); - - return 0; -} #endif /* IPA_OFFLOAD */ /** diff --git a/core/dp/htt/htt_tx.c b/core/dp/htt/htt_tx.c index b536f52023..b054d0c90a 100644 --- a/core/dp/htt/htt_tx.c +++ b/core/dp/htt/htt_tx.c @@ -1140,9 +1140,8 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev, ring_vaddr++; if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { - qdf_update_mem_map_table(pdev->osdev, mem_info, - shared_tx_buffer->mem_info.iova, - uc_tx_buf_sz); + *mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[ + tx_buffer_count]->mem_info; mem_info++; } } @@ -1192,19 +1191,40 @@ free_mem_map_table: static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev) { uint16_t idx; + qdf_mem_info_t *mem_map_table 
= NULL, *mem_info = NULL; + uint32_t num_unmapped = 0; + + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + mem_map_table = qdf_mem_map_table_alloc( + pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt); + if (!mem_map_table) { + qdf_print("%s: Failed to allocate memory for mem map table\n", + __func__); + return; + } + mem_info = mem_map_table; + } for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) { if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) { - if (qdf_mem_smmu_s1_enabled(pdev->osdev)) - cds_smmu_map_unmap(false, 1, - &pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[ - idx]->mem_info); + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + *mem_info = pdev->ipa_uc_tx_rsc. + tx_buf_pool_strg[idx]->mem_info; + mem_info++; + num_unmapped++; + } qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_tx_rsc. tx_buf_pool_strg[idx]); pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL; } } + + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + if (num_unmapped) + cds_smmu_map_unmap(false, num_unmapped, mem_map_table); + qdf_mem_free(mem_map_table); + } } #else static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev, @@ -1276,9 +1296,8 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev, ring_vaddr++; if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { - qdf_update_mem_map_table(pdev->osdev, mem_info, - shared_tx_buffer->mem_info.iova, - uc_tx_buf_sz); + *mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[ + tx_buffer_count]->mem_info; mem_info++; } } @@ -1320,19 +1339,40 @@ free_mem_map_table: static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev) { uint16_t idx; + qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL; + uint32_t num_unmapped = 0; + + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + mem_map_table = qdf_mem_map_table_alloc( + pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt); + if (!mem_map_table) { + qdf_print("%s: Failed to allocate memory for mem map table\n", + __func__); + return; + } + mem_info = mem_map_table; + } for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 
idx++) { if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) { - if (qdf_mem_smmu_s1_enabled(pdev->osdev)) - cds_smmu_map_unmap(false, 1, - &pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[ - idx]->mem_info); + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + *mem_info = pdev->ipa_uc_tx_rsc. + tx_buf_pool_strg[idx]->mem_info; + mem_info++; + num_unmapped++; + } qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_tx_rsc. tx_buf_pool_strg[idx]); pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL; } } + + if (qdf_mem_smmu_s1_enabled(pdev->osdev)) { + if (num_unmapped) + cds_smmu_map_unmap(false, num_unmapped, mem_map_table); + qdf_mem_free(mem_map_table); + } } #endif diff --git a/core/dp/htt/htt_types.h b/core/dp/htt/htt_types.h index c86570cb73..90ea91444f 100644 --- a/core/dp/htt/htt_types.h +++ b/core/dp/htt/htt_types.h @@ -410,7 +410,7 @@ struct htt_pdev_t { struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc; struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc; - int uc_map_reqd; + int is_ipa_uc_enabled; struct htt_tx_credit_t htt_tx_credit; |