author    jitiphil <jitiphil@codeaurora.org>   2018-06-07 22:49:24 +0530
committer Roger Wang <wangroger@google.com>    2019-01-29 07:05:21 +0000
commit    e95d77b89dcbb17d1f9cfa2e1cbfc8cc3f55fdce (patch)
tree      47c8f9074e77c8bd9c942172ee45fadca7676165
parent    9a43b2f0653fa909fd28d684c02653b0d1fc490d (diff)
qcacld-3.0: Implement descriptor pool for fw stats
The kernel address is used as a cookie to keep track of stats
requests. This address can be disclosed to the target, leading to a
security vulnerability. Implement a FW stats descriptor pool, and use
a descriptor ID to keep track of stats requests, instead of the
kernel address, to prevent the kernel address leak.

Bug: 112277911
Test: Regression
Change-Id: Ib49150da899c0b9314f614868a90867f4aa92d3d
CRs-Fixed: 2246110
Signed-off-by: Ecco Park <eccopark@google.com>
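Before the per-file hunks, the gist of the fix as a minimal standalone
sketch: a fixed-size descriptor array doubles as a freelist, only the
small array index is ever handed to the target as the cookie, and the
index is range-checked on the way back. All names here (pool_init,
desc_alloc, desc_complete) are illustrative, not the driver's; the real
implementation in ol_txrx.c below adds qdf spinlocks and an
"initialized" atomic around the same structure.

/* Minimal sketch of the descriptor-pool-as-cookie pattern
 * (illustrative names only; see ol_txrx.c hunks for the real code). */
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 10

struct desc {
	void *req;          /* host-side request state; never sent to FW */
	uint8_t id;         /* small index: the only value used as cookie */
	struct desc *next;  /* freelist link */
};

static struct desc pool[POOL_SIZE];
static struct desc *freelist;

static void pool_init(void)
{
	int i;

	for (i = 0; i < POOL_SIZE - 1; i++) {
		pool[i].id = i;
		pool[i].next = &pool[i + 1];
	}
	pool[i].id = i;
	pool[i].next = NULL;
	freelist = &pool[0];
}

/* Allocate: hand out a pool index instead of a kernel pointer. */
static struct desc *desc_alloc(void *req)
{
	struct desc *d = freelist;

	if (!d)
		return NULL;    /* pool exhausted */
	freelist = d->next;
	d->req = req;
	return d;
}

/* Complete: validate the ID from the target before trusting it. */
static void *desc_complete(uint8_t id)
{
	struct desc *d;
	void *req;

	if (id >= POOL_SIZE)
		return NULL;    /* reject out-of-range cookies */
	d = &pool[id];
	req = d->req;
	d->req = NULL;
	d->next = freelist; /* return descriptor to the freelist */
	freelist = d;
	return req;
}

int main(void)
{
	int dummy_req = 42;
	struct desc *d;

	pool_init();
	d = desc_alloc(&dummy_req);
	printf("cookie sent to FW: %u\n", (unsigned)d->id);
	printf("req recovered: %d\n", *(int *)desc_complete(d->id));
	return 0;
}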
 core/dp/htt/htt_h2t.c            |   8
 core/dp/htt/htt_t2h.c            |   3
 core/dp/ol/inc/ol_htt_api.h      |   4
 core/dp/ol/inc/ol_txrx_htt_api.h |   2
 core/dp/txrx/ol_txrx.c           | 206
 core/dp/txrx/ol_txrx.h           |  12
 core/dp/txrx/ol_txrx_types.h     |  17
 7 files changed, 233 insertions(+), 19 deletions(-)
diff --git a/core/dp/htt/htt_h2t.c b/core/dp/htt/htt_h2t.c
index ddbe9aac5d..1b05904b1f 100644
--- a/core/dp/htt/htt_h2t.c
+++ b/core/dp/htt/htt_h2t.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -767,7 +767,7 @@ int
htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
uint32_t stats_type_upload_mask,
uint32_t stats_type_reset_mask,
- uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
+ uint8_t cfg_stat_type, uint32_t cfg_val, uint8_t cookie)
{
struct htt_htc_pkt *pkt;
qdf_nbuf_t msg;
@@ -830,11 +830,11 @@ htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
/* cookie LSBs */
msg_word++;
- *msg_word = cookie & 0xffffffff;
+ *msg_word = cookie;
/* cookie MSBs */
msg_word++;
- *msg_word = cookie >> 32;
+ *msg_word = 0;
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
diff --git a/core/dp/htt/htt_t2h.c b/core/dp/htt/htt_t2h.c
index a0c4b44014..d55f19bab4 100644
--- a/core/dp/htt/htt_t2h.c
+++ b/core/dp/htt/htt_t2h.c
@@ -394,11 +394,10 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
}
case HTT_T2H_MSG_TYPE_STATS_CONF:
{
- uint64_t cookie;
+ uint8_t cookie;
uint8_t *stats_info_list;
cookie = *(msg_word + 1);
- cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
stats_info_list = (uint8_t *) (msg_word + 3);
htc_pm_runtime_put(pdev->htc_pdev);
diff --git a/core/dp/ol/inc/ol_htt_api.h b/core/dp/ol/inc/ol_htt_api.h
index e597dc748b..daa6be3bc7 100644
--- a/core/dp/ol/inc/ol_htt_api.h
+++ b/core/dp/ol/inc/ol_htt_api.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -164,7 +164,7 @@ htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
uint32_t stats_type_upload_mask,
uint32_t stats_type_reset_mask,
uint8_t cfg_stats_type,
- uint32_t cfg_val, uint64_t cookie);
+ uint32_t cfg_val, uint8_t cookie);
/**
* @brief Get the fields from HTT T2H stats upload message's stats info header
diff --git a/core/dp/ol/inc/ol_txrx_htt_api.h b/core/dp/ol/inc/ol_txrx_htt_api.h
index 277569bbe5..78e4387b80 100644
--- a/core/dp/ol/inc/ol_txrx_htt_api.h
+++ b/core/dp/ol/inc/ol_txrx_htt_api.h
@@ -604,7 +604,7 @@ ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
*/
void
ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
- uint64_t cookie, uint8_t *stats_info_list);
+ uint8_t cookie, uint8_t *stats_info_list);
/**
* @brief Process a tx inspect message sent by the target.
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index d02b180630..48bdfb83dd 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -1468,6 +1468,7 @@ ol_txrx_pdev_attach(ol_pdev_handle ctrl_pdev,
TXRX_STATS_INIT(pdev);
ol_txrx_tso_stats_init(pdev);
+ ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);
TAILQ_INIT(&pdev->vdev_list);
TAILQ_INIT(&pdev->roam_stale_peer_list);
@@ -1533,6 +1534,7 @@ fail2:
fail1:
ol_txrx_tso_stats_deinit(pdev);
+ ol_txrx_fw_stats_desc_pool_deinit(pdev);
qdf_mem_free(pdev);
fail0:
@@ -2316,6 +2318,7 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev)
htt_pdev_free(pdev->htt_pdev);
ol_txrx_peer_find_detach(pdev);
ol_txrx_tso_stats_deinit(pdev);
+ ol_txrx_fw_stats_desc_pool_deinit(pdev);
ol_txrx_pdev_txq_log_destroy(pdev);
ol_txrx_pdev_grp_stat_destroy(pdev);
@@ -4141,20 +4144,171 @@ void
ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
uint8_t cfg_stats_type, uint32_t cfg_val)
{
- uint64_t dummy_cookie = 0;
+ uint8_t dummy_cookie = 0;
htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
0 /* reset mask */,
cfg_stats_type, cfg_val, dummy_cookie);
}
+/**
+ * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ * @pool_size: Size of fw stats descriptor pool
+ *
+ * Return: 0 for success, error code on failure.
+ */
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+ uint8_t pool_size)
+{
+ int i;
+
+ if (!pdev) {
+ ol_txrx_err("%s: pdev is NULL", __func__);
+ return -EINVAL;
+ }
+ pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
+ sizeof(struct ol_txrx_fw_stats_desc_elem_t));
+ if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+ ol_txrx_err("%s: failed to allocate desc pool", __func__);
+ return -ENOMEM;
+ }
+ pdev->ol_txrx_fw_stats_desc_pool.freelist =
+ &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
+ pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
+
+ for (i = 0; i < (pool_size - 1); i++) {
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
+ &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
+ }
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+ pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
+ qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+ qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
+ qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
+ return 0;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
+ * fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: None
+ */
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_txrx_fw_stats_desc_elem_t *desc;
+ uint8_t i;
+
+ if (!pdev) {
+ ol_txrx_err("%s: pdev is NULL", __func__);
+ return;
+ }
+ if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+ ol_txrx_err("%s: Pool is not initialized", __func__);
+ return;
+ }
+ if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+ ol_txrx_err("%s: Pool is not allocated", __func__);
+ return;
+ }
+
+ qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+ qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
+ for (i = 0; i < pdev->ol_txrx_fw_stats_desc_pool.pool_size; i++) {
+ desc = &pdev->ol_txrx_fw_stats_desc_pool.pool[i];
+ if (desc && desc->desc.req)
+ qdf_mem_free(desc->desc.req);
+ }
+ qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
+ pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
+
+ pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
+ pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
+ qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+}
+
+/**
+ * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
+ * free descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: pointer to fw stats descriptor, NULL on failure
+ */
+struct ol_txrx_fw_stats_desc_t
+ *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_txrx_fw_stats_desc_t *desc = NULL;
+
+ qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+ if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+ qdf_spin_unlock_bh(&pdev->
+ ol_txrx_fw_stats_desc_pool.pool_lock);
+ ol_txrx_err("%s: Pool deinitialized", __func__);
+ return NULL;
+ }
+ if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
+ desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
+ pdev->ol_txrx_fw_stats_desc_pool.freelist =
+ pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
+ }
+ qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+
+ if (desc)
+ ol_txrx_dbg("%s: desc_id %d allocated",
+ __func__, desc->desc_id);
+ else
+ ol_txrx_err("%s: fw stats descriptors are exhausted", __func__);
+
+ return desc;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_get_req() - Get the request tracked by a
+ * descriptor and put the descriptor back into the free pool
+ * @pdev: handle to ol txrx pdev
+ * @desc_id: descriptor ID of the fw stats descriptor
+ *
+ * Return: pointer to the stats request, NULL on failure
+ */
+struct ol_txrx_stats_req_internal
+ *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+ unsigned char desc_id)
+{
+ struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
+ struct ol_txrx_stats_req_internal *req;
+
+ qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+ if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+ qdf_spin_unlock_bh(&pdev->
+ ol_txrx_fw_stats_desc_pool.pool_lock);
+ ol_txrx_err("%s: Desc ID %u Pool deinitialized",
+ __func__, desc_id);
+ return NULL;
+ }
+ desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
+ req = desc_elem->desc.req;
+ desc_elem->desc.req = NULL;
+ desc_elem->next =
+ pdev->ol_txrx_fw_stats_desc_pool.freelist;
+ pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
+ qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+ return req;
+}
+
A_STATUS
ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
bool per_vdev, bool response_expected)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
- uint64_t cookie;
+ uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
struct ol_txrx_stats_req_internal *non_volatile_req;
+ struct ol_txrx_fw_stats_desc_t *desc = NULL;
+ struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
if (!pdev ||
req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
@@ -4174,11 +4328,16 @@ ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
non_volatile_req->base = *req;
non_volatile_req->serviced = 0;
non_volatile_req->offset = 0;
-
- /* use the non-volatile request object's address as the cookie */
- cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
-
if (response_expected) {
+ desc = ol_txrx_fw_stats_desc_alloc(pdev);
+ if (!desc) {
+ qdf_mem_free(non_volatile_req);
+ return A_ERROR;
+ }
+
+ /* use the desc id as the cookie */
+ cookie = desc->desc_id;
+ desc->req = non_volatile_req;
qdf_spin_lock_bh(&pdev->req_list_spinlock);
TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
pdev->req_list_depth++;
@@ -4192,9 +4351,28 @@ ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
cookie)) {
if (response_expected) {
qdf_spin_lock_bh(&pdev->req_list_spinlock);
- TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
+ TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
+ req_list_elem);
pdev->req_list_depth--;
qdf_spin_unlock_bh(&pdev->req_list_spinlock);
+ if (desc) {
+ qdf_spin_lock_bh(&pdev->
+ ol_txrx_fw_stats_desc_pool.
+ pool_lock);
+ desc->req = NULL;
+ elem = container_of(desc,
+ struct
+ ol_txrx_fw_stats_desc_elem_t,
+ desc);
+ elem->next =
+ pdev->ol_txrx_fw_stats_desc_pool.
+ freelist;
+ pdev->ol_txrx_fw_stats_desc_pool.
+ freelist = elem;
+ qdf_spin_unlock_bh(&pdev->
+ ol_txrx_fw_stats_desc_pool.
+ pool_lock);
+ }
}
qdf_mem_free(non_volatile_req);
@@ -4209,7 +4387,7 @@ ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
void
ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
- uint64_t cookie, uint8_t *stats_info_list)
+ uint8_t cookie, uint8_t *stats_info_list)
{
enum htt_dbg_stats_type type;
enum htt_dbg_stats_status status;
@@ -4219,8 +4397,16 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
int more = 0;
int found = 0;
- req = ol_txrx_u64_to_stats_ptr(cookie);
-
+ if (cookie >= FW_STATS_DESC_POOL_SIZE) {
+ ol_txrx_err("%s: Cookie is not valid", __func__);
+ return;
+ }
+ req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
+ if (!req) {
+ ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
+ (uint8_t)cookie);
+ return;
+ }
qdf_spin_lock_bh(&pdev->req_list_spinlock);
TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
if (req == tmp) {
diff --git a/core/dp/txrx/ol_txrx.h b/core/dp/txrx/ol_txrx.h
index dd9a67e1d2..bde9ae2f93 100644
--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h
@@ -78,6 +78,9 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
#define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
#endif
+#ifndef FW_STATS_DESC_POOL_SIZE
+#define FW_STATS_DESC_POOL_SIZE 10
+#endif
#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
#define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
@@ -206,4 +209,13 @@ void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id);
void ol_txrx_peer_detach_force_delete(ol_txrx_peer_handle peer);
void peer_unmap_timer_handler(unsigned long data);
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+ uint8_t pool_size);
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev);
+struct ol_txrx_fw_stats_desc_t
+ *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t
+ *pdev);
+struct ol_txrx_stats_req_internal *ol_txrx_fw_stats_desc_get_req(struct
+ ol_txrx_pdev_t *pdev, uint8_t desc_id);
+
#endif /* _OL_TXRX__H_ */
diff --git a/core/dp/txrx/ol_txrx_types.h b/core/dp/txrx/ol_txrx_types.h
index df7877d958..a678bfc1d6 100644
--- a/core/dp/txrx/ol_txrx_types.h
+++ b/core/dp/txrx/ol_txrx_types.h
@@ -556,6 +556,15 @@ struct ol_txrx_stats_req_internal {
int offset;
};
+struct ol_txrx_fw_stats_desc_t {
+ struct ol_txrx_stats_req_internal *req;
+ unsigned char desc_id;
+};
+
+struct ol_txrx_fw_stats_desc_elem_t {
+ struct ol_txrx_fw_stats_desc_elem_t *next;
+ struct ol_txrx_fw_stats_desc_t desc;
+};
/*
* As depicted in the diagram below, the pdev contains an array of
@@ -668,6 +677,14 @@ struct ol_txrx_pdev_t {
qdf_atomic_t target_tx_credit;
qdf_atomic_t orig_target_tx_credit;
+ struct {
+ uint16_t pool_size;
+ struct ol_txrx_fw_stats_desc_elem_t *pool;
+ struct ol_txrx_fw_stats_desc_elem_t *freelist;
+ qdf_spinlock_t pool_lock;
+ qdf_atomic_t initialized;
+ } ol_txrx_fw_stats_desc_pool;
+
/* Peer mac address to staid mapping */
struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];
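
For reference, a standalone sketch of how the one-byte descriptor ID
rides in the HTT message's existing two-word (64-bit) cookie field,
matching the htt_h2t.c and htt_t2h.c hunks above: the LSB word carries
the pool index, the MSB word is zeroed, and the receive side rejects
any value outside the pool before indexing. write_cookie/read_cookie
are illustrative names, not driver functions.

/* Sketch of the cookie round trip over the two 32-bit HTT cookie
 * words (illustrative; the driver does this inline in the hunks above). */
#include <assert.h>
#include <stdint.h>

#define FW_STATS_DESC_POOL_SIZE 10

/* H2T: write the cookie words of the stats request message. */
static void write_cookie(uint32_t *msg_word, uint8_t desc_id)
{
	msg_word[0] = desc_id; /* cookie LSBs: just the pool index */
	msg_word[1] = 0;       /* cookie MSBs: no kernel address here */
}

/* T2H: read back and validate before indexing the pool. */
static int read_cookie(const uint32_t *msg_word, uint8_t *desc_id)
{
	uint32_t cookie = msg_word[0];

	if (cookie >= FW_STATS_DESC_POOL_SIZE)
		return -1;     /* reject forged or corrupt cookies */
	*desc_id = (uint8_t)cookie;
	return 0;
}

int main(void)
{
	uint32_t msg[2];
	uint8_t id;

	write_cookie(msg, 7);
	assert(read_cookie(msg, &id) == 0 && id == 7);
	return 0;
}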