qcacld-3.0: Fix QCA_LL_PDEV_TX_FLOW_CONTROL invalid memory access

Add QCA_LL_PDEV_TX_FLOW_CONTROL for platforms where both
QCA_LL_LEGACY_TX_FLOW_CONTROL and QCA_LL_TX_FLOW_CONTROL_V2 are
disabled, to avoid frame drops in the driver that lead to poor TCP
TX throughput. Change NUM_TX_QUEUES to 5 in this case to avoid an
invalid memory access in wlan_hdd_netif_queue_control().

Change-Id: Ifa649e31a41d1bf89eadc8cc7e9520f0e27b9fe4
CRs-Fixed: 2466996
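
For context, a minimal sketch (not part of this change) of the failure mode the NUM_TX_QUEUES bump guards against, assuming the dedicated priority queue sits at netdev sub-queue index 4; the helper and index names below are illustrative:

#include <linux/netdevice.h>

/* Illustrative only: with pdev flow control enabled, the netif queue control
 * path also stops/wakes a dedicated priority queue. If the net_device was
 * registered with only 4 TX sub-queues, touching index 4 reads past the
 * allocated queue array, hence NUM_TX_QUEUES must become 5. */
#define HDD_PRIO_QUEUE_INDEX 4	/* assumed index of the priority sub-queue */

static void example_stop_priority_queue(struct net_device *dev)
{
	/* invalid access if dev was created with alloc_netdev_mq(..., 4) */
	netif_stop_subqueue(dev, HDD_PRIO_QUEUE_INDEX);
}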
hangtian 2019-06-07 10:39:38 +08:00 committed by nshrivas
parent 67888e2e8d
commit b9c9136eaf
14 changed files with 201 additions and 89 deletions

Kbuild
View File

@ -2087,6 +2087,7 @@ cppflags-y += -DQCA_SUPPORT_TXRX_LOCAL_PEER_ID
cppflags-$(CONFIG_WLAN_TX_FLOW_CONTROL_V2) += -DQCA_LL_TX_FLOW_CONTROL_V2
cppflags-$(CONFIG_WLAN_TX_FLOW_CONTROL_V2) += -DQCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
cppflags-$(CONFIG_WLAN_TX_FLOW_CONTROL_LEGACY) += -DQCA_LL_LEGACY_TX_FLOW_CONTROL
cppflags-$(CONFIG_WLAN_PDEV_TX_FLOW_CONTROL) += -DQCA_LL_PDEV_TX_FLOW_CONTROL
ifeq ($(BUILD_DEBUG_VERSION), y)
cppflags-y += -DWLAN_DEBUG

View File

@ -255,6 +255,10 @@ endif
ifeq ($(CONFIG_ARCH_QCS405), y)
CONFIG_WLAN_TX_FLOW_CONTROL_V2 := n
# Flag to improve TCP TX throughput on platforms where both
# CONFIG_WLAN_TX_FLOW_CONTROL_LEGACY and CONFIG_WLAN_TX_FLOW_CONTROL_V2
# are disabled, by avoiding frame drops in the driver
CONFIG_WLAN_PDEV_TX_FLOW_CONTROL := y
endif
# Flag to enable LFR Subnet Detection

View File

@ -52,7 +52,7 @@ CONFIG_HIF_SNOC:= y
CONFIG_WLAN_FASTPATH := y
CONFIG_FEATURE_TSO := y
CONFIG_WLAN_NAPI := y
CONFIG_WLAN_TX_FLOW_CONTROL_V2 := y
CONFIG_WLAN_TX_FLOW_CONTROL_V2 := n
CONFIG_ATH_11AC_TXCOMPACT := y
CONFIG_TX_CREDIT_RECLAIM_SUPPORT := n
CONFIG_CHECKSUM_OFFLOAD := y
@ -64,6 +64,7 @@ CONFIG_DEBUG_RX_RING_BUFFER := n
CONFIG_RX_PERFORMANCE := y
CONFIG_QCS403_MEM_OPTIMIZE := y
CONFIG_TGT_NUM_MSDU_DESC := 900
CONFIG_WLAN_PDEV_TX_FLOW_CONTROL := y
ifeq ($(CONFIG_INET_LRO), y)
CONFIG_WLAN_LRO := y

View File

@ -84,8 +84,10 @@ struct cds_config_info {
uint8_t reorder_offload;
uint8_t uc_offload_enabled;
bool enable_rxthread;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
uint32_t tx_flow_stop_queue_th;
uint32_t tx_flow_start_queue_offset;
#endif
uint8_t enable_dp_rx_threads;
#ifdef WLAN_FEATURE_LPSS
bool is_lpass_enabled;

View File

@ -312,6 +312,7 @@ cds_cfg_update_ac_specs_params(struct txrx_pdev_cfg_param_t *olcfg,
}
}
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
static inline void
cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
struct txrx_pdev_cfg_param_t *cdp_cfg)
@ -321,6 +322,12 @@ cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
cdp_cfg->tx_flow_start_queue_offset =
cfg_get(psoc, CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
}
#else
static inline void
cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
struct txrx_pdev_cfg_param_t *cdp_cfg)
{}
#endif
/**
* cds_cdp_cfg_attach() - attach data path config module

View File

@ -92,8 +92,10 @@ struct txrx_pdev_cfg_t {
bool ip_tcp_udp_checksum_offload;
bool enable_rxthread;
bool ce_classify_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
uint32_t tx_flow_stop_queue_th;
uint32_t tx_flow_start_queue_offset;
#endif
bool flow_steering_enabled;
/*
* To track if credit reporting through
@ -126,8 +128,16 @@ struct txrx_pdev_cfg_t {
*
* Return: none
*/
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
struct txrx_pdev_cfg_param_t *cfg_param);
#else
static inline
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
struct txrx_pdev_cfg_param_t *cfg_param)
{
}
#endif
/**
* ol_pdev_cfg_attach - setup configuration parameters
@ -480,9 +490,11 @@ int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(struct cdp_cfg *cfg_pdev)
}
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev);
int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev);
#endif
bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev);

View File

@ -23,6 +23,7 @@
unsigned int vow_config;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
* ol_tx_set_flow_control_parameters() - set flow control parameters
* @cfg_ctx: cfg context
@ -40,6 +41,7 @@ void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
cfg_ctx->tx_flow_stop_queue_th =
cfg_param->tx_flow_stop_queue_th;
}
#endif
#ifdef CONFIG_HL_SUPPORT
@ -407,6 +409,7 @@ int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
return cfg->enable_rxthread;
}
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
* ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
* @pdev : handle to the physical device
@ -432,7 +435,7 @@ int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
return cfg->tx_flow_start_queue_offset;
}
#endif
#ifdef IPA_OFFLOAD
unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev)

View File

@ -129,7 +129,99 @@ ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
#endif
#ifndef QCA_LL_TX_FLOW_CONTROL_V2
#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
/**
* ol_tx_do_pdev_flow_control_pause() - pause queues when stop_th is reached
* @pdev: pdev handle
*
* Return: void
*/
static void ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
{
struct ol_txrx_vdev_t *vdev;
if (qdf_unlikely(pdev->tx_desc.num_free <
pdev->tx_desc.stop_th &&
pdev->tx_desc.num_free >=
pdev->tx_desc.stop_priority_th &&
pdev->tx_desc.status ==
FLOW_POOL_ACTIVE_UNPAUSED)) {
pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
/* pause network NON PRIORITY queues */
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_STOP_NON_PRIORITY_QUEUE,
WLAN_DATA_FLOW_CONTROL);
}
} else if (qdf_unlikely((pdev->tx_desc.num_free <
pdev->tx_desc.stop_priority_th) &&
pdev->tx_desc.status ==
FLOW_POOL_NON_PRIO_PAUSED)) {
pdev->tx_desc.status = FLOW_POOL_ACTIVE_PAUSED;
/* pause priority queue */
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_NETIF_PRIORITY_QUEUE_OFF,
WLAN_DATA_FLOW_CONTROL_PRIORITY);
}
}
}
/**
* ol_tx_do_pdev_flow_control_unpause() - unpause queues when start_th is restored
* @pdev: pdev handle
*
* Return: void
*/
static void ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
{
struct ol_txrx_vdev_t *vdev;
switch (pdev->tx_desc.status) {
case FLOW_POOL_ACTIVE_PAUSED:
if (pdev->tx_desc.num_free >
pdev->tx_desc.start_priority_th) {
/* unpause priority queue */
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_NETIF_PRIORITY_QUEUE_ON,
WLAN_DATA_FLOW_CONTROL_PRIORITY);
}
pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
}
break;
case FLOW_POOL_NON_PRIO_PAUSED:
if (pdev->tx_desc.num_free > pdev->tx_desc.start_th) {
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_WAKE_NON_PRIORITY_QUEUE,
WLAN_DATA_FLOW_CONTROL);
}
pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
}
break;
case FLOW_POOL_INVALID:
if (pdev->tx_desc.num_free == pdev->tx_desc.pool_size)
ol_txrx_err("pool is INVALID State!!");
break;
case FLOW_POOL_ACTIVE_UNPAUSED:
break;
default:
ol_txrx_err("pool is INACTIVE State!!\n");
break;
};
}
#else
static inline void
ol_tx_do_pdev_flow_control_pause(struct ol_txrx_pdev_t *pdev)
{
}
static inline void
ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
{
}
#endif
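
As a rough worked example (all numbers assumed for illustration): with a 900-descriptor pool, stop_th = 135 and stop_priority_th = 90, the pause helper above moves the pool from FLOW_POOL_ACTIVE_UNPAUSED to FLOW_POOL_NON_PRIO_PAUSED once num_free drops below 135, then to FLOW_POOL_ACTIVE_PAUSED once it drops below 90; the unpause helper walks the same states back as descriptors are freed and num_free climbs above start_priority_th and, later, start_th.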
/**
* ol_tx_desc_alloc() - allocate descriptor from freelist
* @pdev: pdev handle
@ -142,7 +234,6 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_vdev_t *vdev)
{
struct ol_tx_desc_t *tx_desc = NULL;
struct ol_txrx_vdev_t *vd;
qdf_spin_lock_bh(&pdev->tx_mutex);
if (pdev->tx_desc.freelist) {
@ -152,31 +243,7 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
return NULL;
}
ol_tx_desc_dup_detect_set(pdev, tx_desc);
if (qdf_unlikely(pdev->tx_desc.num_free <
pdev->tx_desc.stop_th &&
pdev->tx_desc.num_free >=
pdev->tx_desc.stop_priority_th &&
pdev->tx_desc.status ==
FLOW_POOL_ACTIVE_UNPAUSED)) {
pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
/* pause network NON PRIORITY queues */
TAILQ_FOREACH(vd, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vd->vdev_id,
WLAN_STOP_NON_PRIORITY_QUEUE,
WLAN_DATA_FLOW_CONTROL);
}
} else if (qdf_unlikely((pdev->tx_desc.num_free <
pdev->tx_desc.stop_priority_th) &&
pdev->tx_desc.status ==
FLOW_POOL_NON_PRIO_PAUSED)) {
pdev->tx_desc.status = FLOW_POOL_ACTIVE_PAUSED;
/* pause priority queue */
TAILQ_FOREACH(vd, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vd->vdev_id,
WLAN_NETIF_PRIORITY_QUEUE_OFF,
WLAN_DATA_FLOW_CONTROL_PRIORITY);
}
}
ol_tx_do_pdev_flow_control_pause(pdev);
ol_tx_desc_sanity_checks(pdev, tx_desc);
ol_tx_desc_compute_delay(tx_desc);
ol_tx_desc_vdev_update(tx_desc, vdev);
@ -466,47 +533,13 @@ static void ol_tx_desc_free_common(struct ol_txrx_pdev_t *pdev,
*/
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
struct ol_txrx_vdev_t *vdev;
qdf_spin_lock_bh(&pdev->tx_mutex);
ol_tx_desc_free_common(pdev, tx_desc);
ol_tx_put_desc_global_pool(pdev, tx_desc);
ol_tx_desc_vdev_rm(tx_desc);
switch (pdev->tx_desc.status) {
case FLOW_POOL_ACTIVE_PAUSED:
if (pdev->tx_desc.num_free > pdev->tx_desc.start_priority_th) {
/* unpause priority queue */
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_NETIF_PRIORITY_QUEUE_ON,
WLAN_DATA_FLOW_CONTROL_PRIORITY);
}
pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
}
break;
case FLOW_POOL_NON_PRIO_PAUSED:
if (pdev->tx_desc.num_free > pdev->tx_desc.start_th) {
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
pdev->pause_cb(vdev->vdev_id,
WLAN_WAKE_NON_PRIORITY_QUEUE,
WLAN_DATA_FLOW_CONTROL);
}
pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
}
break;
case FLOW_POOL_INVALID:
if (pdev->tx_desc.num_free == pdev->tx_desc.pool_size)
ol_txrx_err("pool is INVALID State!!");
break;
case FLOW_POOL_ACTIVE_UNPAUSED:
break;
default:
ol_txrx_err("pool is INACTIVE State!!\n");
break;
};
ol_tx_do_pdev_flow_control_unpause(pdev);
qdf_spin_unlock_bh(&pdev->tx_mutex);
}

View File

@ -850,6 +850,41 @@ void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif
#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
/**
* ol_txrx_pdev_set_threshold() - set pdev pool stop/start threshold
* @pdev: txrx pdev
*
* Return: void
*/
static void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
{
uint32_t stop_threshold;
uint32_t start_threshold;
uint16_t desc_pool_size = pdev->tx_desc.pool_size;
stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
start_threshold = stop_threshold +
ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
pdev->tx_desc.stop_priority_th =
(TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
pdev->tx_desc.start_priority_th =
(TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
}
#else
static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
{
}
#endif
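
A rough sketch of the threshold arithmetic above with assumed numbers; the real percentages come from CFG_DP_TX_FLOW_STOP_QUEUE_TH and CFG_DP_TX_FLOW_START_QUEUE_OFFSET, which this standalone example does not read:

#include <stdint.h>

/* Sketch only: the values below are assumptions for illustration, not the
 * driver's configured defaults. */
static void example_pdev_threshold_math(void)
{
	uint16_t desc_pool_size = 900;      /* e.g. CONFIG_TGT_NUM_MSDU_DESC above */
	uint32_t stop_threshold = 15;       /* assumed stop percentage */
	uint32_t start_threshold = 15 + 10; /* stop percentage + assumed start offset */

	uint16_t stop_th  = (stop_threshold * desc_pool_size) / 100;  /* 135 */
	uint16_t start_th = (start_threshold * desc_pool_size) / 100; /* 225 */

	/* Queues pause once the free-descriptor count drops below stop_th and
	 * resume once it climbs back above start_th; the priority thresholds
	 * are a TX_PRIORITY_TH percentage of these, reduced by
	 * MAX_TSO_SEGMENT_DESC when large enough. */
	(void)stop_th;
	(void)start_th;
}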
/**
* ol_txrx_pdev_post_attach() - attach txrx pdev
* @pdev: txrx pdev
@ -870,8 +905,6 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
union ol_tx_desc_list_elem_t *c_element;
unsigned int sig_bit;
uint16_t desc_per_page;
uint32_t stop_threshold;
uint32_t start_threshold;
if (!osc) {
ret = -EINVAL;
@ -1015,21 +1048,7 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
(uint32_t *)pdev->tx_desc.freelist,
(uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));
stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
start_threshold = stop_threshold +
ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
pdev->tx_desc.stop_priority_th =
(TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
pdev->tx_desc.start_priority_th =
(TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
ol_txrx_pdev_set_threshold(pdev);
/* check what format of frames are expected to be delivered by the OS */
pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);

View File

@ -456,6 +456,7 @@ struct ol_tx_group_credit_stats_t {
};
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
* enum flow_pool_status - flow pool status
* @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
@ -527,7 +528,7 @@ struct ol_tx_flow_pool_t {
uint16_t stop_priority_th;
uint16_t start_priority_th;
};
#endif
#define OL_TXRX_INVALID_PEER_UNMAP_COUNT 0xF
/*
@ -774,11 +775,13 @@ struct ol_txrx_pdev_t {
#ifdef DESC_DUP_DETECT_DEBUG
unsigned long *free_list_bitmap;
#endif
#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
uint16_t stop_th;
uint16_t start_th;
uint16_t stop_priority_th;
uint16_t start_priority_th;
enum flow_pool_status status;
#endif
} tx_desc;
uint8_t is_mgmt_over_wmi_enabled;

View File

@ -137,7 +137,9 @@ struct hdd_apf_context {
#endif /* FEATURE_WLAN_APF */
/** Number of Tx Queues */
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_HL_NETDEV_FLOW_CONTROL)
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || \
defined(QCA_HL_NETDEV_FLOW_CONTROL) || \
defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
#define NUM_TX_QUEUES 5
#else
#define NUM_TX_QUEUES 4

View File

@ -9789,6 +9789,7 @@ int hdd_start_ap_adapter(struct hdd_adapter *adapter)
return 0;
}
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
* hdd_txrx_populate_cds_config() - Populate txrx cds configuration
* @cds_cfg: CDS Configuration
@ -9807,6 +9808,13 @@ static inline void hdd_txrx_populate_cds_config(struct cds_config_info
/* configuration for DP RX Threads */
cds_cfg->enable_dp_rx_threads = hdd_ctx->enable_dp_rx_threads;
}
#else
static inline void hdd_txrx_populate_cds_config(struct cds_config_info
*cds_cfg,
struct hdd_context *hdd_ctx)
{
}
#endif
/**
* hdd_update_cds_config() - API to update cds configuration parameters
@ -14306,6 +14314,25 @@ void hdd_update_dp_config_rx_softirq_limits(struct hdd_context *hdd_ctx,
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
static void
hdd_update_dp_config_queue_threshold(struct hdd_context *hdd_ctx,
struct cdp_config_params *params)
{
params->tx_flow_stop_queue_threshold =
cfg_get(hdd_ctx->psoc, CFG_DP_TX_FLOW_STOP_QUEUE_TH);
params->tx_flow_start_queue_offset =
cfg_get(hdd_ctx->psoc,
CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
}
#else
static inline void
hdd_update_dp_config_queue_threshold(struct hdd_context *hdd_ctx,
struct cdp_config_params *params)
{
}
#endif
/**
* hdd_update_dp_config() - Propagate config parameters to Lithium
* datapath
@ -14322,11 +14349,7 @@ static int hdd_update_dp_config(struct hdd_context *hdd_ctx)
soc = cds_get_context(QDF_MODULE_ID_SOC);
params.tso_enable = cfg_get(hdd_ctx->psoc, CFG_DP_TSO);
params.lro_enable = cfg_get(hdd_ctx->psoc, CFG_DP_LRO);
params.tx_flow_stop_queue_threshold =
cfg_get(hdd_ctx->psoc, CFG_DP_TX_FLOW_STOP_QUEUE_TH);
params.tx_flow_start_queue_offset =
cfg_get(hdd_ctx->psoc,
CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
hdd_update_dp_config_queue_threshold(hdd_ctx, &params);
params.flow_steering_enable =
cfg_get(hdd_ctx->psoc, CFG_DP_FLOW_STEERING_ENABLED);
params.napi_enable = hdd_ctx->napi_enable;

View File

@ -64,7 +64,7 @@
#include "target_type.h"
#include "wlan_hdd_object_manager.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/*
* Mapping Linux AC interpretation to SME AC.
* Host has 5 tx queues, 4 flow-controlled queues for regular traffic and

View File

@ -1732,7 +1732,9 @@ static uint16_t __hdd_get_queue_index(uint16_t up)
return hdd_linux_up_to_ac_map[up];
}
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_HL_NETDEV_FLOW_CONTROL)
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || \
defined(QCA_HL_NETDEV_FLOW_CONTROL) || \
defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
* hdd_get_queue_index() - get queue index
* @up: user priority