Add 'qcom/opensource/wlan/qca-wifi-host-cmn/' from commit 'fec800ab539956671af604bdd6e1ee3b84eef491'

git-subtree-dir: qcom/opensource/wlan/qca-wifi-host-cmn
git-subtree-mainline: 5add812a59
git-subtree-split: fec800ab53
Change-Id: repo: https://git.codelinaro.org/clo/la/platform/vendor/qcom-opensource/wlan/qca-wifi-host-cmn tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
This commit is contained in: af9c28c32a

qcom/opensource/wlan/qca-wifi-host-cmn/README.txt (new file, 1 line)
@@ -0,0 +1 @@
This is CNSS WLAN Host Driver for products starting from iHelium
qcom/opensource/wlan/qca-wifi-host-cmn/VERSION.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
Current Component wlan-cmn.driver.lnx.1.0 version 5.1.1.17I
Matches Component wlan-cld3.driver.lnx.1.1 version 5.1.0.22C
@@ -0,0 +1,75 @@
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021,2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: This file contains centralized definitions of converged configuration.
 */

#ifndef __CFG_CONVERGED_H
#define __CFG_CONVERGED_H

#include <cfg_scan.h>
#include "cfg_mlme_score_params.h"
#include "cfg_dp.h"
#include "cfg_hif.h"
#include <cfg_extscan.h>
#include <include/cfg_cmn_mlme.h>
#ifdef WLAN_SUPPORT_GREEN_AP
#include "cfg_green_ap_params.h"
#else
#define CFG_GREEN_AP_ALL
#endif
#include <cfg_spectral.h>
#ifdef DCS_INTERFERENCE_DETECTION
#include "cfg_dcs.h"
#else
#define CFG_DCS_ALL
#endif
#ifdef WLAN_CFR_ENABLE
#include "cfr_cfg.h"
#else
#define CFG_CFR_ALL
#endif
#ifdef FEATURE_CM_UTF_ENABLE
#include <wlan_cm_utf.h>
#else
#define CFG_WLAN_CM_UTF_PARAM
#endif
#include <cfg_cp_stats.h>
#include <cfg_mgmt_txrx.h>
#include <cfg_ipa.h>

#define CFG_CONVERGED_ALL \
	CFG_SCAN_ALL \
	CFG_DP \
	CFG_EXTSCAN_ALL \
	CFG_GREEN_AP_ALL \
	CFG_SPECTRAL_ALL \
	CFG_HIF \
	CFG_DCS_ALL \
	CFG_CFR_ALL \
	CFG_MLME_SCORE_ALL \
	CFG_WLAN_CM_UTF_PARAM \
	CFG_CMN_MLME_ALL \
	CFG_MGMT_TXRX_ALL \
	CFG_IPA \
	CFG_CP_STATS_ALL

#endif /* __CFG_CONVERGED_H */
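For orientation only (an assumption, since cfg_all.h is not part of this hunk): CFG_CONVERGED_ALL is one building block that is expected to be folded into the driver-wide CFG_ALL list, which i_cfg.h and cfg.c below expand into struct cfg_values and the parser metadata table, roughly along these lines:

/* illustrative sketch only - the real cfg_all.h is outside this diff */
#define CFG_ALL \
	CFG_CONVERGED_ALL \
	CFG_GLOBAL_ALL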
qcom/opensource/wlan/qca-wifi-host-cmn/cfg/inc/cfg_define.h (new file, 164 lines)
@@ -0,0 +1,164 @@
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: APIs and macros for defining configuration.
 */

#ifndef __CFG_DEFINE_H
#define __CFG_DEFINE_H

enum cfg_fallback_behavior {
	CFG_VALUE_OR_CLAMP,
	CFG_VALUE_OR_DEFAULT,
};

#define rm_parens(...) __VA_ARGS__
#define __CFG(id, is_ini, mtype, args...) \
	__CFG_##is_ini##_##mtype(id, mtype, args)
#define _CFG(id, args) __CFG(id, args)
#define CFG(id) _CFG(__##id, rm_parens id)

#define __CFG_INI_INT(args...) __CFG_INI(args)
#define __CFG_INI_UINT(args...) __CFG_INI(args)
#define __CFG_INI_BOOL(args...) __CFG_INI(args)
#define __CFG_INI_STRING(args...) __CFG_INI(args)
#define __CFG_INI_MAC(args...) __CFG_INI(args)
#define __CFG_INI_IPV4(args...) __CFG_INI(args)
#define __CFG_INI_IPV6(args...) __CFG_INI(args)
#define __CFG_INI(args...) (args)

#define __CFG_NON_INI_INT(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_UINT(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_BOOL(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_STRING(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_MAC(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_IPV4(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI_IPV6(args...) __CFG_NON_INI(args)
#define __CFG_NON_INI(args...)

/* configuration available in ini */
#define CFG_INI_INT(name, min, max, def, fallback, desc) \
	(INI, INT, int32_t, name, min, max, fallback, desc, def)
#define CFG_INI_UINT(name, min, max, def, fallback, desc) \
	(INI, UINT, uint32_t, name, min, max, fallback, desc, def)
#define CFG_INI_BOOL(name, def, desc) \
	(INI, BOOL, bool, name, false, true, -1, desc, def)
#define CFG_INI_STRING(name, min_len, max_len, def, desc) \
	(INI, STRING, char *, name, min_len, max_len, -1, desc, def)
#define CFG_INI_MAC(name, def, desc) \
	(INI, MAC, struct qdf_mac_addr, name, -1, -1, -1, desc, def)
#define CFG_INI_IPV4(name, def, desc) \
	(INI, IPV4, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def)
#define CFG_INI_IPV6(name, def, desc) \
	(INI, IPV6, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def)

/* configuration *not* available in ini */
#define CFG_INT(name, min, max, def, fallback, desc) \
	(NON_INI, INT, int32_t, name, min, max, fallback, desc, def)
#define CFG_UINT(name, min, max, def, fallback, desc) \
	(NON_INI, UINT, uint32_t, name, min, max, fallback, desc, def)
#define CFG_BOOL(name, def, desc) \
	(NON_INI, BOOL, bool, name, false, true, false, desc, def)
#define CFG_STRING(name, min_len, max_len, def, desc) \
	(NON_INI, STRING, char *, name, min_len, max_len, -1, desc, def)
#define CFG_MAC(name, def, desc) \
	(NON_INI, MAC, struct qdf_mac_addr, name, -1, -1, -1, desc, def)
#define CFG_IPV4(name, def, desc) \
	(NON_INI, IPV4, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def)
#define CFG_IPV6(name, def, desc) \
	(NON_INI, IPV6, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def)

/* utility macros/functions */
#ifdef CONFIG_AP_PLATFORM
#define PLATFORM_VALUE(non_ap_value, ap_value) ap_value
#else
#define PLATFORM_VALUE(non_ap_value, ap_value) non_ap_value
#endif

#ifdef WLAN_USE_CONFIG_PARAMS
/* Section Parsing - section names to be parsed */
#define CFG_256M_SECTION "256M"
#define CFG_512M_SECTION "512M"
#define CFG_512M_E_SECTION "512M-E"
#define CFG_512M_P_SECTION "512M-P"

#define CFG_2G_SECTION "2G"
#define CFG_5G_SECTION "5G"
#define CFG_5G_LOW_SECTION "5GL"
#define CFG_5G_HIGH_SECTION "5GH"
#define CFG_6G_HIGH_SECTION "6GH"
#define CFG_6G_LOW_SECTION "6GL"
#define CFG_6G_SECTION "6G"

#define CFG_256M_2G_SECTION "2G-256M"
#define CFG_256M_5G_SECTION "5G-256M"
#define CFG_256M_5G_LOW_SECTION "5GL-256M"
#define CFG_256M_5G_HIGH_SECTION "5GH-256M"
#define CFG_256M_6G_LOW_SECTION "6GL-256M"
#define CFG_256M_6G_HIGH_SECTION "6GH-256M"
#define CFG_256M_6G_SECTION "6G-256M"

#define CFG_512ME_2G_SECTION "2G-512M-E"
#define CFG_512ME_5G_SECTION "5G-512M-E"
#define CFG_512ME_5G_LOW_SECTION "5GL-512M-E"
#define CFG_512ME_5G_LOW_SECTION "5GL-512M-E"
#define CFG_512ME_6G_HIGH_SECTION "6GH-512M-E"
#define CFG_512ME_6G_HIGH_SECTION "6GH-512M-E"
#define CFG_512ME_6G_SECTION "6G-512M-E"

#define CFG_512MP_2G_SECTION "2G-512M-P"
#define CFG_512MP_5G_LOW_SECTION "5GL-512M-P"
#define CFG_512MP_5G_HIGH_SECTION "5GH-512M-P"
#define CFG_512MP_5G_SECTION "5G-512M-P"
#define CFG_512MP_6G_LOW_SECTION "6GL-512M-P"
#define CFG_512MP_6G_HIGH_SECTION "6GH-512M-P"
#define CFG_512MP_6G_SECTION "6G-512M-P"

#define CFG_1G_2G_SECTION "1G-2G"
#define CFG_1G_5G_SECTION "1G-5G"
#define CFG_1G_5G_LOW_SECTION "1G-5GL"
#define CFG_1G_5G_HIGH_SECTION "1G-5GH"
#define CFG_1G_6G_LOW_SECTION "1G-6GL"
#define CFG_1G_6G_HIGH_SECTION "1G-6GH"
#define CFG_1G_6G_SECTION "1G-6G"

#define CFG_SCAN_RADIO_SECTION "SCAN-RADIO"

#define CFG_SBS_NSS_RING_SECTION "SBS-NSS-RING"
#define CFG_DBS_NSS_RING_SECTION "DBS-NSS-RING"

#define CFG_DP_TX_DESC_512P_SECTION "512M_REDUCED_DESC"
#define CFG_DP_TX_DESC_1G_SECTION "1G-TX-DESC"
#define CFG_DP_MON_512M_SECTION "DP_MON_512M_RING"
#define CFG_NSS_3DEV_RING_SECTION "DP_NSS_3DEV_RING_SIZE"
#define CFG_DP_4RADIO_REO_SECTION "DP_NSS_4RADIO_REO_MAP"

#define CFG_512M_OR_4CHAIN_SECTION "512M_OR_DP_MON_4CHAIN"
#define CFG_DP_MON_2CHAIN_SECTION "DP_MON_2CHAIN"

#define CFG_SOC_SINGLE_PHY_2G_SECTION "SINGLE_PHY_2G"
#define CFG_SOC_SINGLE_PHY_5G_SECTION "SINGLE_PHY_5G"
#define CFG_SOC_SINGLE_PHY_6G_SECTION "SINGLE_PHY_6G"
#define CFG_SOC_SPLIT_PHY_2G_5G_LOW_SECTION "SPLIT_PHY_2G_5G_LOW"
#define CFG_SOC_SPLIT_PHY_6G_5G_HIGH_SECTION "SPLIT_PHY_6G_5G_HIGH"
#endif /* WLAN_USE_CONFIG_PARAMS */

#endif /* __CFG_DEFINE_H */
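Illustration only (not part of this commit): a component header would typically describe one item with the helpers above and wrap it in CFG() so the INI/NON_INI routing macros can pick it up. The item name, bounds, and default below are hypothetical.

/* hypothetical ini item declared in some component's cfg header */
#define CFG_FOO_DWELL_TIME CFG_INI_UINT( \
	"gFooDwellTime", \
	5, \
	300, \
	100, \
	CFG_VALUE_OR_CLAMP, \
	"Per-channel foo dwell time in ms")

/* each component then contributes CFG() entries to a *_ALL list */
#define CFG_FOO_ALL CFG(CFG_FOO_DWELL_TIME)

With CFG_VALUE_OR_CLAMP an out-of-range ini value is clamped into [5, 300]; with CFG_VALUE_OR_DEFAULT it would fall back to 100. PLATFORM_VALUE() can be used in the default slot when AP and non-AP builds need different defaults.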
@@ -0,0 +1,41 @@
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: Dispatcher related handler APIs for the configuration component
 */
#ifndef __CFG_DISPATCHER_H_
#define __CFG_DISPATCHER_H_

#include <qdf_status.h>

/**
 * cfg_dispatcher_init() - Configuration component global init handler
 *
 * Return: QDF_STATUS
 */
QDF_STATUS cfg_dispatcher_init(void);

/**
 * cfg_dispatcher_deinit() - Configuration component global deinit handler
 *
 * Return: QDF_STATUS
 */
QDF_STATUS cfg_dispatcher_deinit(void);

#endif /* __CFG_DISPATCHER_H */
qcom/opensource/wlan/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h (new file, 307 lines)
@@ -0,0 +1,307 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: UCFG APIs for the configuration component.
|
||||
*
|
||||
* Logically, configuration exists at the psoc level. This means, each psoc can
|
||||
* have its own custom configuration, and calls to look up configuration take a
|
||||
* psoc parameter for reference. E.g.
|
||||
*
|
||||
* int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID);
|
||||
*
|
||||
* Configuration is cascading, and lookups happen in this order:
|
||||
*
|
||||
* 1) use psoc value, if configured
|
||||
* 2) use global value, if configured
|
||||
* 3) fallback to the default value for the configuration item
|
||||
*
|
||||
* This means a psoc configuration is a specialization of the global
|
||||
* configuration, and does not need to explicitly set the same values if they
|
||||
* would match the global config.
|
||||
*
|
||||
* In order to load and parse the global config, call cfg_parse(). In order to
|
||||
* load and parse psoc configs, call cfg_psoc_parse(). cfg_parse() MUST be
|
||||
* called before cfg_psoc_parse(), as global configuration will be consulted
|
||||
* during the psoc parsing process.
|
||||
*
|
||||
* There are two basic lifecycles supported:
|
||||
*
|
||||
* 1) The type and number of psocs is *not* known at load time
|
||||
*
|
||||
* // driver is loading
|
||||
* cfg_parse("/path/to/config");
|
||||
*
|
||||
* ...
|
||||
*
|
||||
* // a psoc has just been created
|
||||
* cfg_psoc_parse(psoc, "/path/to/psoc/config");
|
||||
*
|
||||
* ...
|
||||
*
|
||||
* // driver is unloading
|
||||
* cfg_release();
|
||||
*
|
||||
* 2) The type and number of psocs *is* known at load time
|
||||
*
|
||||
* // driver is loading
|
||||
* cfg_parse("/path/to/config");
|
||||
*
|
||||
* ...
|
||||
*
|
||||
* // for each psoc
|
||||
* cfg_psoc_parse(psoc, "/path/to/psoc/config");
|
||||
*
|
||||
* // no further psocs will be created after this point
|
||||
* cfg_release();
|
||||
*
|
||||
* ...
|
||||
*
|
||||
* // driver is unloaded later
|
||||
*
|
||||
* Each configuration store is reference counted to reduce memory footprint, and
|
||||
* the configuration component itself will hold one ref count on the global
|
||||
* config store. All psocs for which psoc-specific configurations have *not*
|
||||
* been provided will reference the global config store. Psocs for which psoc-
|
||||
* specific configurations *have* been provided will check for existing stores
|
||||
* with a matching path to use, before parsing the specified configuration file.
|
||||
*
|
||||
* If, at some point in time, it is known that no further psocs will ever be
|
||||
* created, a call to cfg_release() will release the global ref count held by
|
||||
* the configuration component. For systems which specify psoc-specific configs
|
||||
* for all psocs, this will release the unnecessary memory used by the global
|
||||
* config store. Otherwise, calling cfg_release() at unload time will ensure
|
||||
* the global config store is properly freed.
|
||||
*/
|
||||
|
||||
#ifndef __CFG_UCFG_H
|
||||
#define __CFG_UCFG_H
|
||||
|
||||
#include "cfg_all.h"
|
||||
#include "cfg_define.h"
|
||||
#include "i_cfg.h"
|
||||
#include "qdf_status.h"
|
||||
#include "qdf_str.h"
|
||||
#include "qdf_types.h"
|
||||
#include "wlan_objmgr_psoc_obj.h"
|
||||
|
||||
/**
|
||||
* cfg_parse() - parse an ini file and populate the global config store
|
||||
* @path: The full file path of the ini file to parse
|
||||
*
|
||||
* Note: A matching cfg_release() call is required to release allocated
|
||||
* resources.
|
||||
*
|
||||
* The *.ini file format is a simple format consisting of a list of key/value
|
||||
* pairs, separated by an '=' character. e.g.
|
||||
*
|
||||
* gConfigItem1=some string value
|
||||
* gConfigItem2=0xabc
|
||||
*
|
||||
* Comments are also supported, initiated with the '#' character:
|
||||
*
|
||||
* # This is a comment. It will be ignored by the *.ini parser
|
||||
* gConfigItem3=aa:bb:cc:dd:ee:ff # this is also a comment
|
||||
*
|
||||
* Several datatypes are natively supported:
|
||||
*
|
||||
* gInt=-123 # bin (0b), octal (0o), hex (0x), and decimal supported
|
||||
* gUint=123 # a non-negative integer value
|
||||
* gBool=y # (1, Y, y) -> true; (0, N, n) -> false
|
||||
* gString=any string # strings are useful for representing complex types
|
||||
* gMacAddr=aa:bb:cc:dd:ee:ff # colons are optional, upper and lower case
|
||||
* gIpv4Addr=127.0.0.1 # uses typical dot-decimal notation
|
||||
* gIpv6Addr=::1 # typical notation, supporting zero-compression
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS cfg_parse(const char *path);
|
||||
|
||||
/**
|
||||
* cfg_release() - release the global configuration store
|
||||
*
|
||||
* This API releases the configuration component's reference to the global
|
||||
* config store.
|
||||
*
|
||||
* See also: this file's DOC section.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void cfg_release(void);
|
||||
|
||||
/**
|
||||
* cfg_psoc_parse() - specialize the config store for @psoc by parsing @path
|
||||
* @psoc: The psoc whose config store should be specialized
|
||||
* @path: The full file path of the ini file to parse
|
||||
*
|
||||
* See also: cfg_parse(), and this file's DOC section.
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path);
|
||||
|
||||
/**
|
||||
* cfg_parse_to_psoc_store() - Parse file @path and update psoc ini store
|
||||
* @psoc: The psoc whose config store should be updated
|
||||
* @path: The full file path of the ini file to parse
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS cfg_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc,
|
||||
const char *path);
|
||||
|
||||
/**
|
||||
* cfg_section_parse_to_psoc_store() - Parse specific section from file @path
|
||||
* and update psoc ini store
|
||||
* @psoc: The psoc whose config store should be updated
|
||||
* @path: The full file path of the ini file to parse
|
||||
* @section_name: Section name to be parsed
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS cfg_section_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc,
|
||||
const char *path,
|
||||
const char *section_name);
|
||||
|
||||
/**
|
||||
* cfg_parse_to_global_store() - Parse file @path and update global ini store
|
||||
* @path: The full file path of the ini file to parse
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS cfg_parse_to_global_store(const char *path);
|
||||
|
||||
/**
|
||||
* ucfg_cfg_store_print() - prints the cfg ini/non ini logs
|
||||
* @psoc: psoc
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS ucfg_cfg_store_print(struct wlan_objmgr_psoc *psoc);
|
||||
|
||||
/**
|
||||
* ucfg_cfg_ini_config_print() - prints the cfg ini/non ini to buffer
|
||||
* @psoc: psoc
|
||||
* @buf: cache to save ini config
|
||||
* @plen: the pointer to length
|
||||
* @buflen: total buf length
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS ucfg_cfg_ini_config_print(struct wlan_objmgr_psoc *psoc,
|
||||
uint8_t *buf, ssize_t *plen,
|
||||
ssize_t buflen);
|
||||
/**
|
||||
* cfg_valid_ini_check() - check ini file for invalid characters
|
||||
* @path: path to ini file
|
||||
*
|
||||
* Return: true if no invalid characters found, false otherwise
|
||||
*/
|
||||
bool cfg_valid_ini_check(const char *path);
|
||||
|
||||
/**
|
||||
* cfg_get() - lookup the configured value for @id from @psoc
|
||||
* @psoc: The psoc from which to lookup the configured value
|
||||
* @id: The id of the configured value to lookup
|
||||
*
|
||||
* E.g.
|
||||
*
|
||||
* int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID);
|
||||
*
|
||||
* Return: The configured value
|
||||
*/
|
||||
#define cfg_get(psoc, id) __cfg_get(psoc, __##id)
|
||||
|
||||
/* Configuration Access APIs */
|
||||
#define __do_call(op, args...) op(args)
|
||||
#define do_call(op, args) __do_call(op, rm_parens args)
|
||||
|
||||
#define cfg_id(id) #id
|
||||
|
||||
#define __cfg_mtype(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
mtype
|
||||
#define cfg_mtype(id) do_call(__cfg_mtype, id)
|
||||
|
||||
#define __cfg_type(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
ctype
|
||||
#define cfg_type(id) do_call(__cfg_type, id)
|
||||
|
||||
#define __cfg_name(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
name
|
||||
#define cfg_name(id) do_call(__cfg_name, id)
|
||||
|
||||
#define __cfg_min(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
min
|
||||
#define cfg_min(id) do_call(__cfg_min, id)
|
||||
|
||||
#define __cfg_max(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
max
|
||||
#define cfg_max(id) do_call(__cfg_max, id)
|
||||
|
||||
#define __cfg_fb(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
fallback
|
||||
#define cfg_fallback(id) do_call(__cfg_fb, id)
|
||||
|
||||
#define __cfg_desc(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
desc
|
||||
#define cfg_description(id) do_call(__cfg_desc, id)
|
||||
|
||||
#define __cfg_def(ini, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
def
|
||||
#define cfg_default(id) do_call(__cfg_def, id)
|
||||
|
||||
#define __cfg_str(id...) #id
|
||||
#define cfg_str(id) #id __cfg_str(id)
|
||||
|
||||
/* validate APIs */
|
||||
static inline bool
|
||||
cfg_string_in_range(const char *value, qdf_size_t min_len, qdf_size_t max_len)
|
||||
{
|
||||
qdf_size_t len = qdf_str_len(value);
|
||||
|
||||
return len >= min_len && len <= max_len;
|
||||
}
|
||||
|
||||
#define __cfg_INT_in_range(value, min, max) (value >= min && value <= max)
|
||||
#define __cfg_UINT_in_range(value, min, max) (value >= min && value <= max)
|
||||
#define __cfg_STRING_in_range(value, min_len, max_len) \
|
||||
cfg_string_in_range(value, min_len, max_len)
|
||||
|
||||
#define __cfg_in_range(id, value, mtype) \
|
||||
__cfg_ ## mtype ## _in_range(value, cfg_min(id), cfg_max(id))
|
||||
|
||||
/* this may look redundant, but forces @mtype to be expanded */
|
||||
#define __cfg_in_range_type(id, value, mtype) \
|
||||
__cfg_in_range(id, value, mtype)
|
||||
|
||||
#define cfg_in_range(id, value) __cfg_in_range_type(id, value, cfg_mtype(id))
|
||||
|
||||
/* Value-or-Default APIs */
|
||||
#define __cfg_value_or_default(id, value, def) \
|
||||
(cfg_in_range(id, value) ? value : def)
|
||||
|
||||
#define cfg_value_or_default(id, value) \
|
||||
__cfg_value_or_default(id, value, cfg_default(id))
|
||||
|
||||
/* Value-or-Clamped APIs */
|
||||
#define __cfg_clamp(val, min, max) (val < min ? min : (val > max ? max : val))
|
||||
#define cfg_clamp(id, value) __cfg_clamp(value, cfg_min(id), cfg_max(id))
|
||||
|
||||
#endif /* __CFG_UCFG_H */
|
||||
|
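A hedged illustration of the compile-time accessors and validation helpers above, using the hypothetical CFG_FOO_DWELL_TIME item sketched earlier (range [5, 300], default 100, CFG_VALUE_OR_CLAMP); psoc is assumed to be a valid struct wlan_objmgr_psoc pointer:

/* all of these resolve via plain macro expansion, no psoc needed */
uint32_t lo = cfg_min(CFG_FOO_DWELL_TIME);                      /* 5 */
uint32_t hi = cfg_max(CFG_FOO_DWELL_TIME);                      /* 300 */
uint32_t def = cfg_default(CFG_FOO_DWELL_TIME);                 /* 100 */
bool ok = cfg_in_range(CFG_FOO_DWELL_TIME, 1000);               /* false */
uint32_t v1 = cfg_value_or_default(CFG_FOO_DWELL_TIME, 1000);   /* 100 */
uint32_t v2 = cfg_clamp(CFG_FOO_DWELL_TIME, 1000);              /* 300 */

/* the parsed, per-psoc value is read through cfg_get() */
uint32_t dwell = cfg_get(psoc, CFG_FOO_DWELL_TIME);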
qcom/opensource/wlan/qca-wifi-host-cmn/cfg/inc/i_cfg.h (new file, 77 lines)
@@ -0,0 +1,77 @@
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: Internal APIs for the configuration component.
 */

#ifndef __I_CFG_H
#define __I_CFG_H

#include "cfg_define.h"
#include "qdf_trace.h"
#include "qdf_types.h"
#include "wlan_objmgr_psoc_obj.h"

#define cfg_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_CONFIG, params)
#define cfg_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_CONFIG, params)
#define cfg_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_CONFIG, params)
#define cfg_enter() QDF_TRACE_ENTER(QDF_MODULE_ID_CONFIG, "enter")
#define cfg_exit() QDF_TRACE_EXIT(QDF_MODULE_ID_CONFIG, "exit")

#define cfg_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_CONFIG, params)
#define cfg_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_CONFIG, params)
#define cfg_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_CONFIG, params)
#define cfg_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_CONFIG, params)

#define cfg_nofl_err(params...) \
	QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_CONFIG, params)
#define cfg_nofl_warn(params...) \
	QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_CONFIG, params)
#define cfg_nofl_info(params...) \
	QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_CONFIG, params)
#define cfg_nofl_debug(params...) \
	QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_CONFIG, params)

/* define global config values structure */

#undef __CFG_INI_STRING
#define __CFG_INI_STRING(id, mtype, ctype, name, min, max, fallback, desc, \
			 def...) \
	const char id##_internal[(max) + 1];
#undef __CFG_INI
#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \
	const ctype id##_internal;

struct cfg_values {
	/* e.g. const int32_t __CFG_SCAN_DWELL_TIME_internal; */
	CFG_ALL
};

#undef __CFG_INI_STRING
#define __CFG_INI_STRING(args...) __CFG_INI(args)
#undef __CFG_INI
#define __CFG_INI(args...) (args)

struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc);

#define __cfg_get(psoc, id) (cfg_psoc_get_values( \
	(struct wlan_objmgr_psoc *)psoc)->id##_internal)

#endif /* __I_CFG_H */
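Hedged sketch of what the two-pass expansion above produces for the hypothetical CFG_FOO_DWELL_TIME item used earlier: one member per CFG() entry of CFG_ALL, which __cfg_get() then dereferences.

/*
 * After preprocessing (illustrative):
 *
 *	struct cfg_values {
 *		...
 *		const uint32_t __CFG_FOO_DWELL_TIME_internal;
 *		...
 *	};
 *
 *	cfg_get(psoc, CFG_FOO_DWELL_TIME)
 *		=> cfg_psoc_get_values(psoc)->__CFG_FOO_DWELL_TIME_internal
 */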
@@ -0,0 +1,59 @@
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: This file contains various object manager related wrappers and helpers
 */

#ifndef __CFG_OBJMGR_H
#define __CFG_OBJMGR_H

#include "wlan_cmn.h"
#include "wlan_objmgr_global_obj.h"
#include "wlan_objmgr_psoc_obj.h"

/* Private Data */

#define cfg_psoc_get_priv(psoc) \
	wlan_objmgr_psoc_get_comp_private_obj((psoc), WLAN_UMAC_COMP_CONFIG)
#define cfg_psoc_set_priv(psoc, priv) \
	wlan_objmgr_psoc_component_obj_attach((psoc), WLAN_UMAC_COMP_CONFIG, \
					      (priv), QDF_STATUS_SUCCESS)
#define cfg_psoc_unset_priv(psoc, priv) \
	wlan_objmgr_psoc_component_obj_detach((psoc), WLAN_UMAC_COMP_CONFIG, \
					      (priv))

/* event registration */

#define cfg_psoc_register_create(callback) \
	wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \
						 (callback), NULL)
#define cfg_psoc_register_destroy(callback) \
	wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \
						  (callback), NULL)

/* event de-registration */

#define cfg_psoc_unregister_create(callback) \
	wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \
						   (callback), NULL)
#define cfg_psoc_unregister_destroy(callback) \
	wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \
						    (callback), NULL)

#endif /* __CFG_OBJMGR_H */
qcom/opensource/wlan/qca-wifi-host-cmn/cfg/src/cfg.c (new file, 977 lines)
@@ -0,0 +1,977 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "cfg_all.h"
|
||||
#include "cfg_define.h"
|
||||
#include "cfg_dispatcher.h"
|
||||
#include "cfg_ucfg_api.h"
|
||||
#include "i_cfg.h"
|
||||
#include "i_cfg_objmgr.h"
|
||||
#include "qdf_atomic.h"
|
||||
#include "qdf_list.h"
|
||||
#include "qdf_mem.h"
|
||||
#include "qdf_module.h"
|
||||
#include "qdf_parse.h"
|
||||
#include "qdf_status.h"
|
||||
#include "qdf_str.h"
|
||||
#include "qdf_trace.h"
|
||||
#include "qdf_types.h"
|
||||
#include "wlan_objmgr_psoc_obj.h"
|
||||
|
||||
/**
|
||||
* struct cfg_value_store - backing store for an ini file
|
||||
* @path: file path of the ini file
|
||||
* @node: internal list node for keeping track of all the allocated stores
|
||||
* @users: number of references on the store
|
||||
* @values: a values struct containing the parsed values from the ini file
|
||||
*/
|
||||
struct cfg_value_store {
|
||||
char *path;
|
||||
qdf_list_node_t node;
|
||||
qdf_atomic_t users;
|
||||
struct cfg_values values;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cfg_type - Enum for CFG/INI types
|
||||
* @CFG_INT_ITEM: Integer CFG/INI
|
||||
* @CFG_UINT_ITEM: Unsigned integer CFG/INI
|
||||
* @CFG_BOOL_ITEM: Boolean CFG/INI
|
||||
* @CFG_STRING_ITEM: String CFG/INI
|
||||
* @CFG_MAC_ITEM: Mac address CFG/INI
|
||||
* @CFG_IPV4_ITEM: IPV4 address CFG/INI
|
||||
* @CFG_IPV6_ITEM: IPV6 address CFG/INI
|
||||
* @CFG_MAX_ITEM: Max CFG type
|
||||
*/
|
||||
enum cfg_type {
|
||||
CFG_INT_ITEM,
|
||||
CFG_UINT_ITEM,
|
||||
CFG_BOOL_ITEM,
|
||||
CFG_STRING_ITEM,
|
||||
CFG_MAC_ITEM,
|
||||
CFG_IPV4_ITEM,
|
||||
CFG_IPV6_ITEM,
|
||||
CFG_MAX_ITEM,
|
||||
};
|
||||
|
||||
#define CFG_META_NAME_LENGTH_MAX 256
|
||||
#define CFG_INI_LENGTH_MAX 128
|
||||
|
||||
/* define/populate dynamic metadata lookup table */
|
||||
|
||||
/**
|
||||
* struct cfg_meta - configuration item metadata for dynamic lookup during parse
|
||||
* @name: name of the config item used in the ini file (i.e. "gScanDwellTime")
|
||||
* @item_handler: parsing callback based on the type of the config item
|
||||
* @min: minimum value for use in bounds checking (min_len for strings)
|
||||
* @max: maximum value for use in bounds checking (max_len for strings)
|
||||
* @fallback: the fallback behavior to use when configured values are invalid
|
||||
*/
|
||||
struct cfg_meta {
|
||||
const char *name;
|
||||
const uint32_t field_offset;
|
||||
const enum cfg_type cfg_type;
|
||||
void (*const item_handler)(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *value);
|
||||
const int32_t min;
|
||||
const int32_t max;
|
||||
const enum cfg_fallback_behavior fallback;
|
||||
};
|
||||
|
||||
/* ini item handler functions */
|
||||
|
||||
#define cfg_value_ptr(store, meta) \
|
||||
((void *)&(store)->values + (meta)->field_offset)
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_int_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
int32_t *store_value = cfg_value_ptr(store, meta);
|
||||
int32_t value;
|
||||
|
||||
status = qdf_int32_parse(str_value, &value);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default %d",
|
||||
meta->name, str_value, status, *store_value);
|
||||
return;
|
||||
}
|
||||
|
||||
QDF_BUG(meta->min <= meta->max);
|
||||
if (meta->min > meta->max) {
|
||||
cfg_err("Invalid config item meta for %s", meta->name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (value >= meta->min && value <= meta->max) {
|
||||
*store_value = value;
|
||||
return;
|
||||
}
|
||||
|
||||
switch (meta->fallback) {
|
||||
default:
|
||||
QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'",
|
||||
meta->fallback, meta->name);
|
||||
fallthrough;
|
||||
case CFG_VALUE_OR_DEFAULT:
|
||||
/* store already contains default */
|
||||
break;
|
||||
case CFG_VALUE_OR_CLAMP:
|
||||
*store_value = __cfg_clamp(value, meta->min, meta->max);
|
||||
break;
|
||||
}
|
||||
|
||||
cfg_err("%s=%d - Out of range [%d, %d]; Using %d",
|
||||
meta->name, value, meta->min, meta->max, *store_value);
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_uint_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
uint32_t *store_value = cfg_value_ptr(store, meta);
|
||||
uint32_t value;
|
||||
uint32_t min;
|
||||
uint32_t max;
|
||||
|
||||
/*
 * Since meta->min and meta->max are of type int32_t, cast them explicitly
 * to avoid implicit wrap-around for uint32_t type cfg data.
 */
|
||||
min = (uint32_t)meta->min;
|
||||
max = (uint32_t)meta->max;
|
||||
|
||||
status = qdf_uint32_parse(str_value, &value);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default %u",
|
||||
meta->name, str_value, status, *store_value);
|
||||
return;
|
||||
}
|
||||
|
||||
QDF_BUG(min <= max);
|
||||
if (min > max) {
|
||||
cfg_err("Invalid config item meta for %s", meta->name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (value >= min && value <= max) {
|
||||
*store_value = value;
|
||||
return;
|
||||
}
|
||||
|
||||
switch (meta->fallback) {
|
||||
default:
|
||||
QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'",
|
||||
meta->fallback, meta->name);
|
||||
fallthrough;
|
||||
case CFG_VALUE_OR_DEFAULT:
|
||||
/* store already contains default */
|
||||
break;
|
||||
case CFG_VALUE_OR_CLAMP:
|
||||
*store_value = __cfg_clamp(value, min, max);
|
||||
break;
|
||||
}
|
||||
|
||||
cfg_err("%s=%u - Out of range [%d, %d]; Using %u",
|
||||
meta->name, value, min, max, *store_value);
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_bool_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
bool *store_value = cfg_value_ptr(store, meta);
|
||||
|
||||
status = qdf_bool_parse(str_value, store_value);
|
||||
if (QDF_IS_STATUS_SUCCESS(status))
|
||||
return;
|
||||
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default '%s'",
|
||||
meta->name, str_value, status, *store_value ? "true" : "false");
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_string_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
char *store_value = cfg_value_ptr(store, meta);
|
||||
qdf_size_t len;
|
||||
|
||||
QDF_BUG(meta->min >= 0);
|
||||
QDF_BUG(meta->min <= meta->max);
|
||||
if (meta->min < 0 || meta->min > meta->max) {
|
||||
cfg_err("Invalid config item meta for %s", meta->name);
|
||||
return;
|
||||
}
|
||||
|
||||
/* ensure min length */
|
||||
len = qdf_str_nlen(str_value, meta->min);
|
||||
if (len < meta->min) {
|
||||
cfg_err("%s=%s - Too short; Using default '%s'",
|
||||
meta->name, str_value, store_value);
|
||||
return;
|
||||
}
|
||||
|
||||
/* check max length */
|
||||
len += qdf_str_nlen(str_value + meta->min, meta->max - meta->min + 1);
|
||||
if (len > meta->max) {
|
||||
cfg_err("%s=%s - Too long; Using default '%s'",
|
||||
meta->name, str_value, store_value);
|
||||
return;
|
||||
}
|
||||
|
||||
qdf_str_lcopy(store_value, str_value, meta->max + 1);
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_mac_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct qdf_mac_addr *store_value = cfg_value_ptr(store, meta);
|
||||
|
||||
status = qdf_mac_parse(str_value, store_value);
|
||||
if (QDF_IS_STATUS_SUCCESS(status))
|
||||
return;
|
||||
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default "
|
||||
QDF_MAC_ADDR_FMT, meta->name, str_value, status,
|
||||
QDF_MAC_ADDR_REF(store_value->bytes));
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_ipv4_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct qdf_ipv4_addr *store_value = cfg_value_ptr(store, meta);
|
||||
|
||||
status = qdf_ipv4_parse(str_value, store_value);
|
||||
if (QDF_IS_STATUS_SUCCESS(status))
|
||||
return;
|
||||
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default "
|
||||
QDF_IPV4_ADDR_STR, meta->name, str_value, status,
|
||||
QDF_IPV4_ADDR_ARRAY(store_value->bytes));
|
||||
}
|
||||
|
||||
static __attribute__((unused)) void
|
||||
cfg_ipv6_item_handler(struct cfg_value_store *store,
|
||||
const struct cfg_meta *meta,
|
||||
const char *str_value)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct qdf_ipv6_addr *store_value = cfg_value_ptr(store, meta);
|
||||
|
||||
status = qdf_ipv6_parse(str_value, store_value);
|
||||
if (QDF_IS_STATUS_SUCCESS(status))
|
||||
return;
|
||||
|
||||
cfg_err("%s=%s - Invalid format (status %d); Using default "
|
||||
QDF_IPV6_ADDR_STR, meta->name, str_value, status,
|
||||
QDF_IPV6_ADDR_ARRAY(store_value->bytes));
|
||||
}
|
||||
|
||||
/* populate metadata lookup table */
|
||||
#undef __CFG_INI
|
||||
#define __CFG_INI(_id, _mtype, _ctype, _name, _min, _max, _fallback, ...) \
|
||||
{ \
|
||||
.name = _name, \
|
||||
.field_offset = qdf_offsetof(struct cfg_values, _id##_internal), \
|
||||
.cfg_type = CFG_ ##_mtype ## _ITEM, \
|
||||
.item_handler = cfg_ ## _mtype ## _item_handler, \
|
||||
.min = _min, \
|
||||
.max = _max, \
|
||||
.fallback = _fallback, \
|
||||
},
|
||||
|
||||
#define cfg_INT_item_handler cfg_int_item_handler
|
||||
#define cfg_UINT_item_handler cfg_uint_item_handler
|
||||
#define cfg_BOOL_item_handler cfg_bool_item_handler
|
||||
#define cfg_STRING_item_handler cfg_string_item_handler
|
||||
#define cfg_MAC_item_handler cfg_mac_item_handler
|
||||
#define cfg_IPV4_item_handler cfg_ipv4_item_handler
|
||||
#define cfg_IPV6_item_handler cfg_ipv6_item_handler
|
||||
|
||||
static const struct cfg_meta cfg_meta_lookup_table[] = {
|
||||
CFG_ALL
|
||||
};
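/*
 * Illustration only (hypothetical item, not present in this commit): after
 * the __CFG_INI redefinition above, a CFG_INI_UINT item "gFooDwellTime"
 * with range [5, 300] and CFG_VALUE_OR_CLAMP would generate roughly this
 * row in cfg_meta_lookup_table:
 *
 *	{
 *		.name = "gFooDwellTime",
 *		.field_offset = qdf_offsetof(struct cfg_values,
 *					     __CFG_FOO_DWELL_TIME_internal),
 *		.cfg_type = CFG_UINT_ITEM,
 *		.item_handler = cfg_uint_item_handler,
 *		.min = 5,
 *		.max = 300,
 *		.fallback = CFG_VALUE_OR_CLAMP,
 *	},
 */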
|
||||
|
||||
/* default store initializer */
|
||||
|
||||
static void cfg_store_set_defaults(struct cfg_value_store *store)
|
||||
{
|
||||
#undef __CFG_INI
|
||||
#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
ctype id = def;
|
||||
|
||||
CFG_ALL
|
||||
|
||||
#undef __CFG_INI_STRING
|
||||
#define __CFG_INI_STRING(id, mtype, ctype, name, min_len, max_len, ...) \
|
||||
qdf_str_lcopy((char *)&store->values.id##_internal, id, (max_len) + 1);
|
||||
|
||||
#undef __CFG_INI
|
||||
#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \
|
||||
*(ctype *)&store->values.id##_internal = id;
|
||||
|
||||
CFG_ALL
|
||||
}
|
||||
|
||||
static const struct cfg_meta *cfg_lookup_meta(const char *name)
|
||||
{
|
||||
int i;
|
||||
char *param1;
|
||||
char param[CFG_META_NAME_LENGTH_MAX];
|
||||
uint8_t ini_name[CFG_INI_LENGTH_MAX];
|
||||
|
||||
QDF_BUG(name);
|
||||
if (!name)
|
||||
return NULL;
|
||||
|
||||
/* linear search for now; optimize in the future if needed */
|
||||
for (i = 0; i < QDF_ARRAY_SIZE(cfg_meta_lookup_table); i++) {
|
||||
const struct cfg_meta *meta = &cfg_meta_lookup_table[i];
|
||||
|
||||
qdf_mem_zero(ini_name, CFG_INI_LENGTH_MAX);
|
||||
|
||||
qdf_mem_zero(param, CFG_META_NAME_LENGTH_MAX);
|
||||
if (strlen(meta->name) >= CFG_META_NAME_LENGTH_MAX) {
|
||||
cfg_err("Invalid meta name %s", meta->name);
|
||||
continue;
|
||||
}
|
||||
|
||||
qdf_mem_copy(param, meta->name, strlen(meta->name));
|
||||
param[strlen(meta->name)] = '\0';
|
||||
param1 = param;
|
||||
if (!sscanf(param1, "%s", ini_name)) {
|
||||
cfg_err("Cannot get ini name %s", param1);
|
||||
return NULL;
|
||||
}
|
||||
if (qdf_str_eq(name, ini_name))
|
||||
return meta;
|
||||
|
||||
param1 = strpbrk(param, " ");
|
||||
while (param1) {
|
||||
param1++;
|
||||
if (!sscanf(param1, "%s ", ini_name)) {
|
||||
cfg_err("Invalid ini name %s", meta->name);
|
||||
return NULL;
|
||||
}
|
||||
if (qdf_str_eq(name, ini_name))
|
||||
return meta;
|
||||
param1 = strpbrk(param1, " ");
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_ini_item_handler(void *context, const char *key, const char *value)
|
||||
{
|
||||
struct cfg_value_store *store = context;
|
||||
const struct cfg_meta *meta;
|
||||
|
||||
meta = cfg_lookup_meta(key);
|
||||
if (!meta) {
|
||||
/* TODO: promote to 'err' or 'warn' once legacy is ported */
|
||||
cfg_debug("Unknown config item '%s'", key);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_BUG(meta->item_handler);
|
||||
if (!meta->item_handler)
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
meta->item_handler(store, meta, value);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS cfg_ini_section_handler(void *context, const char *name)
|
||||
{
|
||||
cfg_err("Unexpected section '%s'. Sections are not supported.", name);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
#define cfg_assert_success(expr) \
|
||||
do { \
|
||||
QDF_STATUS __assert_status = (expr); \
|
||||
QDF_BUG(QDF_IS_STATUS_SUCCESS(__assert_status)); \
|
||||
} while (0)
|
||||
|
||||
static bool __cfg_is_init;
|
||||
static struct cfg_value_store *__cfg_global_store;
|
||||
static qdf_list_t __cfg_stores_list;
|
||||
static qdf_spinlock_t __cfg_stores_lock;
|
||||
|
||||
struct cfg_psoc_ctx {
|
||||
struct cfg_value_store *store;
|
||||
};
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_store_alloc(const char *path, struct cfg_value_store **out_store)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct cfg_value_store *store;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
store = qdf_mem_common_alloc(sizeof(*store));
|
||||
if (!store)
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
|
||||
status = qdf_str_dup(&store->path, path);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto free_store;
|
||||
|
||||
status = qdf_atomic_init(&store->users);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto free_path;
|
||||
qdf_atomic_inc(&store->users);
|
||||
|
||||
qdf_spin_lock_bh(&__cfg_stores_lock);
|
||||
status = qdf_list_insert_back(&__cfg_stores_list, &store->node);
|
||||
qdf_spin_unlock_bh(&__cfg_stores_lock);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto free_path;
|
||||
|
||||
*out_store = store;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
free_path:
|
||||
qdf_mem_free(store->path);
|
||||
|
||||
free_store:
|
||||
qdf_mem_common_free(store);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void cfg_store_free(struct cfg_value_store *store)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
qdf_spin_lock_bh(&__cfg_stores_lock);
|
||||
status = qdf_list_remove_node(&__cfg_stores_list, &store->node);
|
||||
qdf_spin_unlock_bh(&__cfg_stores_lock);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
QDF_DEBUG_PANIC("Failed config store list removal; status:%d",
|
||||
status);
|
||||
|
||||
qdf_mem_free(store->path);
|
||||
qdf_mem_common_free(store);
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_store_get(const char *path, struct cfg_value_store **out_store)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
qdf_list_node_t *node;
|
||||
|
||||
*out_store = NULL;
|
||||
|
||||
qdf_spin_lock_bh(&__cfg_stores_lock);
|
||||
status = qdf_list_peek_front(&__cfg_stores_list, &node);
|
||||
while (QDF_IS_STATUS_SUCCESS(status)) {
|
||||
struct cfg_value_store *store =
|
||||
qdf_container_of(node, struct cfg_value_store, node);
|
||||
|
||||
if (qdf_str_eq(path, store->path)) {
|
||||
qdf_atomic_inc(&store->users);
|
||||
*out_store = store;
|
||||
break;
|
||||
}
|
||||
|
||||
status = qdf_list_peek_next(&__cfg_stores_list, node, &node);
|
||||
}
|
||||
qdf_spin_unlock_bh(&__cfg_stores_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void cfg_store_put(struct cfg_value_store *store)
|
||||
{
|
||||
if (qdf_atomic_dec_and_test(&store->users))
|
||||
cfg_store_free(store);
|
||||
}
|
||||
|
||||
static struct cfg_psoc_ctx *cfg_psoc_get_ctx(struct wlan_objmgr_psoc *psoc)
|
||||
{
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
|
||||
psoc_ctx = cfg_psoc_get_priv(psoc);
|
||||
QDF_BUG(psoc_ctx);
|
||||
|
||||
return psoc_ctx;
|
||||
}
|
||||
|
||||
struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc)
|
||||
{
|
||||
return &cfg_psoc_get_ctx(psoc)->store->values;
|
||||
}
|
||||
qdf_export_symbol(cfg_psoc_get_values);
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_ini_parse_to_store(const char *path, struct cfg_value_store *store)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
|
||||
status = qdf_ini_parse(path, store, cfg_ini_item_handler,
|
||||
cfg_ini_section_handler);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
cfg_err("Failed to parse *.ini file @ %s; status:%d",
|
||||
path, status);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_ini_section_parse_to_store(const char *path, const char *section_name,
|
||||
struct cfg_value_store *store)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
|
||||
status = qdf_ini_section_parse(path, store, cfg_ini_item_handler,
|
||||
section_name);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
cfg_err("Failed to parse *.ini file @ %s; status:%d",
|
||||
path, status);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS cfg_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc,
|
||||
const char *path)
|
||||
{
|
||||
return cfg_ini_parse_to_store(path, cfg_psoc_get_ctx(psoc)->store);
|
||||
}
|
||||
|
||||
qdf_export_symbol(cfg_parse_to_psoc_store);
|
||||
|
||||
QDF_STATUS cfg_section_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc,
|
||||
const char *path,
|
||||
const char *section_name)
|
||||
{
|
||||
return cfg_ini_section_parse_to_store(path, section_name,
|
||||
cfg_psoc_get_ctx(psoc)->store);
|
||||
}
|
||||
|
||||
qdf_export_symbol(cfg_section_parse_to_psoc_store);
|
||||
|
||||
QDF_STATUS cfg_parse_to_global_store(const char *path)
|
||||
{
|
||||
if (!__cfg_global_store) {
|
||||
cfg_err("Global INI store is not valid");
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
return cfg_ini_parse_to_store(path, __cfg_global_store);
|
||||
}
|
||||
|
||||
qdf_export_symbol(cfg_parse_to_global_store);
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_store_print(struct wlan_objmgr_psoc *psoc)
|
||||
{
|
||||
struct cfg_value_store *store;
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
void *offset;
|
||||
uint32_t i;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
psoc_ctx = cfg_psoc_get_ctx(psoc);
|
||||
if (!psoc_ctx)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
store = psoc_ctx->store;
|
||||
if (!store)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
for (i = 0; i < QDF_ARRAY_SIZE(cfg_meta_lookup_table); i++) {
|
||||
const struct cfg_meta *meta = &cfg_meta_lookup_table[i];
|
||||
|
||||
offset = cfg_value_ptr(store, meta);
|
||||
|
||||
switch (meta->cfg_type) {
|
||||
case CFG_INT_ITEM:
|
||||
cfg_nofl_debug("%pK %s %d", offset, meta->name,
|
||||
*((int32_t *)offset));
|
||||
break;
|
||||
case CFG_UINT_ITEM:
|
||||
cfg_nofl_debug("%pK %s %d", offset, meta->name,
|
||||
*((uint32_t *)offset));
|
||||
break;
|
||||
case CFG_BOOL_ITEM:
|
||||
cfg_nofl_debug("%pK %s %d", offset, meta->name,
|
||||
*((bool *)offset));
|
||||
break;
|
||||
case CFG_STRING_ITEM:
|
||||
cfg_nofl_debug("%pK %s %s", offset, meta->name,
|
||||
(char *)offset);
|
||||
break;
|
||||
case CFG_MAC_ITEM:
|
||||
cfg_nofl_debug("%pK %s " QDF_MAC_ADDR_FMT,
|
||||
offset, meta->name,
|
||||
QDF_MAC_ADDR_REF((uint8_t *)offset));
|
||||
break;
|
||||
case CFG_IPV4_ITEM:
|
||||
cfg_nofl_debug("%pK %s %pI4",
|
||||
offset, meta->name,
|
||||
offset);
|
||||
break;
|
||||
case CFG_IPV6_ITEM:
|
||||
cfg_nofl_debug("%pK %s %pI6c",
|
||||
offset, meta->name,
|
||||
offset);
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
cfg_exit();
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_ini_config_print(struct wlan_objmgr_psoc *psoc, uint8_t *buf,
|
||||
ssize_t *plen, ssize_t buflen)
|
||||
{
|
||||
struct cfg_value_store *store;
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
ssize_t len;
|
||||
ssize_t total_len = buflen;
|
||||
uint32_t i;
|
||||
void *offset;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
psoc_ctx = cfg_psoc_get_ctx(psoc);
|
||||
if (!psoc_ctx)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
store = psoc_ctx->store;
|
||||
if (!store)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
for (i = 0; i < QDF_ARRAY_SIZE(cfg_meta_lookup_table); i++) {
|
||||
const struct cfg_meta *meta = &cfg_meta_lookup_table[i];
|
||||
|
||||
offset = cfg_value_ptr(store, meta);
|
||||
|
||||
switch (meta->cfg_type) {
|
||||
case CFG_INT_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %d\n", meta->name,
|
||||
*((int32_t *)offset));
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_UINT_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %d\n", meta->name,
|
||||
*((uint32_t *)offset));
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_BOOL_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %d\n", meta->name,
|
||||
*((bool *)offset));
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_STRING_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %s\n", meta->name,
|
||||
(char *)offset);
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_MAC_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen,
|
||||
"%s " QDF_MAC_ADDR_FMT "\n",
|
||||
meta->name,
|
||||
QDF_MAC_ADDR_REF(
|
||||
(uint8_t *)offset));
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_IPV4_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %pI4\n",
|
||||
meta->name,
|
||||
offset);
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
case CFG_IPV6_ITEM:
|
||||
len = qdf_scnprintf(buf, buflen, "%s %pI6c\n",
|
||||
meta->name,
|
||||
offset);
|
||||
buf += len;
|
||||
buflen -= len;
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
*plen = total_len - buflen;
|
||||
cfg_exit();
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS ucfg_cfg_store_print(struct wlan_objmgr_psoc *psoc)
|
||||
{
|
||||
return cfg_store_print(psoc);
|
||||
}
|
||||
|
||||
qdf_export_symbol(ucfg_cfg_store_print);
|
||||
|
||||
QDF_STATUS ucfg_cfg_ini_config_print(struct wlan_objmgr_psoc *psoc,
|
||||
uint8_t *buf, ssize_t *plen,
|
||||
ssize_t buflen)
|
||||
{
|
||||
return cfg_ini_config_print(psoc, buf, plen, buflen);
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_on_psoc_create(struct wlan_objmgr_psoc *psoc, void *context)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
QDF_BUG(__cfg_global_store);
|
||||
if (!__cfg_global_store)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
psoc_ctx = qdf_mem_malloc(sizeof(*psoc_ctx));
|
||||
if (!psoc_ctx)
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
|
||||
qdf_atomic_inc(&__cfg_global_store->users);
|
||||
psoc_ctx->store = __cfg_global_store;
|
||||
|
||||
status = cfg_psoc_set_priv(psoc, psoc_ctx);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto put_store;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
put_store:
|
||||
cfg_store_put(__cfg_global_store);
|
||||
qdf_mem_free(psoc_ctx);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static QDF_STATUS
|
||||
cfg_on_psoc_destroy(struct wlan_objmgr_psoc *psoc, void *context)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
psoc_ctx = cfg_psoc_get_ctx(psoc);
|
||||
status = cfg_psoc_unset_priv(psoc, psoc_ctx);
|
||||
|
||||
cfg_store_put(psoc_ctx->store);
|
||||
qdf_mem_free(psoc_ctx);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS cfg_dispatcher_init(void)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
QDF_BUG(!__cfg_is_init);
|
||||
if (__cfg_is_init)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
qdf_list_create(&__cfg_stores_list, 0);
|
||||
qdf_spinlock_create(&__cfg_stores_lock);
|
||||
|
||||
status = cfg_psoc_register_create(cfg_on_psoc_create);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
return status;
|
||||
|
||||
status = cfg_psoc_register_destroy(cfg_on_psoc_destroy);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto unreg_create;
|
||||
|
||||
__cfg_is_init = true;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
unreg_create:
|
||||
cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create));
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS cfg_dispatcher_deinit(void)
|
||||
{
|
||||
cfg_enter();
|
||||
|
||||
QDF_BUG(__cfg_is_init);
|
||||
if (!__cfg_is_init)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
__cfg_is_init = false;
|
||||
|
||||
cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create));
|
||||
cfg_assert_success(cfg_psoc_unregister_destroy(cfg_on_psoc_destroy));
|
||||
|
||||
qdf_spin_lock_bh(&__cfg_stores_lock);
|
||||
QDF_BUG(qdf_list_empty(&__cfg_stores_list));
|
||||
qdf_spin_unlock_bh(&__cfg_stores_lock);
|
||||
|
||||
qdf_spinlock_destroy(&__cfg_stores_lock);
|
||||
qdf_list_destroy(&__cfg_stores_list);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS cfg_parse(const char *path)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct cfg_value_store *store;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
if (!__cfg_global_store) {
|
||||
status = cfg_store_alloc(path, &store);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
return status;
|
||||
|
||||
cfg_store_set_defaults(store);
|
||||
status = cfg_ini_parse_to_store(path, store);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto free_store;
|
||||
__cfg_global_store = store;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
store = __cfg_global_store;
|
||||
status = cfg_ini_parse_to_store(path, store);
|
||||
return status;
|
||||
|
||||
free_store:
|
||||
cfg_store_free(store);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
bool cfg_valid_ini_check(const char *path)
|
||||
{
|
||||
cfg_enter();
|
||||
|
||||
return qdf_valid_ini_check(path);
|
||||
}
|
||||
|
||||
void cfg_release(void)
|
||||
{
|
||||
cfg_enter();
|
||||
|
||||
QDF_BUG(__cfg_global_store);
|
||||
if (!__cfg_global_store)
|
||||
return;
|
||||
|
||||
cfg_store_put(__cfg_global_store);
|
||||
__cfg_global_store = NULL;
|
||||
}
|
||||
|
||||
QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
struct cfg_value_store *store;
|
||||
struct cfg_psoc_ctx *psoc_ctx;
|
||||
|
||||
cfg_enter();
|
||||
|
||||
QDF_BUG(__cfg_global_store);
|
||||
if (!__cfg_global_store)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
QDF_BUG(__cfg_is_init);
|
||||
if (!__cfg_is_init)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
QDF_BUG(psoc);
|
||||
if (!psoc)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
QDF_BUG(path);
|
||||
if (!path)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
psoc_ctx = cfg_psoc_get_ctx(psoc);
|
||||
|
||||
QDF_BUG(psoc_ctx->store == __cfg_global_store);
|
||||
if (psoc_ctx->store != __cfg_global_store)
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
/* check if @path has been parsed before */
|
||||
status = cfg_store_get(path, &store);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
status = cfg_store_alloc(path, &store);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
return status;
|
||||
|
||||
/* inherit global configuration */
|
||||
qdf_mem_copy(&store->values, &__cfg_global_store->values,
|
||||
sizeof(store->values));
|
||||
|
||||
status = cfg_ini_parse_to_store(path, store);
|
||||
if (QDF_IS_STATUS_ERROR(status))
|
||||
goto put_store;
|
||||
}
|
||||
|
||||
psoc_ctx->store = store;
|
||||
cfg_store_put(__cfg_global_store);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
put_store:
|
||||
cfg_store_put(store);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
qdf_export_symbol(cfg_psoc_parse);
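A minimal call sketch of the parse/override flow implemented above; the ini paths are placeholders and the ordering (global parse first, then per-psoc overrides) is simply how the functions in this file are meant to be combined, not code from the change itself.

/*
 * Illustrative only: parse the global ini store, then apply a per-psoc
 * override file on top of it. Both paths are placeholders.
 */
static QDF_STATUS example_cfg_bringup(struct wlan_objmgr_psoc *psoc)
{
        QDF_STATUS status;

        status = cfg_parse("wlan_global.ini");        /* placeholder path */
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* inherits the global store, then applies file-specific overrides */
        return cfg_psoc_parse(psoc, "wlan_psoc.ini");  /* placeholder path */
}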
|
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* DOC: contains coex public structure definitions
|
||||
*/
|
||||
|
||||
#ifndef _WLAN_COEX_PUBLIC_STRUCTS_H_
|
||||
#define _WLAN_COEX_PUBLIC_STRUCTS_H_
|
||||
|
||||
#ifdef WLAN_FEATURE_DBAM_CONFIG
|
||||
#define WLAN_SET_DBAM_CONFIG_TIMEOUT 5000
|
||||
|
||||
/**
|
||||
* enum coex_dbam_config_mode - dbam config mode
|
||||
* @COEX_DBAM_DISABLE: Disable DBAM
|
||||
* @COEX_DBAM_ENABLE: Enable DBAM opportunistically when internal
|
||||
* conditions are met.
|
||||
* @COEX_DBAM_FORCE_ENABLE: Enable DBAM forcefully
|
||||
*/
|
||||
enum coex_dbam_config_mode {
|
||||
COEX_DBAM_DISABLE = 0,
|
||||
COEX_DBAM_ENABLE = 1,
|
||||
COEX_DBAM_FORCE_ENABLE = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum coex_dbam_comp_status - dbam config response
|
||||
* @COEX_DBAM_COMP_SUCCESS: FW enabled/disabled DBAM mode successfully
|
||||
* @COEX_DBAM_COMP_NOT_SUPPORT: DBAM mode is not supported
|
||||
* @COEX_DBAM_COMP_FAIL: FW failed to enable/disable DBAM mode
|
||||
*/
|
||||
enum coex_dbam_comp_status {
|
||||
COEX_DBAM_COMP_SUCCESS = 0,
|
||||
COEX_DBAM_COMP_NOT_SUPPORT = 1,
|
||||
COEX_DBAM_COMP_FAIL = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct coex_dbam_config_params - Coex DBAM config command params
|
||||
* @vdev_id: Virtual device identifier
|
||||
* @dbam_mode: DBAM configuration mode - coex_dbam_config_mode enum
|
||||
*/
|
||||
struct coex_dbam_config_params {
|
||||
uint32_t vdev_id;
|
||||
enum coex_dbam_config_mode dbam_mode;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct coex_dbam_config_resp - Coex DBAM config response
|
||||
* @dbam_resp: DBAM config request response - coex_dbam_comp_status enum
|
||||
*/
|
||||
struct coex_dbam_config_resp {
|
||||
enum coex_dbam_comp_status dbam_resp;
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif
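A short usage sketch for the structures above. send_dbam_config() is a hypothetical transport helper and is not part of this header; only the struct, enum and timeout names come from this file.

static int example_force_enable_dbam(uint8_t vdev_id)
{
        struct coex_dbam_config_params params = {0};
        struct coex_dbam_config_resp resp = {0};

        params.vdev_id = vdev_id;
        params.dbam_mode = COEX_DBAM_FORCE_ENABLE;

        /* send_dbam_config() is an assumed helper that blocks for up to
         * WLAN_SET_DBAM_CONFIG_TIMEOUT ms and fills @resp.
         */
        if (send_dbam_config(&params, &resp, WLAN_SET_DBAM_CONFIG_TIMEOUT))
                return -1;

        return (resp.dbam_resp == COEX_DBAM_COMP_SUCCESS) ? 0 : -1;
}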
|
@ -0,0 +1,227 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "dp_cal_client_api.h"
|
||||
#include "qdf_module.h"
|
||||
|
||||
/* dp_cal_client_attach - function to attach cal client timer
|
||||
* @cal_client_ctx: cal client timer context
|
||||
* @pdev: pdev handle
|
||||
* @osdev: device pointer
|
||||
* @dp_iterate_peer_list : function pointer to iterate and update peer stats
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx,
|
||||
struct cdp_pdev *pdev,
|
||||
qdf_device_t osdev,
|
||||
void (*dp_iterate_peer_list)(struct cdp_pdev *))
|
||||
{
|
||||
struct cal_client *cal_cl;
|
||||
|
||||
*cal_client_ctx = qdf_mem_malloc(sizeof(struct cal_client));
|
||||
|
||||
if (!(*cal_client_ctx))
|
||||
return;
|
||||
|
||||
cal_cl = (struct cal_client *)(*cal_client_ctx);
|
||||
cal_cl->iterate_update_peer_list = dp_iterate_peer_list;
|
||||
cal_cl->pdev_hdl = pdev;
|
||||
|
||||
qdf_timer_init(osdev, &cal_cl->cal_client_timer,
|
||||
dp_cal_client_stats_timer_fn, *cal_client_ctx,
|
||||
QDF_TIMER_TYPE_WAKE_APPS);
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_attach);
|
||||
|
||||
/* dp_cal_client_detach - detach cal client timer
|
||||
* @cal_client_ctx: cal client timer context
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx)
|
||||
{
|
||||
struct cal_client *cal_cl;
|
||||
|
||||
if (*cal_client_ctx) {
|
||||
cal_cl = (struct cal_client *)*cal_client_ctx;
|
||||
|
||||
qdf_timer_stop(&cal_cl->cal_client_timer);
|
||||
qdf_timer_free(&cal_cl->cal_client_timer);
|
||||
qdf_mem_free(cal_cl);
|
||||
*cal_client_ctx = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_detach);
|
||||
|
||||
/* dp_cal_client_timer_start - api to start cal client timer
|
||||
* @ctx: cal client timer ctx
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_timer_start(void *ctx)
|
||||
{
|
||||
struct cal_client *cal_cl;
|
||||
|
||||
if (ctx) {
|
||||
cal_cl = (struct cal_client *)ctx;
|
||||
qdf_timer_start(&cal_cl->cal_client_timer, DP_CAL_CLIENT_TIME);
|
||||
}
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_timer_start);
|
||||
|
||||
/* dp_cal_client_timer_stop - api to stop cal client timer
|
||||
* @ctx: cal client timer ctx
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_timer_stop(void *ctx)
|
||||
{
|
||||
struct cal_client *cal_cl;
|
||||
|
||||
if (ctx) {
|
||||
cal_cl = (struct cal_client *)ctx;
|
||||
qdf_timer_sync_cancel(&cal_cl->cal_client_timer);
|
||||
qdf_timer_stop(&cal_cl->cal_client_timer);
|
||||
}
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_timer_stop);
|
||||
|
||||
/* dp_cal_client_stats_timer_fn - function called on timer interval
|
||||
* @ctx: cal client timer ctx
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_stats_timer_fn(void *ctx)
|
||||
{
|
||||
struct cal_client *cal_cl = (struct cal_client *)ctx;
|
||||
|
||||
if (!cal_cl)
|
||||
return;
|
||||
|
||||
cal_cl->iterate_update_peer_list(cal_cl->pdev_hdl);
|
||||
qdf_timer_mod(&cal_cl->cal_client_timer, DP_CAL_CLIENT_TIME);
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_stats_timer_fn);
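A lifecycle sketch for the attach/start/stop/detach APIs above. my_iterate_peers() and the pdev/osdev handles are caller-owned placeholders, not symbols defined in this file.

static void my_iterate_peers(struct cdp_pdev *pdev);   /* caller-provided */
static struct cdp_cal_client *example_cal_ctx;

static void example_cal_client_setup(struct cdp_pdev *pdev, qdf_device_t osdev)
{
        dp_cal_client_attach(&example_cal_ctx, pdev, osdev, my_iterate_peers);
        if (example_cal_ctx)
                dp_cal_client_timer_start(example_cal_ctx);
}

static void example_cal_client_teardown(void)
{
        dp_cal_client_timer_stop(example_cal_ctx);
        dp_cal_client_detach(&example_cal_ctx);
}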
|
||||
|
||||
/* dp_cal_client_update_peer_stats - update peer stats in peer structure
|
||||
* @peer_stats: cdp peer stats pointer
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats)
|
||||
{
|
||||
uint32_t temp_rx_bytes = peer_stats->rx.to_stack.bytes;
|
||||
uint32_t temp_rx_data = peer_stats->rx.to_stack.num;
|
||||
uint32_t temp_tx_bytes = peer_stats->tx.tx_success.bytes;
|
||||
uint32_t temp_tx_data = peer_stats->tx.tx_success.num;
|
||||
uint32_t temp_tx_ucast_pkts = peer_stats->tx.ucast.num;
|
||||
|
||||
peer_stats->rx.rx_byte_rate = temp_rx_bytes -
|
||||
peer_stats->rx.rx_bytes_success_last;
|
||||
peer_stats->rx.rx_data_rate = temp_rx_data -
|
||||
peer_stats->rx.rx_data_success_last;
|
||||
peer_stats->tx.tx_byte_rate = temp_tx_bytes -
|
||||
peer_stats->tx.tx_bytes_success_last;
|
||||
peer_stats->tx.tx_data_rate = temp_tx_data -
|
||||
peer_stats->tx.tx_data_success_last;
|
||||
peer_stats->tx.tx_data_ucast_rate = temp_tx_ucast_pkts -
|
||||
peer_stats->tx.tx_data_ucast_last;
|
||||
|
||||
/* Check tx and rx packets in last one second, and increment
|
||||
* inactive time for peer
|
||||
*/
|
||||
if (peer_stats->tx.tx_data_rate || peer_stats->rx.rx_data_rate)
|
||||
peer_stats->tx.inactive_time = 0;
|
||||
else
|
||||
peer_stats->tx.inactive_time++;
|
||||
|
||||
peer_stats->rx.rx_bytes_success_last = temp_rx_bytes;
|
||||
peer_stats->rx.rx_data_success_last = temp_rx_data;
|
||||
peer_stats->tx.tx_bytes_success_last = temp_tx_bytes;
|
||||
peer_stats->tx.tx_data_success_last = temp_tx_data;
|
||||
peer_stats->tx.tx_data_ucast_last = temp_tx_ucast_pkts;
|
||||
|
||||
if (peer_stats->tx.tx_data_ucast_rate) {
|
||||
if (peer_stats->tx.tx_data_ucast_rate >
|
||||
peer_stats->tx.tx_data_rate)
|
||||
peer_stats->tx.last_per =
|
||||
((peer_stats->tx.tx_data_ucast_rate -
|
||||
peer_stats->tx.tx_data_rate) * 100) /
|
||||
peer_stats->tx.tx_data_ucast_rate;
|
||||
else
|
||||
peer_stats->tx.last_per = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_update_peer_stats);
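A worked reading of the last_per computation above, with invented counts:

/*
 * Example (illustrative numbers): if 1000 unicast frames were counted in
 * the last interval (tx_data_ucast_rate) and 950 completed successfully
 * (tx_data_rate), then last_per = ((1000 - 950) * 100) / 1000 = 5,
 * i.e. a 5% packet error rate for that interval.
 */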
|
||||
|
||||
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *peer_stats_intf,
|
||||
struct cdp_calibr_stats *peer_calibr_stats)
|
||||
{
|
||||
uint32_t temp_rx_bytes = peer_stats_intf->to_stack.bytes;
|
||||
uint32_t temp_rx_data = peer_stats_intf->to_stack.num;
|
||||
uint32_t temp_tx_bytes = peer_stats_intf->tx_success.bytes;
|
||||
uint32_t temp_tx_data = peer_stats_intf->tx_success.num;
|
||||
uint32_t temp_tx_ucast_pkts = peer_stats_intf->tx_ucast.num;
|
||||
|
||||
peer_calibr_stats->rx.rx_byte_rate = temp_rx_bytes -
|
||||
peer_calibr_stats->rx.rx_bytes_success_last;
|
||||
peer_calibr_stats->rx.rx_data_rate = temp_rx_data -
|
||||
peer_calibr_stats->rx.rx_data_success_last;
|
||||
peer_calibr_stats->tx.tx_byte_rate = temp_tx_bytes -
|
||||
peer_calibr_stats->tx.tx_bytes_success_last;
|
||||
peer_calibr_stats->tx.tx_data_rate = temp_tx_data -
|
||||
peer_calibr_stats->tx.tx_data_success_last;
|
||||
peer_calibr_stats->tx.tx_data_ucast_rate = temp_tx_ucast_pkts -
|
||||
peer_calibr_stats->tx.tx_data_ucast_last;
|
||||
|
||||
/* Check tx and rx packets in last one second, and increment
|
||||
* inactive time for peer
|
||||
*/
|
||||
if (peer_calibr_stats->tx.tx_data_rate || peer_calibr_stats->rx.rx_data_rate)
|
||||
peer_calibr_stats->tx.inactive_time = 0;
|
||||
else
|
||||
peer_calibr_stats->tx.inactive_time++;
|
||||
|
||||
peer_calibr_stats->rx.rx_bytes_success_last = temp_rx_bytes;
|
||||
peer_calibr_stats->rx.rx_data_success_last = temp_rx_data;
|
||||
peer_calibr_stats->tx.tx_bytes_success_last = temp_tx_bytes;
|
||||
peer_calibr_stats->tx.tx_data_success_last = temp_tx_data;
|
||||
peer_calibr_stats->tx.tx_data_ucast_last = temp_tx_ucast_pkts;
|
||||
|
||||
if (peer_calibr_stats->tx.tx_data_ucast_rate) {
|
||||
if (peer_calibr_stats->tx.tx_data_ucast_rate >
|
||||
peer_calibr_stats->tx.tx_data_rate)
|
||||
peer_calibr_stats->tx.last_per =
|
||||
((peer_calibr_stats->tx.tx_data_ucast_rate -
|
||||
peer_calibr_stats->tx.tx_data_rate) * 100) /
|
||||
peer_calibr_stats->tx.tx_data_ucast_rate;
|
||||
else
|
||||
peer_calibr_stats->tx.last_per = 0;
|
||||
}
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_cal_client_update_peer_stats_wifi3);
|
7289
qcom/opensource/wlan/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.c
Normal file
File diff suppressed because it is too large
@ -0,0 +1,285 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_RATES_H_
|
||||
#define _DP_RATES_H_
|
||||
|
||||
#define CMN_DP_ASSERT(__bool)
|
||||
|
||||
/*
|
||||
* Modes Types
|
||||
*/
|
||||
enum CMN_MODE_TYPES {
|
||||
CMN_IEEE80211_MODE_INVALID = 0,
|
||||
CMN_IEEE80211_MODE_A,
|
||||
CMN_IEEE80211_MODE_B,
|
||||
CMN_IEEE80211_MODE_G,
|
||||
CMN_IEEE80211_MODE_TURBO,
|
||||
CMN_IEEE80211_MODE_NA,
|
||||
CMN_IEEE80211_MODE_NG,
|
||||
CMN_IEEE80211_MODE_N,
|
||||
CMN_IEEE80211_MODE_AC,
|
||||
CMN_IEEE80211_MODE_AXA,
|
||||
CMN_IEEE80211_MODE_AXG,
|
||||
CMN_IEEE80211_MODE_AX,
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
CMN_IEEE80211_MODE_BEA,
|
||||
CMN_IEEE80211_MODE_BEG,
|
||||
#endif
|
||||
CMN_IEEE80211_MODE_MAX
|
||||
};
|
||||
|
||||
#define NUM_SPATIAL_STREAMS 8
|
||||
#define MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ 4
|
||||
#define VHT_EXTRA_MCS_SUPPORT
|
||||
#define CONFIG_160MHZ_SUPPORT 1
|
||||
#define NUM_HT_MCS 8
|
||||
#define NUM_VHT_MCS 12
|
||||
|
||||
#define NUM_HE_MCS 14
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
#define NUM_EHT_MCS 16
|
||||
#endif
|
||||
|
||||
#define NUM_SPATIAL_STREAM 4
|
||||
#define NUM_SPATIAL_STREAMS 8
|
||||
#define WHAL_160MHZ_SUPPORT 1
|
||||
#define MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ 4
|
||||
#define RT_GET_RT(_rt) ((const struct DP_CMN_RATE_TABLE *)(_rt))
|
||||
#define RT_GET_INFO(_rt, _index) RT_GET_RT(_rt)->info[(_index)]
|
||||
#define RT_GET_RAW_KBPS(_rt, _index) \
|
||||
(RT_GET_INFO(_rt, (_index)).ratekbps)
|
||||
#define RT_GET_SGI_KBPS(_rt, _index) \
|
||||
(RT_GET_INFO(_rt, (_index)).ratekbpssgi)
|
||||
|
||||
#define HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
|
||||
#define RT_INVALID_INDEX (0xff)
|
||||
/* pow2 to optimize out * and / */
|
||||
#define DP_ATH_RATE_EP_MULTIPLIER BIT(7)
|
||||
#define DP_ATH_EP_MUL(a, b) ((a) * (b))
|
||||
#define DP_ATH_RATE_LPF_LEN 10 /* Low pass filter length
|
||||
* for averaging rates
|
||||
*/
|
||||
#define DUMMY_MARKER 0
|
||||
#define DP_ATH_RATE_IN(c) (DP_ATH_EP_MUL((c), DP_ATH_RATE_EP_MULTIPLIER))
|
||||
|
||||
static inline int dp_ath_rate_lpf(uint64_t _d, int _e)
|
||||
{
|
||||
_e = DP_ATH_RATE_IN((_e));
|
||||
return (((_d) != DUMMY_MARKER) ? ((((_d) << 3) + (_e) - (_d)) >> 3) :
|
||||
(_e));
|
||||
}
|
||||
|
||||
static inline int dp_ath_rate_out(uint64_t _i)
|
||||
{
|
||||
int _mul = DP_ATH_RATE_EP_MULTIPLIER;
|
||||
|
||||
return (((_i) != DUMMY_MARKER) ?
|
||||
((((_i) % (_mul)) >= ((_mul) / 2)) ?
|
||||
((_i) + ((_mul) - 1)) / (_mul) : (_i) / (_mul)) :
|
||||
DUMMY_MARKER);
|
||||
}
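A worked example of this fixed-point averaging, using invented numbers:

/*
 * The average is kept scaled by DP_ATH_RATE_EP_MULTIPLIER (128) and the
 * filter weights old:new as 7:1. For example:
 *
 *   avg = 12800;                      // 100 in EP units
 *   avg = dp_ath_rate_lpf(avg, 108);  // (7*12800 + 108*128)/8 = 12928
 *   dp_ath_rate_out(avg);             // 12928/128 = 101, rounded to nearest
 */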
|
||||
|
||||
#define RXDESC_GET_DATA_LEN(rx_desc) \
|
||||
(txrx_pdev->htt_pdev->ar_rx_ops->msdu_desc_msdu_length(rx_desc))
|
||||
#define ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
|
||||
(((_pream) << 6) | ((_nss) << 4) | (_rate))
|
||||
#define GET_HW_RATECODE_PREAM(_rcode) (((_rcode) >> 6) & 0x3)
|
||||
#define GET_HW_RATECODE_NSS(_rcode) (((_rcode) >> 4) & 0x3)
|
||||
#define GET_HW_RATECODE_RATE(_rcode) (((_rcode) >> 0) & 0xF)
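A brief packing example for the ratecode macros above (values invented; preamble 3 corresponds to HW_RATECODE_PREAM_VHT declared later in this header):

/*
 * Example: preamble 3 (VHT), NSS field 1, MCS 7 packs as
 * ASSEMBLE_HW_RATECODE(7, 1, 3) = (3 << 6) | (1 << 4) | 7 = 0xd7,
 * and GET_HW_RATECODE_PREAM/NSS/RATE recover 3, 1 and 7 from 0xd7.
 */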
|
||||
|
||||
#define VHT_INVALID_MCS (0xFF) /* Certain MCSs are not valid in VHT mode */
|
||||
#define VHT_INVALID_BCC_RATE 0
|
||||
#define NUM_HT_SPATIAL_STREAM 4
|
||||
|
||||
#define NUM_HT_RIX_PER_BW (NUM_HT_MCS * NUM_HT_SPATIAL_STREAM)
|
||||
#define NUM_VHT_RIX_PER_BW (NUM_VHT_MCS * NUM_SPATIAL_STREAMS)
|
||||
#define NUM_HE_RIX_PER_BW (NUM_HE_MCS * NUM_SPATIAL_STREAMS)
|
||||
|
||||
#define NUM_VHT_RIX_FOR_160MHZ (NUM_VHT_MCS * \
|
||||
MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ)
|
||||
#define NUM_HE_RIX_FOR_160MHZ (NUM_HE_MCS * \
|
||||
MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ)
|
||||
|
||||
#define CCK_RATE_TABLE_INDEX 0
|
||||
#define CCK_RATE_TABLE_END_INDEX 3
|
||||
#define CCK_RATE_11M_INDEX 0
|
||||
#define CCK_FALLBACK_MIN_RATE 0x3 /** 1 Mbps */
|
||||
#define CCK_FALLBACK_MAX_RATE 0x2 /** 2 Mbps */
|
||||
|
||||
#define OFDM_RATE_TABLE_INDEX 4
|
||||
#define OFDMA_RATE_54M_INDEX 8
|
||||
#define OFDMA_RATE_TABLE_END_INDEX 11
|
||||
|
||||
#define HT_20_RATE_TABLE_INDEX 12
|
||||
#define HT_40_RATE_TABLE_INDEX (HT_20_RATE_TABLE_INDEX + NUM_HT_RIX_PER_BW)
|
||||
|
||||
#define VHT_20_RATE_TABLE_INDEX (HT_40_RATE_TABLE_INDEX + NUM_HT_RIX_PER_BW)
|
||||
#define VHT_40_RATE_TABLE_INDEX (VHT_20_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW)
|
||||
#define VHT_80_RATE_TABLE_INDEX (VHT_40_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW)
|
||||
|
||||
#define VHT_160_RATE_TABLE_INDEX (VHT_80_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW)
|
||||
#define VHT_LAST_RIX_PLUS_ONE (VHT_160_RATE_TABLE_INDEX + \
|
||||
NUM_VHT_RIX_FOR_160MHZ)
|
||||
|
||||
#define HE_20_RATE_TABLE_INDEX VHT_LAST_RIX_PLUS_ONE
|
||||
#define HE_40_RATE_TABLE_INDEX (HE_20_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW)
|
||||
#define HE_80_RATE_TABLE_INDEX (HE_40_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW)
|
||||
|
||||
#define HE_160_RATE_TABLE_INDEX (HE_80_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW)
|
||||
#define HE_LAST_RIX_PLUS_ONE (HE_160_RATE_TABLE_INDEX + NUM_HE_RIX_FOR_160MHZ)
|
||||
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
#define NUM_EHT_SPATIAL_STREAM 4
|
||||
#define NUM_EHT_RIX_PER_BW (NUM_EHT_MCS * NUM_EHT_SPATIAL_STREAM)
|
||||
|
||||
#define EHT_20_RATE_TABLE_INDEX HE_LAST_RIX_PLUS_ONE
|
||||
#define EHT_40_RATE_TABLE_INDEX (EHT_20_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_60_RATE_TABLE_INDEX (EHT_40_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_80_RATE_TABLE_INDEX (EHT_60_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_120_RATE_TABLE_INDEX (EHT_80_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_140_RATE_TABLE_INDEX (EHT_120_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_160_RATE_TABLE_INDEX (EHT_140_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_200_RATE_TABLE_INDEX (EHT_160_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_240_RATE_TABLE_INDEX (EHT_200_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_280_RATE_TABLE_INDEX (EHT_240_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_320_RATE_TABLE_INDEX (EHT_280_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#define EHT_LAST_RIX_PLUS_ONE (EHT_320_RATE_TABLE_INDEX + NUM_EHT_RIX_PER_BW)
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
#define DP_RATE_TABLE_SIZE EHT_LAST_RIX_PLUS_ONE
|
||||
#else
|
||||
#define DP_RATE_TABLE_SIZE HE_LAST_RIX_PLUS_ONE
|
||||
#endif
|
||||
|
||||
#define INVALID_RATE_ERR -1
|
||||
#define NUM_LEGACY_MCS 1
|
||||
|
||||
/*
|
||||
* The order of the rate types is jumbled below because the current code
* implementation is already mapped in that order.
|
||||
*
|
||||
* @DP_HT_RATE: HT Ratetype
|
||||
* @DP_VHT_RATE: VHT Ratetype
|
||||
* @DP_11B_CCK_RATE: 11B CCK Ratetype
|
||||
* @DP_11A_OFDM_RATE: 11A OFDM Ratetype
|
||||
* @DP_11G_CCK_OFDM_RATE: 11G CCK + OFDM Ratetype
|
||||
* @DP_HE_RATE: HE Ratetype
|
||||
*/
|
||||
enum DP_CMN_RATE_TYPE {
|
||||
DP_HT_RATE = 2,
|
||||
DP_VHT_RATE,
|
||||
DP_11B_CCK_RATE,
|
||||
DP_11A_OFDM_RATE,
|
||||
DP_11G_CCK_OFDM_RATE,
|
||||
DP_HE_RATE
|
||||
};
|
||||
|
||||
#define DP_RATEKBPS_SGI(i) (dp_11abgnratetable.info[i].ratekbpssgi)
|
||||
#define DP_RATEKBPS(i) (dp_11abgnratetable.info[i].ratekbps)
|
||||
#define RATE_ROUNDOUT(rate) (((rate) / 1000) * 1000)
|
||||
|
||||
/* The following would span more than one octet
|
||||
* when 160MHz BW defined for VHT
|
||||
* Also it's important to maintain the ordering of
|
||||
* this enum else it would break other rate adaptation functions.
|
||||
*/
|
||||
enum DP_CMN_MODULATION_TYPE {
|
||||
DP_CMN_MOD_IEEE80211_T_DS, /* direct sequence spread spectrum */
|
||||
DP_CMN_MOD_IEEE80211_T_OFDM, /* frequency division multiplexing */
|
||||
DP_CMN_MOD_IEEE80211_T_HT_20,
|
||||
DP_CMN_MOD_IEEE80211_T_HT_40,
|
||||
DP_CMN_MOD_IEEE80211_T_VHT_20,
|
||||
DP_CMN_MOD_IEEE80211_T_VHT_40,
|
||||
DP_CMN_MOD_IEEE80211_T_VHT_80,
|
||||
DP_CMN_MOD_IEEE80211_T_VHT_160,
|
||||
DP_CMN_MOD_IEEE80211_T_HE_20, /* 11AX support enabled */
|
||||
DP_CMN_MOD_IEEE80211_T_HE_40,
|
||||
DP_CMN_MOD_IEEE80211_T_HE_80,
|
||||
DP_CMN_MOD_IEEE80211_T_HE_160,
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_20,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_40,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_60,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_80,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_120,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_140,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_160,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_200,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_240,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_280,
|
||||
DP_CMN_MOD_IEEE80211_T_EHT_320,
|
||||
#endif
|
||||
DP_CMN_MOD_IEEE80211_T_MAX_PHY
|
||||
};
|
||||
|
||||
/* more common nomenclature */
|
||||
#define DP_CMN_MOD_IEEE80211_T_CCK DP_CMN_MOD_IEEE80211_T_DS
|
||||
|
||||
enum HW_RATECODE_PREAM_TYPE {
|
||||
HW_RATECODE_PREAM_OFDM,
|
||||
HW_RATECODE_PREAM_CCK,
|
||||
HW_RATECODE_PREAM_HT,
|
||||
HW_RATECODE_PREAM_VHT,
|
||||
HW_RATECODE_PREAM_HE,
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
HW_RATECODE_PREAM_EHT,
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
enum BW_TYPES_FP {
|
||||
BW_20MHZ_F = 0,
|
||||
BW_40MHZ_F,
|
||||
BW_60MHZ_P,
|
||||
BW_80MHZ_F,
|
||||
BW_120MHZ_P,
|
||||
BW_140MHZ_P,
|
||||
BW_160MHZ_F,
|
||||
BW_200MHZ_P,
|
||||
BW_240MHZ_P,
|
||||
BW_280MHZ_P,
|
||||
BW_320MHZ_F,
|
||||
BW_FP_CNT,
|
||||
BW_FP_LAST = BW_320MHZ_F,
|
||||
};
|
||||
#endif
|
||||
|
||||
enum DP_CMN_MODULATION_TYPE dp_getmodulation(uint16_t pream_type,
|
||||
uint8_t width,
|
||||
uint8_t punc_mode);
|
||||
|
||||
uint32_t
|
||||
dp_getrateindex(uint32_t gi, uint16_t mcs, uint8_t nss, uint8_t preamble,
|
||||
uint8_t bw, uint8_t punc_bw, uint32_t *rix, uint16_t *ratecode);
|
||||
|
||||
int dp_rate_idx_to_kbps(uint8_t rate_idx, uint8_t gintval);
|
||||
|
||||
#if ALL_POSSIBLE_RATES_SUPPORTED
|
||||
int dp_get_supported_rates(int mode, int shortgi, int **rates);
|
||||
int dp_get_kbps_to_mcs(int kbps_rate, int shortgi, int htflag);
|
||||
#else
|
||||
int dp_get_supported_rates(int mode, int shortgi, int nss,
|
||||
int ch_width, int **rates);
|
||||
int dp_get_kbps_to_mcs(int kbps_rate, int shortgi, int htflag,
|
||||
int nss, int ch_width);
|
||||
#endif
|
||||
|
||||
#endif /*_DP_RATES_H_*/
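A hedged call sketch for the lookup helpers declared above. The encodings of the gi, nss, preamble and bw arguments are assumptions made for illustration only; this header does not document them.

static void example_rate_lookup(void)
{
        uint32_t rix = 0;
        uint16_t ratecode = 0;
        uint32_t idx;
        int kbps;

        /* assumed encodings: long GI = 0, 0-based nss, 40MHz bw = 1 */
        idx = dp_getrateindex(0, 7 /* mcs */, 0 /* nss */,
                              HW_RATECODE_PREAM_VHT, 1 /* bw */,
                              0 /* punc_bw */, &rix, &ratecode);
        kbps = dp_rate_idx_to_kbps((uint8_t)idx, 0 /* gintval */);
        (void)kbps;
}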
|
118
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h
Normal file
@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_bus.h
|
||||
* Define the host data path bus related functions
|
||||
*/
|
||||
#ifndef _CDP_TXRX_BUS_H_
|
||||
#define _CDP_TXRX_BUS_H_
|
||||
|
||||
/**
|
||||
* cdp_bus_suspend() - suspend bus
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of dp pdev handle
|
||||
*
|
||||
* suspend bus
|
||||
*
|
||||
* return QDF_STATUS_SUCCESS if suspend is not implemented or suspend is done
|
||||
*/
|
||||
static inline QDF_STATUS cdp_bus_suspend(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->bus_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->bus_ops->bus_suspend)
|
||||
return soc->ops->bus_ops->bus_suspend(soc, pdev_id);
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_bus_resume() - resume bus
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of dp pdev handle
|
||||
*
|
||||
* resume bus
|
||||
*
|
||||
* return QDF_STATUS_SUCCESS if resume is not implemented or resume is done
|
||||
*/
|
||||
static inline QDF_STATUS cdp_bus_resume(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->bus_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->bus_ops->bus_resume)
|
||||
return soc->ops->bus_ops->bus_resume(soc, pdev_id);
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_process_wow_ack_rsp() - Process wow ack response
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of dp pdev handle
|
||||
*
|
||||
* Do any required data path operations for target wow ack
|
||||
* suspend response.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void cdp_process_wow_ack_rsp(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->bus_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->bus_ops->process_wow_ack_rsp)
|
||||
return soc->ops->bus_ops->process_wow_ack_rsp(soc, pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_process_target_suspend_req() - Process target suspend request
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of dp pdev handle
|
||||
*
|
||||
* Complete the datapath specific work before target suspend
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void cdp_process_target_suspend_req(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->bus_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->bus_ops->process_target_suspend_req)
|
||||
return soc->ops->bus_ops->process_target_suspend_req(soc,
|
||||
pdev_id);
|
||||
}
|
||||
#endif /* _CDP_TXRX_BUS_H_ */
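An illustrative wrapper showing how a bus layer might forward suspend into these helpers; treating QDF_STATUS_E_NOSUPPORT as success mirrors the behaviour documented above. This is a sketch, not code from the change.

static QDF_STATUS example_dp_bus_suspend(ol_txrx_soc_handle soc,
                                         uint8_t pdev_id)
{
        QDF_STATUS status = cdp_bus_suspend(soc, pdev_id);

        /* no datapath hook registered: nothing to suspend */
        if (status == QDF_STATUS_E_NOSUPPORT)
                return QDF_STATUS_SUCCESS;

        return status;
}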
|
408
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h
Normal file
@ -0,0 +1,408 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019,2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_cfg.h
|
||||
* Define the host data path configuration API functions
|
||||
*/
|
||||
#ifndef _CDP_TXRX_CFG_H_
|
||||
#define _CDP_TXRX_CFG_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_rx_fwd_disabled() - enable/disable rx forwarding
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: data path device instance
|
||||
* @disable_rx_fwd: enable or disable rx forwarding
|
||||
*
|
||||
* enable/disable rx forwarding
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev,
|
||||
uint8_t disable_rx_fwd)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_cfg_rx_fwd_disabled)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev,
|
||||
disable_rx_fwd);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_packet_log_enabled() - enable/disable packet log
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: data path device instance
|
||||
* @val: enable or disable packet log
|
||||
*
|
||||
* packet log enable or disable
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_packet_log_enabled(ol_txrx_soc_handle soc,
|
||||
struct cdp_cfg *cfg_pdev, uint8_t val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_cfg_packet_log_enabled)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_cfg_packet_log_enabled(cfg_pdev,
|
||||
val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_attach() - attach config module
|
||||
* @soc: data path soc handle
|
||||
* @osdev: os instance
|
||||
* @cfg_param: configuration parameter should be propagated
|
||||
*
|
||||
* Allocate configuration module instance, and propagate configuration values
|
||||
*
|
||||
* return soc configuration module instance
|
||||
*/
|
||||
static inline struct cdp_cfg
|
||||
*cdp_cfg_attach(ol_txrx_soc_handle soc,
|
||||
qdf_device_t osdev, void *cfg_param)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->cfg_attach)
|
||||
return NULL;
|
||||
|
||||
return soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_vdev_rx_set_intrabss_fwd() - enable/disable intra bss forwarding
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @val: enable or disable intra bss forwarding
|
||||
*
|
||||
* ap isolate, do not forward intra bss traffic
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_vdev_rx_set_intrabss_fwd(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, bool val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd(soc, vdev_id, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_is_rx_fwd_disabled() - get vdev rx forward
|
||||
* @soc: data path soc handle
|
||||
* @vdev: virtual interface instance
|
||||
*
|
||||
* Return rx forward disabled status
*
* return 1 rx forwarding disabled
*        0 rx forwarding enabled
|
||||
*/
|
||||
static inline uint8_t
|
||||
cdp_cfg_is_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->is_rx_fwd_disabled)
|
||||
return 0;
|
||||
|
||||
return soc->ops->cfg_ops->is_rx_fwd_disabled(vdev);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_tx_set_is_mgmt_over_wmi_enabled() - mgmt tx over wmi enable/disable
|
||||
* @soc: data path soc handle
|
||||
* @value: feature enable or disable
|
||||
*
|
||||
* Enable or disable management packet TX over WMI feature
|
||||
*
|
||||
* return None
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(ol_txrx_soc_handle soc,
|
||||
uint8_t value)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_is_high_latency() - query data path is in high or low latency
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: data path device instance
|
||||
*
|
||||
* query data path is in high or low latency
|
||||
*
|
||||
* return 1 high latency data path, usb or sdio
|
||||
* 0 low latency data path
|
||||
*/
|
||||
static inline int
|
||||
cdp_cfg_is_high_latency(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->is_high_latency)
|
||||
return 0;
|
||||
|
||||
return soc->ops->cfg_ops->is_high_latency(cfg_pdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_flow_control_parameters() - set flow control params
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: dp config module instance
|
||||
* @param: parameters should set
|
||||
*
|
||||
* set flow control params
|
||||
*
|
||||
* return None
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_flow_control_parameters(ol_txrx_soc_handle soc,
|
||||
struct cdp_cfg *cfg_pdev, void *param)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_flow_control_parameters)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_flow_control_parameters(cfg_pdev,
|
||||
param);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_flow_steering - Set Rx flow steering config based on CFG ini
|
||||
* config.
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: handle to the physical device
|
||||
* @val: 0 - disable, 1 - enable
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void cdp_cfg_set_flow_steering(ol_txrx_soc_handle soc,
|
||||
struct cdp_cfg *cfg_pdev, uint8_t val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_flow_steering)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_flow_steering(cfg_pdev, val);
|
||||
}
|
||||
|
||||
static inline void cdp_cfg_get_max_peer_id(ol_txrx_soc_handle soc,
|
||||
struct cdp_cfg *cfg_pdev)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_ptp_rx_opt_enabled() - enable/disable ptp rx timestamping
|
||||
* @soc: data path soc handle
|
||||
* @cfg_pdev: data path device instance
|
||||
* @val: enable or disable ptp rx timestamping
|
||||
*
|
||||
* ptp rx timestamping enable or disable
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_ptp_rx_opt_enabled(ol_txrx_soc_handle soc,
|
||||
struct cdp_cfg *cfg_pdev, uint8_t val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_ptp_rx_opt_enabled)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_ptp_rx_opt_enabled(cfg_pdev, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_new_htt_msg_format() - set htt h2t msg feature
|
||||
* @soc: datapath soc handle
|
||||
* @val: enable or disable new htt h2t msg feature
|
||||
*
|
||||
* Enable whether htt h2t message length includes htc header length
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_new_htt_msg_format(ol_txrx_soc_handle soc,
|
||||
uint8_t val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_new_htt_msg_format)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_new_htt_msg_format(val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_set_peer_unmap_conf_support() - set peer unmap conf feature
|
||||
* @soc: datapath soc handle
|
||||
* @val: enable or disable peer unmap conf feature
|
||||
*
|
||||
* Set if peer unmap confirmation feature is supported by both FW and in INI
|
||||
*
|
||||
* return NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_cfg_set_peer_unmap_conf_support(ol_txrx_soc_handle soc, bool val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_peer_unmap_conf_support)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_peer_unmap_conf_support(val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_cfg_get_peer_unmap_conf_support() - check peer unmap conf feature
|
||||
* @soc: datapath soc handle
|
||||
*
|
||||
* Check if peer unmap confirmation feature is enabled
|
||||
*
|
||||
* return true if peer unmap confirmation feature is enabled else false
|
||||
*/
|
||||
static inline bool
|
||||
cdp_cfg_get_peer_unmap_conf_support(ol_txrx_soc_handle soc)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->get_peer_unmap_conf_support)
|
||||
return false;
|
||||
|
||||
return soc->ops->cfg_ops->get_peer_unmap_conf_support();
|
||||
}
|
||||
|
||||
static inline void
|
||||
cdp_cfg_set_tx_compl_tsf64(ol_txrx_soc_handle soc,
|
||||
uint8_t val)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_debug("invalid instance");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->set_tx_compl_tsf64)
|
||||
return;
|
||||
|
||||
soc->ops->cfg_ops->set_tx_compl_tsf64(val);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
cdp_cfg_get_tx_compl_tsf64(ol_txrx_soc_handle soc)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_debug("invalid instance");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->cfg_ops ||
|
||||
!soc->ops->cfg_ops->get_tx_compl_tsf64)
|
||||
return false;
|
||||
|
||||
return soc->ops->cfg_ops->get_tx_compl_tsf64();
|
||||
}
|
||||
|
||||
#endif /* _CDP_TXRX_CFG_H_ */
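A short usage sketch for the config wrappers above; soc, osdev and cfg_param are caller-owned objects and the values pushed are arbitrary examples, not recommendations from this header.

static struct cdp_cfg *example_cfg_init(ol_txrx_soc_handle soc,
                                        qdf_device_t osdev, void *cfg_param)
{
        struct cdp_cfg *cfg_pdev = cdp_cfg_attach(soc, osdev, cfg_param);

        if (!cfg_pdev)
                return NULL;

        cdp_cfg_set_rx_fwd_disabled(soc, cfg_pdev, 1);
        cdp_cfg_set_packet_log_enabled(soc, cfg_pdev, 0);

        return cfg_pdev;
}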
|
3533
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h
Normal file
File diff suppressed because it is too large
204
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file cdp_txrx_cmn_reg.h
|
||||
* @brief Define the host data path converged API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_CMN_REG_H_
|
||||
#define _CDP_TXRX_CMN_REG_H_
|
||||
|
||||
#include "hif_main.h"
|
||||
#include "cdp_txrx_cmn_struct.h"
|
||||
|
||||
#define MOB_DRV_LEGACY_DP 0xdeed
|
||||
/* Lithium device IDs */
|
||||
#define LITHIUM_DP 0xfffd
|
||||
/* Beryllium device IDs */
|
||||
#define BERYLLIUM_DP 0xaffe
|
||||
|
||||
/* RHINE device IDs */
|
||||
#define RHINE_DP 0xbff0
|
||||
/* Use device IDs for attach in future */
|
||||
|
||||
/* enum cdp_arch_type - enum for DP arch type
|
||||
* CDP_ARCH_TYPE_LI - for lithium
|
||||
* CDP_ARCH_TYPE_BE - for beryllium
* CDP_ARCH_TYPE_RH - for rhine
|
||||
* CDP_ARCH_TYPE_NONE - not supported
|
||||
*/
|
||||
enum cdp_arch_type {
|
||||
CDP_ARCH_TYPE_NONE = -1,
|
||||
CDP_ARCH_TYPE_LI,
|
||||
CDP_ARCH_TYPE_BE,
|
||||
CDP_ARCH_TYPE_RH,
|
||||
};
|
||||
|
||||
#if defined(DP_TXRX_SOC_ATTACH)
|
||||
static inline ol_txrx_soc_handle
|
||||
ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_if_ops)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#else
|
||||
ol_txrx_soc_handle
|
||||
ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_if_ops);
|
||||
#endif
|
||||
|
||||
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
|
||||
defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
|
||||
defined(QCA_WIFI_QCA5332)
|
||||
|
||||
/**
|
||||
* dp_soc_attach_wifi3() - Attach txrx SOC
|
||||
* @ctrl_psoc: Opaque SOC handle from Ctrl plane
|
||||
* @params: soc attach params
|
||||
*
|
||||
* Return: DP SOC handle on success, NULL on failure
|
||||
*/
|
||||
struct cdp_soc_t *
|
||||
dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
|
||||
struct cdp_soc_attach_params *params);
|
||||
|
||||
/**
|
||||
* dp_soc_init_wifi3() - Initialize txrx SOC
|
||||
* @soc: Opaque DP SOC handle
|
||||
* @ctrl_psoc: Opaque SOC handle from control plane
|
||||
* @hif_handle: Opaque HIF handle
|
||||
* @htc_handle: Opaque HTC handle
|
||||
* @qdf_osdev: QDF device
|
||||
* @ol_ops: Offload Operations
|
||||
* @device_id: Device ID
|
||||
*
|
||||
* Return: DP SOC handle on success, NULL on failure
|
||||
*/
|
||||
void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
|
||||
struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
|
||||
struct hif_opaque_softc *hif_handle,
|
||||
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
|
||||
struct ol_if_ops *ol_ops, uint16_t device_id);
|
||||
#else
|
||||
static inline struct cdp_soc_t *
|
||||
dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
|
||||
struct cdp_soc_attach_params *params)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
|
||||
struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
|
||||
struct hif_opaque_softc *hif_handle,
|
||||
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
|
||||
struct ol_if_ops *ol_ops, uint16_t device_id)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif /* QCA_WIFI_QCA8074 */
|
||||
|
||||
static inline int cdp_get_arch_type_from_devid(uint16_t devid)
|
||||
{
|
||||
switch (devid) {
|
||||
case LITHIUM_DP: /*FIXME Add lithium device IDs */
|
||||
case QCA8074_DEVICE_ID: /* Hawkeye */
|
||||
case QCA8074V2_DEVICE_ID: /* Hawkeye V2 */
|
||||
case QCA9574_DEVICE_ID:
|
||||
case QCA5018_DEVICE_ID:
|
||||
case QCA6290_DEVICE_ID:
|
||||
case QCN9000_DEVICE_ID:
|
||||
case QCN6122_DEVICE_ID:
|
||||
case QCN9160_DEVICE_ID:
|
||||
case QCA6390_DEVICE_ID:
|
||||
case QCA6490_DEVICE_ID:
|
||||
case QCA6750_DEVICE_ID:
|
||||
case QCA6390_EMULATION_DEVICE_ID:
|
||||
case RUMIM2M_DEVICE_ID_NODE0: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE1: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE2: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE3: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE4: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE5: /*lithium emulation */
|
||||
return CDP_ARCH_TYPE_LI;
|
||||
case BERYLLIUM_DP:
|
||||
case KIWI_DEVICE_ID:
|
||||
case QCN9224_DEVICE_ID:
|
||||
case QCA5332_DEVICE_ID:
|
||||
case MANGO_DEVICE_ID:
|
||||
case PEACH_DEVICE_ID:
|
||||
case QCN6432_DEVICE_ID:
|
||||
return CDP_ARCH_TYPE_BE;
|
||||
case RHINE_DP:
|
||||
return CDP_ARCH_TYPE_RH;
|
||||
default:
|
||||
return CDP_ARCH_TYPE_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
ol_txrx_soc_handle cdp_soc_attach(u_int16_t devid,
|
||||
struct hif_opaque_softc *hif_handle,
|
||||
struct cdp_ctrl_objmgr_psoc *psoc,
|
||||
HTC_HANDLE htc_handle,
|
||||
qdf_device_t qdf_dev,
|
||||
struct ol_if_ops *dp_ol_if_ops)
|
||||
{
|
||||
struct cdp_soc_attach_params params = {0};
|
||||
|
||||
params.hif_handle = hif_handle;
|
||||
params.device_id = devid;
|
||||
params.htc_handle = htc_handle;
|
||||
params.qdf_osdev = qdf_dev;
|
||||
params.ol_ops = dp_ol_if_ops;
|
||||
|
||||
switch (devid) {
|
||||
case LITHIUM_DP: /*FIXME Add lithium device IDs */
|
||||
case BERYLLIUM_DP:
|
||||
case RHINE_DP:
|
||||
case QCA8074_DEVICE_ID: /* Hawkeye */
|
||||
case QCA8074V2_DEVICE_ID: /* Hawkeye V2 */
|
||||
case QCA5018_DEVICE_ID:
|
||||
case QCA6290_DEVICE_ID:
|
||||
case QCN9000_DEVICE_ID:
|
||||
case QCN6122_DEVICE_ID:
|
||||
case QCN9160_DEVICE_ID:
|
||||
case QCN6432_DEVICE_ID:
|
||||
case QCA6390_DEVICE_ID:
|
||||
case QCA6490_DEVICE_ID:
|
||||
case QCA6750_DEVICE_ID:
|
||||
case QCA6390_EMULATION_DEVICE_ID:
|
||||
case RUMIM2M_DEVICE_ID_NODE0: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE1: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE2: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE3: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE4: /*lithium emulation */
|
||||
case RUMIM2M_DEVICE_ID_NODE5: /*lithium emulation */
|
||||
case KIWI_DEVICE_ID:
|
||||
case QCN9224_DEVICE_ID:
|
||||
case MANGO_DEVICE_ID:
|
||||
case PEACH_DEVICE_ID:
|
||||
case QCA5332_DEVICE_ID:
|
||||
return dp_soc_attach_wifi3(psoc, ¶ms);
|
||||
break;
|
||||
default:
|
||||
return ol_txrx_soc_attach(psoc, dp_ol_if_ops);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /*_CDP_TXRX_CMN_REG_H_ */
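A hedged example of the device-id dispatch above; all handles are caller-owned and KIWI_DEVICE_ID is just one of the ids already listed in this header.

static ol_txrx_soc_handle example_soc_attach(struct hif_opaque_softc *hif,
                                             struct cdp_ctrl_objmgr_psoc *psoc,
                                             HTC_HANDLE htc,
                                             qdf_device_t qdf_dev,
                                             struct ol_if_ops *ol_ops)
{
        uint16_t devid = KIWI_DEVICE_ID;

        if (cdp_get_arch_type_from_devid(devid) == CDP_ARCH_TYPE_NONE)
                return NULL;

        return cdp_soc_attach(devid, hif, psoc, htc, qdf_dev, ol_ops);
}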
|
3377
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h
Normal file
File diff suppressed because it is too large
1480
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h
Normal file
File diff suppressed because it is too large
@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2016,2018-2019 The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file cdp_txrx_ctrl.h
|
||||
* @brief Define the host data path control API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_CTRL_DEF_H_
|
||||
#define _CDP_TXRX_CTRL_DEF_H_
|
||||
/* TODO: adf need to be replaced with qdf */
|
||||
/*
|
||||
* Cleanups -- Might need cleanup
|
||||
*/
|
||||
#if !QCA_OL_TX_PDEV_LOCK && QCA_NSS_PLATFORM || \
|
||||
(defined QCA_PARTNER_PLATFORM && QCA_PARTNER_SUPPORT_FAST_TX)
|
||||
#define VAP_TX_SPIN_LOCK(_x) spin_lock(_x)
|
||||
#define VAP_TX_SPIN_UNLOCK(_x) spin_unlock(_x)
|
||||
#else /* QCA_OL_TX_PDEV_LOCK */
|
||||
#define VAP_TX_SPIN_LOCK(_x)
|
||||
#define VAP_TX_SPIN_UNLOCK(_x)
|
||||
#endif /* QCA_OL_TX_PDEV_LOCK */
|
||||
|
||||
#if QCA_OL_TX_PDEV_LOCK
|
||||
void ol_ll_pdev_tx_lock(void *);
|
||||
void ol_ll_pdev_tx_unlock(void *);
|
||||
#define OL_TX_LOCK(_x) ol_ll_pdev_tx_lock(_x)
|
||||
#define OL_TX_UNLOCK(_x) ol_ll_pdev_tx_unlock(_x)
|
||||
|
||||
#define OL_TX_PDEV_LOCK(_x) qdf_spin_lock_bh(_x)
|
||||
#define OL_TX_PDEV_UNLOCK(_x) qdf_spin_unlock_bh(_x)
|
||||
#else
|
||||
#define OL_TX_PDEV_LOCK(_x)
|
||||
#define OL_TX_PDEV_UNLOCK(_x)
|
||||
|
||||
#define OL_TX_LOCK(_x)
|
||||
#define OL_TX_UNLOCK(_x)
|
||||
#endif /* QCA_OL_TX_PDEV_LOCK */
|
||||
|
||||
#if !QCA_OL_TX_PDEV_LOCK
|
||||
#define OL_TX_FLOW_CTRL_LOCK(_x) qdf_spin_lock_bh(_x)
|
||||
#define OL_TX_FLOW_CTRL_UNLOCK(_x) qdf_spin_unlock_bh(_x)
|
||||
|
||||
#define OL_TX_DESC_LOCK(_x) qdf_spin_lock_bh(_x)
|
||||
#define OL_TX_DESC_UNLOCK(_x) qdf_spin_unlock_bh(_x)
|
||||
|
||||
#define OSIF_VAP_TX_LOCK(_y, _x) spin_lock(&((_x)->tx_lock))
|
||||
#define OSIF_VAP_TX_UNLOCK(_y, _x) spin_unlock(&((_x)->tx_lock))
|
||||
|
||||
#define OL_TX_PEER_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->peer_lock[_id]))
|
||||
#define OL_TX_PEER_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->peer_lock[_id]))
|
||||
|
||||
#define OL_TX_PEER_UPDATE_LOCK(_x, _id) \
|
||||
qdf_spin_lock_bh(&((_x)->peer_lock[_id]))
|
||||
#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) \
|
||||
qdf_spin_unlock_bh(&((_x)->peer_lock[_id]))
|
||||
|
||||
#else
|
||||
#define OSIF_VAP_TX_LOCK(_y, _x) cdp_vdev_tx_lock( \
|
||||
_y, wlan_vdev_get_id((_x)->ctrl_vdev))
|
||||
#define OSIF_VAP_TX_UNLOCK(_y, _x) cdp_vdev_tx_unlock( \
|
||||
_y, wlan_vdev_get_id((_x)->ctrl_vdev))
|
||||
|
||||
#define OL_TX_FLOW_CTRL_LOCK(_x)
|
||||
#define OL_TX_FLOW_CTRL_UNLOCK(_x)
|
||||
|
||||
#define OL_TX_DESC_LOCK(_x)
|
||||
#define OL_TX_DESC_UNLOCK(_x)
|
||||
|
||||
#define OL_TX_PEER_LOCK(_x, _id)
|
||||
#define OL_TX_PEER_UNLOCK(_x, _id)
|
||||
|
||||
#define OL_TX_PEER_UPDATE_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->tx_lock))
|
||||
#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->tx_lock))
|
||||
|
||||
#endif /* !QCA_OL_TX_PDEV_LOCK */
|
||||
#endif
|
||||
|
@ -0,0 +1,313 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019,2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_flow_ctrl_legacy.h
|
||||
* Define the host data path legacy flow control API functions
|
||||
*/
|
||||
#ifndef _CDP_TXRX_FC_LEG_H_
|
||||
#define _CDP_TXRX_FC_LEG_H_
|
||||
#include <cdp_txrx_mob_def.h>
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
|
||||
|
||||
/**
|
||||
* cdp_hl_fc_register() - Register HL flow control callback.
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: datapath pdev identifier
|
||||
* @flowcontrol: callback function pointer to stop/start OS netdev queues
|
||||
*
|
||||
* Register flow control callback.
|
||||
*
|
||||
* Return: 0 for success
|
||||
*/
|
||||
static inline int
|
||||
cdp_hl_fc_register(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
tx_pause_callback flowcontrol)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->register_tx_flow_control)
|
||||
return -EINVAL;
|
||||
|
||||
return soc->ops->l_flowctl_ops->register_tx_flow_control(soc, pdev_id,
|
||||
flowcontrol);
|
||||
}
|
||||
|
||||
static inline int cdp_hl_fc_set_td_limit(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, uint32_t chan_freq)
|
||||
{
|
||||
if (!soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit)
|
||||
return 0;
|
||||
|
||||
return soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit(soc, vdev_id,
|
||||
chan_freq);
|
||||
}
|
||||
|
||||
static inline int cdp_hl_fc_set_os_queue_status(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id,
|
||||
enum netif_action_type action)
|
||||
{
|
||||
if (!soc->ops->l_flowctl_ops->set_vdev_os_queue_status)
|
||||
return -EINVAL;
|
||||
|
||||
return soc->ops->l_flowctl_ops->set_vdev_os_queue_status(soc,
|
||||
vdev_id,
|
||||
action);
|
||||
}
|
||||
#else
|
||||
static inline int
|
||||
cdp_hl_fc_register(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
tx_pause_callback flowcontrol)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cdp_hl_fc_set_td_limit(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, uint32_t chan_freq)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cdp_hl_fc_set_os_queue_status(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id,
|
||||
enum netif_action_type action)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
|
||||
|
||||
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
|
||||
/**
|
||||
* cdp_fc_register() - Register flow control callback function pointer
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id to register flow control
|
||||
* @flowcontrol: callback function pointer
|
||||
* @osif_fc_ctx: client context pointer
|
||||
* @flow_control_is_pause: is vdev paused by flow control
|
||||
*
|
||||
* Register flow control callback function pointer and client context pointer
|
||||
*
|
||||
* Return: 0 for success
|
||||
*/
|
||||
static inline int
|
||||
cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
ol_txrx_tx_flow_control_fp flowcontrol, void *osif_fc_ctx,
|
||||
ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->register_tx_flow_control)
|
||||
return 0;
|
||||
|
||||
return soc->ops->l_flowctl_ops->register_tx_flow_control(
|
||||
soc, vdev_id, flowcontrol, osif_fc_ctx,
|
||||
flow_control_is_pause);
|
||||
}
|
||||
#else
|
||||
static inline int
|
||||
cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
ol_txrx_tx_flow_control_fp flowcontrol, void *osif_fc_ctx,
|
||||
ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
|
||||
/**
|
||||
* cdp_fc_deregister() - remove flow control instance
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id to deregister flow control
|
||||
*
|
||||
* remove flow control instance
|
||||
*
|
||||
* Return: 0 for success
|
||||
*/
|
||||
static inline int
|
||||
cdp_fc_deregister(ol_txrx_soc_handle soc, uint8_t vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb)
|
||||
return 0;
|
||||
|
||||
return soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb(
|
||||
soc, vdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_fc_get_tx_resource() - get data path resource count
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: datapath pdev ID
|
||||
* @peer_addr: peer mac address
|
||||
* @low_watermark: low resource threshold
|
||||
* @high_watermark_offset: high resource threshold
|
||||
*
|
||||
* get data path resource count
|
||||
*
|
||||
* Return: true enough data path resource available
|
||||
* false resource is not available
|
||||
*/
|
||||
static inline bool
|
||||
cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
struct qdf_mac_addr peer_addr,
|
||||
unsigned int low_watermark,
|
||||
unsigned int high_watermark_offset)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->get_tx_resource)
|
||||
return false;
|
||||
|
||||
return soc->ops->l_flowctl_ops->get_tx_resource(soc, pdev_id, peer_addr,
|
||||
low_watermark,
|
||||
high_watermark_offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_fc_ll_set_tx_pause_q_depth() - set pause queue depth
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id to register flow control
|
||||
* @pause_q_depth: pending tx queue depth
|
||||
*
|
||||
* set pause queue depth
|
||||
*
|
||||
* Return: 0 for success
|
||||
*/
|
||||
static inline int
|
||||
cdp_fc_ll_set_tx_pause_q_depth(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, int pause_q_depth)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth)
|
||||
return 0;
|
||||
|
||||
return soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth(
|
||||
soc, vdev_id, pause_q_depth);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_fc_vdev_flush() - flush tx queue
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: id of vdev
|
||||
*
|
||||
* flush tx queue
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
cdp_fc_vdev_flush(ol_txrx_soc_handle soc, uint8_t vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->vdev_flush)
|
||||
return;
|
||||
|
||||
soc->ops->l_flowctl_ops->vdev_flush(soc, vdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_fc_vdev_pause() - pause tx scheduler on vdev
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: id of vdev
|
||||
* @reason: pause reason
|
||||
* @pause_type: type of pause
|
||||
*
|
||||
* pause tx scheduler on vdev
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
cdp_fc_vdev_pause(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint32_t reason, uint32_t pause_type)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->vdev_pause)
|
||||
return;
|
||||
|
||||
soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id, reason, pause_type);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_fc_vdev_unpause() - resume tx scheduler on vdev
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: id of vdev
|
||||
* @reason: pause reason
|
||||
* @pause_type: type of pause
|
||||
*
|
||||
* resume tx scheduler on vdev
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint32_t reason, uint32_t pause_type)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->l_flowctl_ops ||
|
||||
!soc->ops->l_flowctl_ops->vdev_unpause)
|
||||
return;
|
||||
|
||||
soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id, reason,
|
||||
pause_type);
|
||||
}
|
||||
#endif /* _CDP_TXRX_FC_LEG_H_ */
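
The wrappers above only dispatch into soc->ops->l_flowctl_ops. As a usage illustration only (not part of the driver sources), a minimal sketch of an OSIF-side stop/resume sequence; the soc handle, vdev id and the pause_type value 0 are assumptions made for the example, and OL_TXQ_PAUSE_REASON_VDEV_STOP comes from cdp_txrx_mob_def.h:

#include "cdp_txrx_flow_ctrl_legacy.h"

/* Illustrative sketch: pause the vdev tx scheduler, drop queued frames,
 * then resume later once the vdev is usable again. */
static void osif_example_vdev_stop_tx(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
	cdp_fc_vdev_pause(soc, vdev_id, OL_TXQ_PAUSE_REASON_VDEV_STOP,
			  0 /* pause_type, assumed */);
	cdp_fc_vdev_flush(soc, vdev_id);
}

static void osif_example_vdev_resume_tx(ol_txrx_soc_handle soc,
					uint8_t vdev_id)
{
	cdp_fc_vdev_unpause(soc, vdev_id, OL_TXQ_PAUSE_REASON_VDEV_STOP,
			    0 /* pause_type, assumed */);
}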
|
@ -0,0 +1,129 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019,2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_flow_ctrl_v2.h
|
||||
* Define the host data path flow control version 2 API functions
|
||||
*/
|
||||
#ifndef _CDP_TXRX_FC_V2_H_
|
||||
#define _CDP_TXRX_FC_V2_H_
|
||||
#include <cdp_txrx_ops.h>
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
/**
|
||||
* cdp_register_pause_cb() - Register flow control callback function pointer
|
||||
* @soc: data path soc handle
|
||||
* @pause_cb: Pause callback intend to register
|
||||
*
|
||||
* Register flow control callback function pointer and client context pointer
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS on success
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_register_pause_cb(ol_txrx_soc_handle soc,
|
||||
tx_pause_callback pause_cb)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->flowctl_ops ||
|
||||
!soc->ops->flowctl_ops->register_pause_cb)
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
return soc->ops->flowctl_ops->register_pause_cb(soc, pause_cb);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_set_desc_global_pool_size() - set global descriptor pool size
|
||||
* @soc: data path soc handle
|
||||
* @num_msdu_desc: descriptor pool size
|
||||
*
|
||||
* set global descriptor pool size
|
||||
*
|
||||
* return none
|
||||
*/
|
||||
static inline void
|
||||
cdp_set_desc_global_pool_size(ol_txrx_soc_handle soc,
|
||||
uint32_t num_msdu_desc)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->flowctl_ops ||
|
||||
!soc->ops->flowctl_ops->set_desc_global_pool_size)
|
||||
return;
|
||||
|
||||
soc->ops->flowctl_ops->set_desc_global_pool_size(
|
||||
num_msdu_desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_dump_flow_pool_info() - dump flow pool information
|
||||
* @soc: data path soc handle
|
||||
*
|
||||
* dump flow pool information
|
||||
*
|
||||
* return none
|
||||
*/
|
||||
static inline void
|
||||
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->flowctl_ops ||
|
||||
!soc->ops->flowctl_ops->dump_flow_pool_info)
|
||||
return;
|
||||
|
||||
soc->ops->flowctl_ops->dump_flow_pool_info(soc);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_tx_desc_thresh_reached() - Check if avail tx desc meet threshold
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev_id corresponding to vdev start
|
||||
*
|
||||
* Return: true if threshold is met, false if not
|
||||
*/
|
||||
static inline bool
|
||||
cdp_tx_desc_thresh_reached(struct cdp_soc_t *soc, uint8_t vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("invalid instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->flowctl_ops ||
|
||||
!soc->ops->flowctl_ops->tx_desc_thresh_reached)
|
||||
return false;
|
||||
|
||||
return soc->ops->flowctl_ops->tx_desc_thresh_reached(soc, vdev_id);
|
||||
}
|
||||
#endif /* _CDP_TXRX_FC_V2_H_ */
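
A minimal caller-side sketch of the v2 flow-control API above; osif_example_pause_cb and the pool size of 1024 are illustrative assumptions, not values taken from the driver:

#include "cdp_txrx_flow_ctrl_v2.h"

/* Hypothetical OSIF pause handler matching the tx_pause_callback typedef. */
static void osif_example_pause_cb(uint8_t vdev_id,
				  enum netif_action_type action,
				  enum netif_reason_type reason)
{
	/* Map 'action' to netdev queue start/stop for this vdev here. */
}

static QDF_STATUS osif_example_fc_v2_init(ol_txrx_soc_handle soc)
{
	QDF_STATUS status;

	/* Hand the datapath a callback it can use to pause/resume netdevs. */
	status = cdp_register_pause_cb(soc, osif_example_pause_cb);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Illustrative pool size; real callers derive this from config. */
	cdp_set_desc_global_pool_size(soc, 1024);
	return QDF_STATUS_SUCCESS;
}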
|
73
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_fse.h
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_FSE_H_
|
||||
#define _CDP_TXRX_FSE_H_
|
||||
|
||||
#include <cdp_txrx_cmn_struct.h>
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
|
||||
static inline QDF_STATUS
|
||||
cdp_fse_flow_add(ol_txrx_soc_handle soc,
|
||||
uint32_t *src_ip, uint32_t src_port,
|
||||
uint32_t *dest_ip, uint32_t dest_port,
|
||||
uint8_t protocol, uint8_t version)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->fse_ops ||
|
||||
!soc->ops->fse_ops->fse_rule_add) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->fse_ops->fse_rule_add(soc,
|
||||
src_ip, src_port,
|
||||
dest_ip, dest_port,
|
||||
protocol, version);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_fse_flow_delete(ol_txrx_soc_handle soc,
|
||||
uint32_t *src_ip, uint32_t src_port,
|
||||
uint32_t *dest_ip, uint32_t dest_port,
|
||||
uint8_t protocol, uint8_t version)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->fse_ops ||
|
||||
!soc->ops->fse_ops->fse_rule_delete) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->fse_ops->fse_rule_delete(soc,
|
||||
src_ip, src_port,
|
||||
dest_ip, dest_port,
|
||||
protocol, version);
|
||||
}
|
||||
|
||||
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
|
||||
#endif /* _CDP_TXRX_FSE_H_ */
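
For WLAN_SUPPORT_RX_FLOW_TAG builds, a hedged sketch of installing and removing one flow via the wrappers above; the addresses, ports and the protocol/version encodings (6 for TCP, 4 for IPv4) are assumptions made for the illustration:

#include "cdp_txrx_fse.h"

static QDF_STATUS example_fse_add_tcp_flow(ol_txrx_soc_handle soc)
{
	/* IPv4 addresses packed into the 128-bit address arrays. */
	uint32_t src_ip[4] = { 0xc0a80101, 0, 0, 0 };	/* 192.168.1.1 */
	uint32_t dst_ip[4] = { 0xc0a80102, 0, 0, 0 };	/* 192.168.1.2 */

	return cdp_fse_flow_add(soc, src_ip, 5001, dst_ip, 80,
				6 /* TCP */, 4 /* IPv4 */);
}

static QDF_STATUS example_fse_del_tcp_flow(ol_txrx_soc_handle soc)
{
	uint32_t src_ip[4] = { 0xc0a80101, 0, 0, 0 };
	uint32_t dst_ip[4] = { 0xc0a80102, 0, 0, 0 };

	return cdp_fse_flow_delete(soc, src_ip, 5001, dst_ip, 80, 6, 4);
}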
|
@ -0,0 +1,66 @@
|
||||
|
||||
/*
|
||||
* Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_handle.h
|
||||
* Holds the forward structure declarations for handles
|
||||
* passed from the upper layers
|
||||
*/
|
||||
|
||||
#ifndef CDP_TXRX_HANDLE_H
|
||||
#define CDP_TXRX_HANDLE_H
|
||||
|
||||
struct cdp_cfg;
|
||||
struct cdp_pdev;
|
||||
struct cdp_vdev;
|
||||
struct cdp_peer;
|
||||
struct cdp_raw_ast;
|
||||
struct cdp_soc;
|
||||
|
||||
/*
|
||||
* cdp_ctrl_objmgr_psoc - opaque handle for UMAC psoc object
|
||||
*/
|
||||
struct cdp_ctrl_objmgr_psoc;
|
||||
|
||||
/*
|
||||
* cdp_ctrl_objmgr_pdev - opaque handle for UMAC pdev object
|
||||
*/
|
||||
struct cdp_ctrl_objmgr_pdev;
|
||||
|
||||
/*
|
||||
* cdp_ctrl_objmgr_vdev - opaque handle for UMAC vdev object
|
||||
*/
|
||||
struct cdp_ctrl_objmgr_vdev;
|
||||
|
||||
/*
|
||||
* cdp_ctrl_objmgr_peer - opaque handle for UMAC peer object
|
||||
*/
|
||||
struct cdp_ctrl_objmgr_peer;
|
||||
|
||||
/*
|
||||
* cdp_cal_client - opaque handle for cal client object
|
||||
*/
|
||||
struct cdp_cal_client;
|
||||
|
||||
/*
|
||||
* cdp_ext_vdev - opaque handle for extended vdev data path handle
|
||||
*/
|
||||
struct cdp_ext_vdev;
|
||||
#endif
|
@ -0,0 +1,104 @@
|
||||
/*
|
||||
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_hist_struct.h
|
||||
* Define the host data path histogram data types
|
||||
*/
|
||||
#ifndef _CDP_TXRX_HIST_STRUCT_H_
|
||||
#define _CDP_TXRX_HIST_STRUCT_H_
|
||||
|
||||
#define CDP_RSSI_CHAIN_LEN 8
|
||||
/**
|
||||
* enum cdp_hist_bucket_index - Histogram Bucket
|
||||
* @CDP_HIST_BUCKET_0: Bucket Index 0
|
||||
* @CDP_HIST_BUCKET_1: Bucket Index 1
|
||||
* @CDP_HIST_BUCKET_2: Bucket Index 2
|
||||
* @CDP_HIST_BUCKET_3: Bucket Index 3
|
||||
* @CDP_HIST_BUCKET_4: Bucket Index 4
|
||||
* @CDP_HIST_BUCKET_5: Bucket Index 5
|
||||
* @CDP_HIST_BUCKET_6: Bucket Index 6
|
||||
* @CDP_HIST_BUCKET_7: Bucket Index 7
|
||||
* @CDP_HIST_BUCKET_8: Bucket Index 8
|
||||
* @CDP_HIST_BUCKET_9: Bucket Index 9
|
||||
* @CDP_HIST_BUCKET_10: Bucket Index 10
|
||||
* @CDP_HIST_BUCKET_11: Bucket Index 11
|
||||
* @CDP_HIST_BUCKET_12: Bucket Index 12
|
||||
* @CDP_HIST_BUCKET_MAX: Max enumeration
|
||||
*/
|
||||
enum cdp_hist_bucket_index {
|
||||
CDP_HIST_BUCKET_0,
|
||||
CDP_HIST_BUCKET_1,
|
||||
CDP_HIST_BUCKET_2,
|
||||
CDP_HIST_BUCKET_3,
|
||||
CDP_HIST_BUCKET_4,
|
||||
CDP_HIST_BUCKET_5,
|
||||
CDP_HIST_BUCKET_6,
|
||||
CDP_HIST_BUCKET_7,
|
||||
CDP_HIST_BUCKET_8,
|
||||
CDP_HIST_BUCKET_9,
|
||||
CDP_HIST_BUCKET_10,
|
||||
CDP_HIST_BUCKET_11,
|
||||
CDP_HIST_BUCKET_12,
|
||||
CDP_HIST_BUCKET_MAX,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_hist_types - Histogram Types
|
||||
* @CDP_HIST_TYPE_SW_ENQEUE_DELAY: From stack to HW enqueue delay
|
||||
* @CDP_HIST_TYPE_HW_COMP_DELAY: From HW enqueue to completion delay
|
||||
* @CDP_HIST_TYPE_REAP_STACK: Rx HW reap to stack deliver delay
|
||||
* @CDP_HIST_TYPE_HW_TX_COMP_DELAY: Tx completion delay based on the timestamp
|
||||
* provided by HW
|
||||
* @CDP_HIST_TYPE_DELAY_PERCENTILE: Tx completion delay based on the percentile
|
||||
* @CDP_HIST_TYPE_MAX: Max enumeration
|
||||
*/
|
||||
enum cdp_hist_types {
|
||||
CDP_HIST_TYPE_SW_ENQEUE_DELAY,
|
||||
CDP_HIST_TYPE_HW_COMP_DELAY,
|
||||
CDP_HIST_TYPE_REAP_STACK,
|
||||
CDP_HIST_TYPE_HW_TX_COMP_DELAY,
|
||||
CDP_HIST_TYPE_DELAY_PERCENTILE,
|
||||
CDP_HIST_TYPE_MAX,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_hist_bucket - Histogram Bucket
|
||||
* @hist_type: Histogram type
|
||||
* @freq: Frequency
|
||||
*/
|
||||
struct cdp_hist_bucket {
|
||||
enum cdp_hist_types hist_type;
|
||||
uint64_t freq[CDP_HIST_BUCKET_MAX];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_hist_stats - Histogram of a stats type
|
||||
* @hist: Frequency distribution
|
||||
* @max: Max frequency
|
||||
* @min: Minimum frequency
|
||||
* @avg: Average frequency
|
||||
*/
|
||||
struct cdp_hist_stats {
|
||||
struct cdp_hist_bucket hist;
|
||||
int max;
|
||||
int min;
|
||||
int avg;
|
||||
};
|
||||
#endif /* _CDP_TXRX_HIST_STRUCT_H_ */
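
A small, purely illustrative sketch of consuming the structures above, summing the per-bucket frequencies of one histogram:

#include "cdp_txrx_hist_struct.h"

/* Total number of samples recorded across all delay buckets. */
static uint64_t example_hist_total_samples(const struct cdp_hist_stats *stats)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < CDP_HIST_BUCKET_MAX; i++)
		total += stats->hist.freq[i];

	return total;
}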
|
1391
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h
Normal file
File diff suppressed because it is too large
1054
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h
Normal file
File diff suppressed because it is too large
85
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_me.h
|
||||
* Define the host data path mcast enhance API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_ME_H_
|
||||
#define _CDP_TXRX_ME_H_
|
||||
|
||||
#include <cdp_txrx_ops.h>
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
static inline void
|
||||
cdp_tx_me_alloc_descriptor(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->me_ops ||
|
||||
!soc->ops->me_ops->tx_me_alloc_descriptor)
|
||||
return;
|
||||
|
||||
soc->ops->me_ops->tx_me_alloc_descriptor(soc, pdev_id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cdp_tx_me_free_descriptor(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->me_ops ||
|
||||
!soc->ops->me_ops->tx_me_free_descriptor)
|
||||
return;
|
||||
|
||||
soc->ops->me_ops->tx_me_free_descriptor(soc, pdev_id);
|
||||
}
|
||||
|
||||
static inline uint16_t
|
||||
cdp_tx_me_convert_ucast(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
qdf_nbuf_t wbuf, u_int8_t newmac[][6],
|
||||
uint8_t newmaccnt, uint8_t tid, bool is_igmp,
|
||||
bool is_dms_pkt)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->me_ops ||
|
||||
!soc->ops->me_ops->tx_me_convert_ucast)
|
||||
return 0;
|
||||
|
||||
return soc->ops->me_ops->tx_me_convert_ucast
|
||||
(soc, vdev_id, wbuf, newmac, newmaccnt, tid, is_igmp,
|
||||
is_dms_pkt);
|
||||
}
|
||||
|
||||
#endif
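
A sketch of the multicast-to-unicast conversion call above; the receiver MAC list would normally come from the VAP's station table, and reading the return value as the number of generated unicast copies is an assumption made for this illustration:

#include "cdp_txrx_me.h"

static uint16_t example_me_convert(ol_txrx_soc_handle soc, uint8_t vdev_id,
				   qdf_nbuf_t mcast_frame)
{
	/* Placeholder receiver addresses for the illustration. */
	u_int8_t receivers[2][6] = {
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 },
	};

	/* tid 0, not an IGMP frame, not a DMS frame. */
	return cdp_tx_me_convert_ucast(soc, vdev_id, mcast_frame,
				       receivers, 2, 0, false, false);
}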
|
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_mesh_latency.h
|
||||
* Define the host data path MESH latency API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_MESH_LATENCY_H_
|
||||
#define _CDP_TXRX_MESH_LATENCY_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
#ifdef WLAN_SUPPORT_MESH_LATENCY
|
||||
/**
|
||||
* cdp_mesh_latency_update_peer_parameter() - update mesh latency parameters
* for the peer with this mac address
|
||||
* @soc: soc context
|
||||
* @dest_mac: destination mac address
|
||||
* @service_interval_dl: Service Interval per tid on DL
|
||||
* @burst_size_dl: Burst size per tid on DL
|
||||
* @service_interval_ul: Service Interval per tid on UL
|
||||
* @burst_size_ul: Burst size per tid on UL
|
||||
* @priority: user priority combination of tid and msdu queue
|
||||
* @add_or_sub: indicates to add or subtract latency parameter
|
||||
*
|
||||
* This function updates per peer per TID mesh latency related parameters.
|
||||
*
|
||||
* Return: 0 for non-error case, non-zero for failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_mesh_latency_update_peer_parameter(ol_txrx_soc_handle soc,
|
||||
uint8_t *dest_mac, uint32_t service_interval_dl,
|
||||
uint32_t burst_size_dl, uint32_t service_interval_ul,
|
||||
uint32_t burst_size_ul, uint16_t priority,
|
||||
uint8_t add_or_sub)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->mesh_latency_ops) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (soc->ops->mesh_latency_ops->mesh_latency_update_peer_parameter)
|
||||
return soc->ops->mesh_latency_ops->
|
||||
mesh_latency_update_peer_parameter(soc,
|
||||
dest_mac, service_interval_dl,
|
||||
burst_size_dl, service_interval_ul,
|
||||
burst_size_ul, priority, add_or_sub);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#endif
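
For WLAN_SUPPORT_MESH_LATENCY builds, a hedged sketch of pushing downlink latency parameters for one peer; the numeric values and the add_or_sub encoding (1 meaning add) are assumptions for the example, not recommended settings:

#include "cdp_txrx_mesh_latency.h"

static QDF_STATUS example_set_peer_dl_latency(ol_txrx_soc_handle soc,
					      uint8_t *peer_mac)
{
	return cdp_mesh_latency_update_peer_parameter(soc, peer_mac,
						      5000 /* SI DL */,
						      1500 /* burst DL */,
						      0, 0 /* no UL change */,
						      0 /* priority */,
						      1 /* add, assumed */);
}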
|
1077
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h
Normal file
File diff suppressed because it is too large
173
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_mlo.h
Normal file
@ -0,0 +1,173 @@
|
||||
/*
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#ifndef _CDP_TXRX_MLO_H_
|
||||
#define _CDP_TXRX_MLO_H_
|
||||
#include "cdp_txrx_ops.h"
|
||||
|
||||
struct cdp_mlo_ctxt;
|
||||
|
||||
static inline
|
||||
struct cdp_mlo_ctxt *cdp_mlo_ctxt_attach(ol_txrx_soc_handle soc,
|
||||
struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_ctxt_attach)
|
||||
return NULL;
|
||||
|
||||
return soc->ops->mlo_ops->mlo_ctxt_attach(ctrl_ctxt);
|
||||
}
|
||||
|
||||
static inline
|
||||
void cdp_mlo_ctxt_detach(ol_txrx_soc_handle soc,
|
||||
struct cdp_mlo_ctxt *ml_ctxt)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_ctxt_detach)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_ctxt_detach(ml_ctxt);
|
||||
}
|
||||
|
||||
static inline void cdp_soc_mlo_soc_setup(ol_txrx_soc_handle soc,
|
||||
struct cdp_mlo_ctxt *mlo_ctx)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_soc_setup)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_soc_setup(soc, mlo_ctx);
|
||||
}
|
||||
|
||||
static inline void cdp_soc_mlo_soc_teardown(ol_txrx_soc_handle soc,
|
||||
struct cdp_mlo_ctxt *mlo_ctx,
|
||||
bool is_force_down)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_soc_teardown)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_soc_teardown(soc, mlo_ctx, is_force_down);
|
||||
}
|
||||
|
||||
static inline void cdp_mlo_setup_complete(ol_txrx_soc_handle soc,
|
||||
struct cdp_mlo_ctxt *mlo_ctx)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_setup_complete)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_setup_complete(mlo_ctx);
|
||||
}
|
||||
|
||||
/*
|
||||
* cdp_mlo_update_delta_tsf2 - Update delta_tsf2
|
||||
* @soc: soc handle
|
||||
* @pdev_id: pdev id
|
||||
* @delta_tsf2: delta_tsf2
|
||||
*
|
||||
* return: none
|
||||
*/
|
||||
static inline void cdp_mlo_update_delta_tsf2(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id,
|
||||
uint64_t delta_tsf2)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_update_delta_tsf2)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_update_delta_tsf2(soc, pdev_id, delta_tsf2);
|
||||
}
|
||||
|
||||
/*
|
||||
* cdp_mlo_update_delta_tqm - Update delta_tqm
|
||||
* @soc: soc handle
|
||||
* @delta_tqm: delta_tqm
|
||||
*
|
||||
* return: none
|
||||
*/
|
||||
static inline void cdp_mlo_update_delta_tqm(ol_txrx_soc_handle soc,
|
||||
uint64_t delta_tqm)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops ||
|
||||
!soc->ops->mlo_ops->mlo_update_delta_tqm)
|
||||
return;
|
||||
|
||||
soc->ops->mlo_ops->mlo_update_delta_tqm(soc, delta_tqm);
|
||||
}
|
||||
|
||||
/*
|
||||
* cdp_mlo_get_mld_vdev_stats - Get MLD vdev stats
|
||||
* @soc: soc handle
|
||||
* @vdev_id: vdev_id of one of the vdev's of the MLD group
|
||||
* @buf: buffer to hold vdev_stats
|
||||
* @link_vdev_only: flag to indicate if stats are required for specific vdev
|
||||
*
|
||||
* return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_mlo_get_mld_vdev_stats(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, struct cdp_vdev_stats *buf,
|
||||
bool link_vdev_only)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mlo_ops || !soc->ops->mlo_ops->mlo_get_mld_vdev_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mlo_ops->mlo_get_mld_vdev_stats(soc,
|
||||
vdev_id,
|
||||
buf,
|
||||
link_vdev_only);
|
||||
}
|
||||
#endif /*_CDP_TXRX_MLO_H_*/
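
The expected ordering of the MLO wrappers above, written out as a sketch; the bring-up/tear-down sequence is inferred from the API names and is not a statement of the driver's actual call order:

#include "cdp_txrx_mlo.h"

static struct cdp_mlo_ctxt *
example_mlo_bring_up(ol_txrx_soc_handle soc, struct cdp_ctrl_mlo_mgr *ctrl)
{
	struct cdp_mlo_ctxt *mlo_ctx = cdp_mlo_ctxt_attach(soc, ctrl);

	if (!mlo_ctx)
		return NULL;

	cdp_soc_mlo_soc_setup(soc, mlo_ctx);
	cdp_mlo_setup_complete(soc, mlo_ctx);
	return mlo_ctx;
}

static void example_mlo_tear_down(ol_txrx_soc_handle soc,
				  struct cdp_mlo_ctxt *mlo_ctx)
{
	cdp_soc_mlo_soc_teardown(soc, mlo_ctx, false /* graceful */);
	cdp_mlo_ctxt_detach(soc, mlo_ctx);
}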
|
599
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h
Normal file
@ -0,0 +1,599 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __CDP_TXRX_MOB_DEF_H
|
||||
#define __CDP_TXRX_MOB_DEF_H
|
||||
#include <sir_types.h>
|
||||
#include <htt.h>
|
||||
|
||||
#define TX_WMM_AC_NUM 4
|
||||
#define ENABLE_DP_HIST_STATS
|
||||
#define DP_MEMORY_OPT
|
||||
#ifndef CONFIG_BERYLLIUM
|
||||
#define DP_USE_SINGLE_TCL
|
||||
#endif
|
||||
|
||||
#define DP_RX_DISABLE_NDI_MDNS_FORWARDING
|
||||
|
||||
#define OL_TXQ_PAUSE_REASON_FW (1 << 0)
|
||||
#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
|
||||
#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2)
|
||||
#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3)
|
||||
#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
|
||||
|
||||
#define OL_TXRX_INVALID_NUM_PEERS (-1)
|
||||
|
||||
|
||||
/* Maximum number of stations supported by the data path, including BC. */
|
||||
#define WLAN_MAX_STA_COUNT (HAL_NUM_STA)
|
||||
|
||||
/* The symbolic station ID returned to HDD to specify the packet is bc/mc */
|
||||
#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
|
||||
|
||||
/* The symbolic station ID returned to HDD to specify the packet is
   to soft-AP itself */
|
||||
#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
|
||||
|
||||
/* is 802.11 address multicast/broadcast? */
|
||||
#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01)
|
||||
|
||||
#define MAX_PEERS 32
|
||||
|
||||
/*
|
||||
* Bins used for reporting delay histogram:
|
||||
* bin 0: 0 - 10 ms delay
|
||||
* bin 1: 10 - 20 ms delay
|
||||
* bin 2: 20 - 40 ms delay
|
||||
* bin 3: 40 - 80 ms delay
|
||||
* bin 4: 80 - 160 ms delay
|
||||
* bin 5: > 160 ms delay
|
||||
*/
|
||||
#define QCA_TX_DELAY_HIST_REPORT_BINS 6
|
||||
|
||||
/* BA actions */
|
||||
#define IEEE80211_ACTION_BA_ADDBA_REQUEST 0 /* ADDBA request */
|
||||
#define IEEE80211_ACTION_BA_ADDBA_RESPONSE 1 /* ADDBA response */
|
||||
#define IEEE80211_ACTION_BA_DELBA 2 /* DELBA */
|
||||
|
||||
#define IEEE80211_BA_POLICY_DELAYED 0
|
||||
#define IEEE80211_BA_POLICY_IMMEDIATE 1
|
||||
#define IEEE80211_BA_AMSDU_SUPPORTED 1
|
||||
|
||||
/**
|
||||
* enum netif_action_type - Type of actions on netif queues
|
||||
* @WLAN_NETIF_ACTION_TYPE_NONE: perform no action
|
||||
* @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
|
||||
* @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
|
||||
* @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
|
||||
* @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
|
||||
* @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
|
||||
* @WLAN_NETIF_TX_DISABLE: disable tx
|
||||
* @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
|
||||
* @WLAN_NETIF_CARRIER_ON: on carrier
|
||||
* @WLAN_NETIF_CARRIER_OFF: off carrier
|
||||
* @WLAN_NETIF_PRIORITY_QUEUE_ON: start priority netif queues
|
||||
* @WLAN_NETIF_PRIORITY_QUEUE_OFF: stop priority netif queues
|
||||
* @WLAN_NETIF_VO_QUEUE_ON: start voice queue
|
||||
* @WLAN_NETIF_VO_QUEUE_OFF: stop voice queue
|
||||
* @WLAN_NETIF_VI_QUEUE_ON: start video queue
|
||||
* @WLAN_NETIF_VI_QUEUE_OFF: stop video queue
|
||||
* @WLAN_NETIF_BE_BK_QUEUE_OFF: stop best-effort & background queue
* @WLAN_NETIF_BE_BK_QUEUE_ON: start best-effort & background queue
|
||||
* @WLAN_WAKE_NON_PRIORITY_QUEUE: wake non priority netif queues
|
||||
* @WLAN_STOP_NON_PRIORITY_QUEUE: stop non priority netif queues
|
||||
* @WLAN_NETIF_ACTION_TYPE_MAX: Maximum action
|
||||
*/
|
||||
enum netif_action_type {
|
||||
WLAN_NETIF_ACTION_TYPE_NONE = 0,
|
||||
WLAN_STOP_ALL_NETIF_QUEUE = 1,
|
||||
WLAN_START_ALL_NETIF_QUEUE = 2,
|
||||
WLAN_WAKE_ALL_NETIF_QUEUE = 3,
|
||||
WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER = 4,
|
||||
WLAN_START_ALL_NETIF_QUEUE_N_CARRIER = 5,
|
||||
WLAN_NETIF_TX_DISABLE = 6,
|
||||
WLAN_NETIF_TX_DISABLE_N_CARRIER = 7,
|
||||
WLAN_NETIF_CARRIER_ON = 8,
|
||||
WLAN_NETIF_CARRIER_OFF = 9,
|
||||
WLAN_NETIF_PRIORITY_QUEUE_ON = 10,
|
||||
WLAN_NETIF_PRIORITY_QUEUE_OFF = 11,
|
||||
WLAN_NETIF_VO_QUEUE_ON = 12,
|
||||
WLAN_NETIF_VO_QUEUE_OFF = 13,
|
||||
WLAN_NETIF_VI_QUEUE_ON = 14,
|
||||
WLAN_NETIF_VI_QUEUE_OFF = 15,
|
||||
WLAN_NETIF_BE_BK_QUEUE_OFF = 16,
|
||||
WLAN_NETIF_BE_BK_QUEUE_ON = 17,
|
||||
WLAN_WAKE_NON_PRIORITY_QUEUE = 18,
|
||||
WLAN_STOP_NON_PRIORITY_QUEUE = 19,
|
||||
WLAN_NETIF_ACTION_TYPE_MAX,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum netif_reason_type - reason for netif queue action
|
||||
* @WLAN_CONTROL_PATH: action from control path
|
||||
* @WLAN_DATA_FLOW_CONTROL: because of flow control
|
||||
* @WLAN_FW_PAUSE: because of firmware pause
|
||||
* @WLAN_TX_ABORT: because of tx abort
|
||||
* @WLAN_VDEV_STOP: because of vdev stop
|
||||
* @WLAN_PEER_UNAUTHORISED: because the peer is unauthorised
|
||||
* @WLAN_THERMAL_MITIGATION: because of thermal mitigation
|
||||
* @WLAN_DATA_FLOW_CONTROL_PRIORITY:
|
||||
* @WLAN_DATA_FLOW_CTRL_BE_BK:
|
||||
* @WLAN_DATA_FLOW_CTRL_VI:
|
||||
* @WLAN_DATA_FLOW_CTRL_VO:
|
||||
* @WLAN_DATA_FLOW_CTRL_PRI:
|
||||
* @WLAN_REASON_TYPE_MAX: maximum reason
|
||||
*/
|
||||
enum netif_reason_type {
|
||||
WLAN_CONTROL_PATH = 1,
|
||||
WLAN_DATA_FLOW_CONTROL,
|
||||
WLAN_FW_PAUSE,
|
||||
WLAN_TX_ABORT,
|
||||
WLAN_VDEV_STOP,
|
||||
WLAN_PEER_UNAUTHORISED,
|
||||
WLAN_THERMAL_MITIGATION,
|
||||
WLAN_DATA_FLOW_CONTROL_PRIORITY,
|
||||
WLAN_DATA_FLOW_CTRL_BE_BK,
|
||||
WLAN_DATA_FLOW_CTRL_VI,
|
||||
WLAN_DATA_FLOW_CTRL_VO,
|
||||
WLAN_DATA_FLOW_CTRL_PRI,
|
||||
WLAN_REASON_TYPE_MAX,
|
||||
};
|
||||
|
||||
enum ol_rx_err_type {
|
||||
OL_RX_ERR_DEFRAG_MIC,
|
||||
OL_RX_ERR_PN,
|
||||
OL_RX_ERR_UNKNOWN_PEER,
|
||||
OL_RX_ERR_MALFORMED,
|
||||
OL_RX_ERR_TKIP_MIC,
|
||||
OL_RX_ERR_DECRYPT,
|
||||
OL_RX_ERR_MPDU_LENGTH,
|
||||
OL_RX_ERR_ENCRYPT_REQUIRED,
|
||||
OL_RX_ERR_DUP,
|
||||
OL_RX_ERR_UNKNOWN,
|
||||
OL_RX_ERR_FCS,
|
||||
OL_RX_ERR_PRIVACY,
|
||||
OL_RX_ERR_NONE_FRAG,
|
||||
OL_RX_ERR_NONE = 0xFF
|
||||
};
|
||||
|
||||
enum throttle_level {
|
||||
THROTTLE_LEVEL_0,
|
||||
THROTTLE_LEVEL_1,
|
||||
THROTTLE_LEVEL_2,
|
||||
THROTTLE_LEVEL_3,
|
||||
THROTTLE_LEVEL_4,
|
||||
THROTTLE_LEVEL_5,
|
||||
/* Invalid */
|
||||
THROTTLE_LEVEL_MAX,
|
||||
};
|
||||
|
||||
enum {
|
||||
OL_TX_WMM_AC_BE,
|
||||
OL_TX_WMM_AC_BK,
|
||||
OL_TX_WMM_AC_VI,
|
||||
OL_TX_WMM_AC_VO,
|
||||
OL_TX_NUM_WMM_AC
|
||||
};
|
||||
|
||||
/**
|
||||
* enum ol_tx_spec - indicate what non-standard transmission actions to apply
|
||||
* @OL_TX_SPEC_STD: do regular processing
|
||||
* @OL_TX_SPEC_RAW: skip encap + A-MSDU aggr
|
||||
* @OL_TX_SPEC_NO_AGGR: skip encap + all aggr
|
||||
* @OL_TX_SPEC_NO_ENCRYPT: skip encap + encrypt
|
||||
* @OL_TX_SPEC_TSO: TCP segmented
|
||||
* @OL_TX_SPEC_NWIFI_NO_ENCRYPT: skip encrypt for nwifi
|
||||
* @OL_TX_SPEC_NO_FREE: give to cb rather than free
|
||||
*
|
||||
* Indicate one or more of the following:
|
||||
* - The tx frame already has a complete 802.11 header.
|
||||
* Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
|
||||
* A-MSDU aggregation.
|
||||
* - The tx frame should not be aggregated (A-MPDU or A-MSDU)
|
||||
* - The tx frame is already encrypted - don't attempt encryption.
|
||||
* - The tx frame is a segment of a TCP jumbo frame.
|
||||
* - This tx frame should not be unmapped and freed by the txrx layer
|
||||
* after transmission, but instead given to a registered tx completion
|
||||
* callback.
|
||||
* More than one of these specifications can apply, though typically
|
||||
* only a single specification is applied to a tx frame.
|
||||
* A compound specification can be created, as a bit-OR of these
|
||||
* specifications.
|
||||
*/
|
||||
enum ol_tx_spec {
|
||||
OL_TX_SPEC_STD = 0x0, /* do regular processing */
|
||||
OL_TX_SPEC_RAW = 0x1, /* skip encap + A-MSDU aggr */
|
||||
OL_TX_SPEC_NO_AGGR = 0x2, /* skip encap + all aggr */
|
||||
OL_TX_SPEC_NO_ENCRYPT = 0x4, /* skip encap + encrypt */
|
||||
OL_TX_SPEC_TSO = 0x8, /* TCP segmented */
|
||||
OL_TX_SPEC_NWIFI_NO_ENCRYPT = 0x10, /* skip encrypt for nwifi */
|
||||
OL_TX_SPEC_NO_FREE = 0x20, /* give to cb rather than free */
|
||||
};
|
||||
|
||||
/**
|
||||
* enum peer_debug_id_type - debug ids to track peer get_ref and release_ref
|
||||
* @PEER_DEBUG_ID_OL_INTERNAL: debug id for OL internal usage
|
||||
* @PEER_DEBUG_ID_WMA_PKT_DROP: debug id for wma_is_pkt_drop_candidate API
|
||||
* @PEER_DEBUG_ID_WMA_ADDBA_REQ: debug id for ADDBA request
|
||||
* @PEER_DEBUG_ID_WMA_DELBA_REQ: debug id for DELBA request
|
||||
* @PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP: debug id for send ADDBA response
|
||||
* @PEER_DEBUG_ID_OL_RX_THREAD: debug id for rx thread
|
||||
* @PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK: debug id for CCMP replay
|
||||
* @PEER_DEBUG_ID_WMA_DEL_BSS:debug id for remove BSS
|
||||
* @PEER_DEBUG_ID_WMA_VDEV_STOP_RESP:debug id for vdev stop response handler
|
||||
* @PEER_DEBUG_ID_OL_PEER_MAP:debug id for peer map/unmap
|
||||
* @PEER_DEBUG_ID_OL_PEER_ATTACH: debug id for peer attach/detach
|
||||
* @PEER_DEBUG_ID_OL_TXQ_VDEV_FL: debug id for vdev flush
|
||||
* @PEER_DEBUG_ID_OL_HASH_ERS: debug id for peer find hash erase
|
||||
* @PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK: debug id for peer unmap timer work
|
||||
* @PEER_DEBUG_ID_MAX: debug id MAX
|
||||
*
|
||||
* Unique peer debug IDs to track the callers. Each new usage can add to
|
||||
* this enum list to create a new "PEER_DEBUG_ID_".
|
||||
*/
|
||||
enum peer_debug_id_type {
|
||||
PEER_DEBUG_ID_OL_INTERNAL,
|
||||
PEER_DEBUG_ID_WMA_PKT_DROP,
|
||||
PEER_DEBUG_ID_WMA_ADDBA_REQ,
|
||||
PEER_DEBUG_ID_WMA_DELBA_REQ,
|
||||
PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP,
|
||||
PEER_DEBUG_ID_OL_RX_THREAD,
|
||||
PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK,
|
||||
PEER_DEBUG_ID_WMA_DEL_BSS,
|
||||
PEER_DEBUG_ID_WMA_VDEV_STOP_RESP,
|
||||
PEER_DEBUG_ID_OL_PEER_MAP,
|
||||
PEER_DEBUG_ID_OL_PEER_ATTACH,
|
||||
PEER_DEBUG_ID_OL_TXQ_VDEV_FL,
|
||||
PEER_DEBUG_ID_OL_HASH_ERS,
|
||||
PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK,
|
||||
PEER_DEBUG_ID_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_peer_bw - Bandwidth types
|
||||
* @CDP_20_MHZ: 20MHz BW
|
||||
* @CDP_40_MHZ: 40MHz BW
|
||||
* @CDP_80_MHZ: 80MHz BW
|
||||
* @CDP_160_MHZ: 160MHz BW
|
||||
* @CDP_80P80_MHZ: 80+80MHz BW
|
||||
* @CDP_5_MHZ: 5MHz BW
|
||||
* @CDP_10_MHZ: 10MHz BW
|
||||
* @CDP_320_MHZ: 320MHz BW
|
||||
* @CDP_BW_INVALID: Invalid BW
|
||||
* @CDP_BW_MAX: Max BW id
|
||||
*/
|
||||
enum cdp_peer_bw {
|
||||
CDP_20_MHZ,
|
||||
CDP_40_MHZ,
|
||||
CDP_80_MHZ,
|
||||
CDP_160_MHZ,
|
||||
CDP_80P80_MHZ,
|
||||
CDP_5_MHZ,
|
||||
CDP_10_MHZ,
|
||||
CDP_320_MHZ,
|
||||
CDP_BW_INVALID,
|
||||
CDP_BW_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ol_txrx_desc_type - txrx descriptor type
|
||||
* @is_qos_enabled: is station qos enabled
|
||||
* @is_wapi_supported: is station wapi supported
|
||||
* @peer_addr: peer mac address
|
||||
* @bw: bandwidth of peer connection
|
||||
*/
|
||||
struct ol_txrx_desc_type {
|
||||
uint8_t is_qos_enabled;
|
||||
uint8_t is_wapi_supported;
|
||||
struct qdf_mac_addr peer_addr;
|
||||
enum cdp_peer_bw bw;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ol_tx_sched_wrr_ac_specs_t - the wrr ac specs params structure
|
||||
* @wrr_skip_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
* wrr_skip_weight
|
||||
* @credit_threshold: map to ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
* credit_threshold
|
||||
* @send_limit: map to ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
* send_limit
|
||||
* @credit_reserve: map to ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
* credit_reserve
|
||||
* @discard_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
* discard_weight
|
||||
*
|
||||
* This structure is for wrr ac specs params set from user, it will update
|
||||
* its content corresponding to the ol_tx_sched_wrr_adv_category_info_t.specs.
|
||||
*/
|
||||
struct ol_tx_sched_wrr_ac_specs_t {
|
||||
int wrr_skip_weight;
|
||||
uint32_t credit_threshold;
|
||||
uint16_t send_limit;
|
||||
int credit_reserve;
|
||||
int discard_weight;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct txrx_pdev_cfg_param_t - configuration information
|
||||
* passed to the data path
|
||||
* @is_full_reorder_offload:
|
||||
* @is_uc_offload_enabled: IPA Micro controller data path offload enable flag
|
||||
* @uc_tx_buffer_count: IPA Micro controller data path offload TX buffer count
|
||||
* @uc_tx_buffer_size: IPA Micro controller data path offload TX buffer size
|
||||
* @uc_rx_indication_ring_count: IPA Micro controller data path offload RX
|
||||
* indication ring count
|
||||
* @uc_tx_partition_base: IPA Micro controller data path offload TX partition
|
||||
* base
|
||||
* @ip_tcp_udp_checksum_offload: IP, TCP and UDP checksum offload
|
||||
* @nan_ip_tcp_udp_checksum_offload: IP, TCP and UDP checksum offload for NAN
|
||||
* Mode
|
||||
* @p2p_ip_tcp_udp_checksum_offload: IP, TCP and UDP checksum offload for P2P
|
||||
* Mode
|
||||
* @legacy_mode_csum_disable: Checksum offload override flag for Legacy modes
|
||||
* @enable_rxthread: Rx processing in thread from TXRX
|
||||
* @ce_classify_enabled: CE classification enabled through INI
|
||||
* @tx_flow_stop_queue_th: Threshold to stop queue in percentage
|
||||
* @tx_flow_start_queue_offset: Start queue offset in percentage
|
||||
* @del_ack_enable: enable the tcp delay ack feature in the driver
|
||||
* @del_ack_timer_value: timeout if no more tcp ack frames, unit is ms
|
||||
* @del_ack_pkt_count: the maximum number of replaced tcp ack frames
|
||||
* @ac_specs:
|
||||
* @gro_enable:
|
||||
* @tso_enable:
|
||||
* @lro_enable:
|
||||
* @sg_enable:
|
||||
* @enable_data_stall_detection:
|
||||
* @enable_flow_steering:
|
||||
* @disable_intra_bss_fwd:
|
||||
* @bundle_timer_value:
|
||||
* @bundle_size:
|
||||
* @pktlog_buffer_size:
|
||||
*/
|
||||
struct txrx_pdev_cfg_param_t {
|
||||
uint8_t is_full_reorder_offload;
|
||||
uint8_t is_uc_offload_enabled;
|
||||
uint32_t uc_tx_buffer_count;
|
||||
uint32_t uc_tx_buffer_size;
|
||||
uint32_t uc_rx_indication_ring_count;
|
||||
uint32_t uc_tx_partition_base;
|
||||
bool ip_tcp_udp_checksum_offload;
|
||||
bool nan_ip_tcp_udp_checksum_offload;
|
||||
bool p2p_ip_tcp_udp_checksum_offload;
|
||||
bool legacy_mode_csum_disable;
|
||||
bool enable_rxthread;
|
||||
bool ce_classify_enabled;
|
||||
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
|
||||
uint32_t tx_flow_stop_queue_th;
|
||||
uint32_t tx_flow_start_queue_offset;
|
||||
#endif
|
||||
|
||||
#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
|
||||
bool del_ack_enable;
|
||||
uint16_t del_ack_timer_value;
|
||||
uint16_t del_ack_pkt_count;
|
||||
#endif
|
||||
|
||||
struct ol_tx_sched_wrr_ac_specs_t ac_specs[TX_WMM_AC_NUM];
|
||||
bool gro_enable;
|
||||
bool tso_enable;
|
||||
bool lro_enable;
|
||||
bool sg_enable;
|
||||
uint32_t enable_data_stall_detection;
|
||||
bool enable_flow_steering;
|
||||
bool disable_intra_bss_fwd;
|
||||
|
||||
#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
|
||||
uint16_t bundle_timer_value;
|
||||
uint16_t bundle_size;
|
||||
#endif
|
||||
uint8_t pktlog_buffer_size;
|
||||
};
|
||||
|
||||
#ifdef IPA_OFFLOAD
|
||||
/**
|
||||
* struct ol_txrx_ipa_resources - Resources needed for IPA
|
||||
* @ce_sr:
|
||||
* @ce_sr_ring_size:
|
||||
* @ce_reg_paddr:
|
||||
* @tx_comp_ring:
|
||||
* @tx_num_alloc_buffer:
|
||||
* @rx_rdy_ring:
|
||||
* @rx_proc_done_idx:
|
||||
* @rx2_rdy_ring:
|
||||
* @rx2_proc_done_idx:
|
||||
* @tx_comp_doorbell_dmaaddr: IPA UC Tx Complete doorbell register paddr
|
||||
* @rx_ready_doorbell_dmaaddr: IPA UC Rx Ready doorbell register paddr
|
||||
* @tx_pipe_handle:
|
||||
* @rx_pipe_handle:
|
||||
*/
|
||||
struct ol_txrx_ipa_resources {
|
||||
qdf_shared_mem_t *ce_sr;
|
||||
uint32_t ce_sr_ring_size;
|
||||
qdf_dma_addr_t ce_reg_paddr;
|
||||
|
||||
qdf_shared_mem_t *tx_comp_ring;
|
||||
uint32_t tx_num_alloc_buffer;
|
||||
|
||||
qdf_shared_mem_t *rx_rdy_ring;
|
||||
qdf_shared_mem_t *rx_proc_done_idx;
|
||||
|
||||
qdf_shared_mem_t *rx2_rdy_ring;
|
||||
qdf_shared_mem_t *rx2_proc_done_idx;
|
||||
|
||||
/* IPA UC doorbell registers paddr */
|
||||
qdf_dma_addr_t tx_comp_doorbell_dmaaddr;
|
||||
qdf_dma_addr_t rx_ready_doorbell_dmaaddr;
|
||||
|
||||
uint32_t tx_pipe_handle;
|
||||
uint32_t rx_pipe_handle;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct ol_txrx_ocb_chan_info {
|
||||
uint32_t chan_freq;
|
||||
uint16_t disable_rx_stats_hdr:1;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ol_mic_error_info - carries the information associated with
|
||||
* a MIC error
|
||||
* @vdev_id: virtual device ID
|
||||
* @key_id: Key ID
|
||||
* @pn: packet number
|
||||
* @sa: source address
|
||||
* @da: destination address
|
||||
* @ta: transmitter address
|
||||
*/
|
||||
struct ol_mic_error_info {
|
||||
uint8_t vdev_id;
|
||||
uint32_t key_id;
|
||||
uint64_t pn;
|
||||
uint8_t sa[QDF_MAC_ADDR_SIZE];
|
||||
uint8_t da[QDF_MAC_ADDR_SIZE];
|
||||
uint8_t ta[QDF_MAC_ADDR_SIZE];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ol_error_info - carries the information associated with an
|
||||
* error indicated by the firmware
|
||||
* @u: union of error information structs
|
||||
* @u.mic_err: MIC error information
|
||||
*/
|
||||
struct ol_error_info {
|
||||
union {
|
||||
struct ol_mic_error_info mic_err;
|
||||
} u;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* struct ol_txrx_ocb_set_chan - txrx OCB channel info
|
||||
* @ocb_channel_count: Channel count
|
||||
* @ocb_channel_info: OCB channel info
|
||||
*/
|
||||
struct ol_txrx_ocb_set_chan {
|
||||
uint32_t ocb_channel_count;
|
||||
struct ol_txrx_ocb_chan_info *ocb_channel_info;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ol_tx_ac_param_t - WMM parameters
|
||||
* @aifs: Arbitration Inter-Frame Space
|
||||
* @cwmin: Minimum contention window size
|
||||
* @cwmax: Maximum contention window size
|
||||
*
|
||||
* The struct is used to specify information to update TX WMM scheduler.
|
||||
*/
|
||||
struct ol_tx_ac_param_t {
|
||||
uint32_t aifs;
|
||||
uint32_t cwmin;
|
||||
uint32_t cwmax;
|
||||
};
|
||||
|
||||
struct ol_tx_wmm_param_t {
|
||||
struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
|
||||
};
|
||||
|
||||
struct ieee80211_ba_parameterset {
|
||||
#if _BYTE_ORDER == _BIG_ENDIAN
|
||||
uint16_t buffersize:10, /* B6-15 buffer size */
|
||||
tid:4, /* B2-5 TID */
|
||||
bapolicy:1, /* B1 block ack policy */
|
||||
amsdusupported:1; /* B0 amsdu supported */
|
||||
#else
|
||||
uint16_t amsdusupported:1, /* B0 amsdu supported */
|
||||
bapolicy:1, /* B1 block ack policy */
|
||||
tid:4, /* B2-5 TID */
|
||||
buffersize:10; /* B6-15 buffer size */
|
||||
#endif
|
||||
} __packed;
|
||||
|
||||
struct ieee80211_ba_seqctrl {
|
||||
#if _BYTE_ORDER == _BIG_ENDIAN
|
||||
uint16_t startseqnum:12, /* B4-15 starting sequence number */
|
||||
fragnum:4; /* B0-3 fragment number */
|
||||
#else
|
||||
uint16_t fragnum:4, /* B0-3 fragment number */
|
||||
startseqnum:12; /* B4-15 starting sequence number */
|
||||
#endif
|
||||
} __packed;
|
||||
|
||||
struct ieee80211_delba_parameterset {
|
||||
#if _BYTE_ORDER == _BIG_ENDIAN
|
||||
uint16_t tid:4, /* B12-15 tid */
|
||||
initiator:1, /* B11 initiator */
|
||||
reserved0:11; /* B0-10 reserved */
|
||||
#else
|
||||
uint16_t reserved0:11, /* B0-10 reserved */
|
||||
initiator:1, /* B11 initiator */
|
||||
tid:4; /* B12-15 tid */
|
||||
#endif
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* typedef ol_txrx_vdev_peer_remove_cb() - wma_remove_peer callback
|
||||
* @handle: callback handle
|
||||
* @bssid: BSSID
|
||||
* @vdev_id: virtual device ID
|
||||
* @peer: peer
|
||||
*/
|
||||
typedef void (*ol_txrx_vdev_peer_remove_cb)(void *handle, uint8_t *bssid,
|
||||
uint8_t vdev_id, void *peer);
|
||||
|
||||
/**
|
||||
* typedef tx_pause_callback() - OSIF function registered with the data path
|
||||
* @vdev_id: virtual device id
|
||||
* @action: tx pause action to take
|
||||
* @reason: reason for the tx pause action
|
||||
*/
|
||||
typedef void (*tx_pause_callback)(uint8_t vdev_id,
|
||||
enum netif_action_type action,
|
||||
enum netif_reason_type reason);
|
||||
|
||||
/**
|
||||
* struct ol_rx_inv_peer_params - rx invalid peer data parameters
|
||||
* @vdev_id: Virtual device ID
|
||||
* @ra: RX data receiver MAC address
|
||||
* @ta: RX data transmitter MAC address
|
||||
*/
|
||||
struct ol_rx_inv_peer_params {
|
||||
uint8_t vdev_id;
|
||||
uint8_t ra[QDF_MAC_ADDR_SIZE];
|
||||
uint8_t ta[QDF_MAC_ADDR_SIZE];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_txrx_ext_stats: dp extended stats
|
||||
* @tx_msdu_enqueue: tx msdu queued to hw
|
||||
* @tx_msdu_overflow: tx msdu overflow
|
||||
* @rx_mpdu_received: rx mpdu processed by hw
|
||||
* @rx_mpdu_delivered: rx mpdu received from hw
|
||||
* @rx_mpdu_error: rx mpdu error count
|
||||
* @rx_mpdu_missed: rx mpdu missed by hw
|
||||
*/
|
||||
struct cdp_txrx_ext_stats {
|
||||
uint32_t tx_msdu_enqueue;
|
||||
uint32_t tx_msdu_overflow;
|
||||
uint32_t rx_mpdu_received;
|
||||
uint32_t rx_mpdu_delivered;
|
||||
uint32_t rx_mpdu_error;
|
||||
uint32_t rx_mpdu_missed;
|
||||
};
|
||||
|
||||
#endif /* __CDP_TXRX_MOB_DEF_H */
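
A sketch of an OSIF-side tx_pause_callback implementation dispatching on enum netif_action_type; the queue operations themselves are left as comments because they belong to the OS adaptation layer, and this handler is illustrative rather than the driver's actual callback:

#include <cdp_txrx_mob_def.h>

static void example_tx_pause_cb(uint8_t vdev_id,
				enum netif_action_type action,
				enum netif_reason_type reason)
{
	switch (action) {
	case WLAN_STOP_ALL_NETIF_QUEUE:
	case WLAN_NETIF_TX_DISABLE:
		/* stop the OS queues backing vdev_id */
		break;
	case WLAN_START_ALL_NETIF_QUEUE:
	case WLAN_WAKE_ALL_NETIF_QUEUE:
		/* start/wake the OS queues backing vdev_id */
		break;
	default:
		/* other actions (carrier, per-AC queues) ignored in sketch */
		break;
	}
}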
|
404
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h
Normal file
@ -0,0 +1,404 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_mon.h
|
||||
* Define the monitor mode API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_MON_H_
|
||||
#define _CDP_TXRX_MON_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
static inline QDF_STATUS cdp_reset_monitor_mode(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id,
|
||||
u_int8_t smart_monitor)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_reset_monitor_mode)
|
||||
return 0;
|
||||
|
||||
return soc->ops->mon_ops->txrx_reset_monitor_mode(soc, pdev_id,
|
||||
smart_monitor);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
|
||||
* @soc: Datapath SOC handle
|
||||
* @pdev_id: id of datapath PDEV handle
|
||||
* @nbuf: Management frame buffer
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_deliver_tx_mgmt(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
qdf_nbuf_t nbuf)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_deliver_tx_mgmt)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mon_ops->txrx_deliver_tx_mgmt(soc, pdev_id, nbuf);
|
||||
}
|
||||
|
||||
#ifdef QCA_SUPPORT_LITE_MONITOR
|
||||
/**
|
||||
* cdp_set_lite_mon_config() - Set lite monitor config/filter
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @config: lite monitor config
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* This API is used to enable/disable lite monitor feature
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if value set successfully
|
||||
* QDF_STATUS_E_INVAL if error
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_set_lite_mon_config(ol_txrx_soc_handle soc,
|
||||
struct cdp_lite_mon_filter_config *config,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_set_lite_mon_config)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
return soc->ops->mon_ops->txrx_set_lite_mon_config(soc, config,
|
||||
pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_get_lite_mon_config() - Get lite monitor config
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @config: lite monitor config
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* This API is used to get lite monitor feature config
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if the config is retrieved successfully
* QDF_STATUS_E_INVAL if error
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_get_lite_mon_config(ol_txrx_soc_handle soc,
|
||||
struct cdp_lite_mon_filter_config *config,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_get_lite_mon_config)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
return soc->ops->mon_ops->txrx_get_lite_mon_config(soc, config,
|
||||
pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_set_lite_mon_peer_config() - Set lite monitor peer config
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @config: lite monitor peer config
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* This API is used to add/del lite monitor peers
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if value set successfully
|
||||
* QDF_STATUS_E_INVAL if error
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_set_lite_mon_peer_config(ol_txrx_soc_handle soc,
|
||||
struct cdp_lite_mon_peer_config *config,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_set_lite_mon_peer_config)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
return soc->ops->mon_ops->txrx_set_lite_mon_peer_config(soc, config,
|
||||
pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_get_lite_mon_peer_config() - Get lite monitor peer list
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @info: lite monitor peer info
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* This API is used to get lite monitor peers
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if value set successfully
|
||||
* QDF_STATUS_E_INVAL if error
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_get_lite_mon_peer_config(ol_txrx_soc_handle soc,
|
||||
struct cdp_lite_mon_peer_info *info,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_get_lite_mon_peer_config)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
return soc->ops->mon_ops->txrx_get_lite_mon_peer_config(soc, info,
|
||||
pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_is_lite_mon_enabled() - Get lite monitor enable status
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @pdev_id: pdev id
|
||||
* @dir: direction tx/rx
|
||||
*
|
||||
* This API is used to get lite monitor enable status
|
||||
*
|
||||
* Return: 0 if disabled
|
||||
* 1 if enabled
|
||||
*/
|
||||
static inline int
|
||||
cdp_is_lite_mon_enabled(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id, uint8_t dir)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_is_lite_mon_enabled)
|
||||
return 0;
|
||||
|
||||
return soc->ops->mon_ops->txrx_is_lite_mon_enabled(soc, pdev_id, dir);
|
||||
}
|
||||
|
||||
/**
* cdp_get_lite_mon_legacy_feature_enabled() - Get the legacy feature enabled via lite monitor
|
||||
*
|
||||
* @soc: dp soc handle
|
||||
* @pdev_id: pdev id
|
||||
* @dir: direction tx/rx
|
||||
*
|
||||
* This API is used to get the legacy feature enabled using lite_mon
|
||||
*
|
||||
* Return: legacy feature enabled
|
||||
*/
|
||||
static inline int
|
||||
cdp_get_lite_mon_legacy_feature_enabled(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id, uint8_t dir)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_get_lite_mon_legacy_feature_enabled)
|
||||
return 0;
|
||||
|
||||
return soc->ops->mon_ops->txrx_get_lite_mon_legacy_feature_enabled(soc,
|
||||
pdev_id,
|
||||
dir);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef QCA_RSSI_DB2DBM
|
||||
/**
|
||||
* cdp_set_params_rssi_dbm_conversion() - Set the rssi dbm conversion params
|
||||
* into dp_pdev structure
|
||||
* @soc: soc txrx handler
|
||||
* @params: cdp_rssi_db2dbm_param_dp pointer
|
||||
*
* Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_set_params_rssi_dbm_conversion(ol_txrx_soc_handle soc,
|
||||
struct cdp_rssi_db2dbm_param_dp *params)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance:");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_set_mon_pdev_params_rssi_dbm_conv)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mon_ops->txrx_set_mon_pdev_params_rssi_dbm_conv
|
||||
(soc, params);
|
||||
}
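/*
 * Illustrative usage sketch: forward rssi-to-dBm conversion parameters to the
 * monitor pdev. Only the temperature-offset branch is populated; the pdev id
 * and offset value are placeholders chosen for the example, and the struct is
 * declared in cdp_txrx_mon_struct.h.
 */
static inline QDF_STATUS
example_send_rssi_temp_offset(ol_txrx_soc_handle soc, int32_t temp_offset)
{
	struct cdp_rssi_db2dbm_param_dp params = {0};

	params.pdev_id = 0;			/* placeholder pdev */
	params.rssi_temp_off_present = true;
	params.temp_off_param.rssi_temp_offset = temp_offset;

	return cdp_set_params_rssi_dbm_conversion(soc, &params);
}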
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_CONFIG_TELEMETRY_AGENT
|
||||
/**
* cdp_update_pdev_mon_telemetry_airtime_stats() - update telemetry airtime
|
||||
* stats in monitor pdev
|
||||
*
|
||||
* @soc: dp soc handle
* @pdev_id: pdev id
|
||||
*
|
||||
* This API is used to update telemetry airtime stats in monitor pdev
|
||||
*
|
||||
* Return: Success if stats are updated, else failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_update_pdev_mon_telemetry_airtime_stats(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->txrx_update_pdev_mon_telemetry_airtime_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mon_ops->txrx_update_pdev_mon_telemetry_airtime_stats(
|
||||
soc, pdev_id);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
|
||||
/**
|
||||
* cdp_start_local_pkt_capture() - start local pkt capture
|
||||
* @soc: opaque soc handle
|
||||
* @pdev_id: pdev id
|
||||
* @filter: monitor filter config
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if success
|
||||
* QDF_STATUS_E_FAILURE if error
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_start_local_pkt_capture(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id,
|
||||
struct cdp_monitor_filter *filter)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->start_local_pkt_capture)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mon_ops->start_local_pkt_capture(soc, pdev_id, filter);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_stop_local_pkt_capture() - stop local pkt capture
|
||||
* @soc: opaque soc handle
|
||||
* @pdev_id: pdev_id
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS if success
|
||||
* QDF_STATUS_E_FAILURE if error
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_stop_local_pkt_capture(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->stop_local_pkt_capture)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->mon_ops->stop_local_pkt_capture(soc, pdev_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_is_local_pkt_capture_running() - check whether local packet capture is running
|
||||
* @soc: opaque soc handle
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* Return: true if running
|
||||
* false if not running
|
||||
*/
|
||||
static inline
|
||||
bool cdp_is_local_pkt_capture_running(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->mon_ops ||
|
||||
!soc->ops->mon_ops->is_local_pkt_capture_running)
|
||||
return false;
|
||||
|
||||
return soc->ops->mon_ops->is_local_pkt_capture_running(soc, pdev_id);
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
QDF_STATUS cdp_start_local_pkt_capture(ol_txrx_soc_handle soc,
|
||||
uint8_t pdev_id,
|
||||
struct cdp_monitor_filter *filter)
|
||||
{
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS cdp_stop_local_pkt_capture(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
static inline
|
||||
bool cdp_is_local_pkt_capture_running(ol_txrx_soc_handle soc, uint8_t pdev_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* WLAN_FEATURE_LOCAL_PKT_CAPTURE */
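/*
 * Illustrative usage sketch: start local packet capture, poll its state and
 * stop it again. The zeroed filter and pdev id 0 are placeholders; a real
 * caller would populate struct cdp_monitor_filter with meaningful settings.
 * When WLAN_FEATURE_LOCAL_PKT_CAPTURE is disabled the stubs above make this
 * a no-op.
 */
static inline void
example_local_pkt_capture_cycle(ol_txrx_soc_handle soc)
{
	struct cdp_monitor_filter filter = {0};	/* placeholder filter */

	if (cdp_start_local_pkt_capture(soc, 0, &filter) != QDF_STATUS_SUCCESS)
		return;

	if (cdp_is_local_pkt_capture_running(soc, 0))
		cdp_stop_local_pkt_capture(soc, 0);
}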
|
||||
|
||||
#endif
|
@ -0,0 +1,660 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_mon_struct.h
|
||||
* Define the monitor mode API structure
|
||||
* shared by data path and the OS interface module
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_MON_STRUCT_H_
|
||||
#define _CDP_TXRX_MON_STRUCT_H_
|
||||
|
||||
#ifdef QCA_SUPPORT_LITE_MONITOR
|
||||
|
||||
#define CDP_LITE_MON_PEER_MAX 16
|
||||
|
||||
#define CDP_MON_FRM_TYPE_MAX 3
|
||||
#define CDP_MON_FRM_FILTER_MODE_MAX 4
|
||||
|
||||
#define CDP_LITE_MON_LEN_64B 0x40
|
||||
#define CDP_LITE_MON_LEN_128B 0x80
|
||||
#define CDP_LITE_MON_LEN_256B 0x100
|
||||
#define CDP_LITE_MON_LEN_FULL 0xFFFF
|
||||
|
||||
#define CDP_LITE_MON_FILTER_ALL 0xFFFF
|
||||
|
||||
/* This should align with nac mac type enumerations in ieee80211_ioctl.h */
|
||||
#define CDP_LITE_MON_PEER_MAC_TYPE_CLIENT 2
|
||||
|
||||
/**
|
||||
* enum cdp_lite_mon_legacy_filter - legacy filters for tx/rx
|
||||
* @LEGACY_FILTER_DISABLED: No filter / filter disabled
|
||||
* @LEGACY_FILTER_MCOPY: M_Copy filter
|
||||
* @LEGACY_FILTER_TX_CAPTURE: Tx_Capture filter
|
||||
* @LEGACY_FILTER_RX_ENH_CAPTURE: Rx Enhance capture filter
|
||||
* @LEGACY_FILTER_ADV_MON_FILTER: Advance Monitor filter
|
||||
*
|
||||
* Used to identify which filter is currently enabled using lite mon
|
||||
*/
|
||||
enum cdp_lite_mon_legacy_filter {
|
||||
LEGACY_FILTER_DISABLED = 0,
|
||||
LEGACY_FILTER_MCOPY = 1,
|
||||
LEGACY_FILTER_TX_CAPTURE = 2,
|
||||
LEGACY_FILTER_RX_ENH_CAPTURE = 3,
|
||||
LEGACY_FILTER_ADV_MON_FILTER = 4,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_lite_mon_level- lite mon frame levels
|
||||
* @CDP_LITE_MON_LEVEL_INVALID: level invalid
|
||||
* @CDP_LITE_MON_LEVEL_MSDU: level msdu
|
||||
* @CDP_LITE_MON_LEVEL_MPDU: level mpdu
|
||||
* @CDP_LITE_MON_LEVEL_PPDU: level ppdu
|
||||
*/
|
||||
enum cdp_lite_mon_level {
|
||||
CDP_LITE_MON_LEVEL_INVALID = 0,
|
||||
CDP_LITE_MON_LEVEL_MSDU = 1,
|
||||
CDP_LITE_MON_LEVEL_MPDU = 2,
|
||||
CDP_LITE_MON_LEVEL_PPDU = 3,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_lite_mon_peer_action- lite mon peer action
|
||||
* @CDP_LITE_MON_PEER_ADD: peer add
|
||||
* @CDP_LITE_MON_PEER_REMOVE: peer remove
|
||||
*/
|
||||
enum cdp_lite_mon_peer_action {
|
||||
CDP_LITE_MON_PEER_ADD = 0,
|
||||
CDP_LITE_MON_PEER_REMOVE = 1,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_lite_mon_direction - lite mon config direction
|
||||
* @CDP_LITE_MON_DIRECTION_RX: lite mon config direction rx
|
||||
* @CDP_LITE_MON_DIRECTION_TX: lite mon config direction tx
|
||||
*/
|
||||
enum cdp_lite_mon_direction {
|
||||
CDP_LITE_MON_DIRECTION_RX = 1,
|
||||
CDP_LITE_MON_DIRECTION_TX = 2,
|
||||
};
|
||||
#endif
|
||||
/* MU max user to sniff */
|
||||
#define CDP_MU_SNIF_USER_MAX 4
|
||||
/* EHT max type and compression mode */
|
||||
#define CDP_EHT_TYPE_MODE_MAX 3
|
||||
/* Same as MAX_20MHZ_SEGMENTS */
|
||||
#define CDP_MAX_20MHZ_SEGS 16
|
||||
/* Same as MAX_ANTENNA_EIGHT */
|
||||
#define CDP_MAX_NUM_ANTENNA 8
|
||||
|
||||
/* XXX not really a mode; there are really multiple PHY's */
|
||||
enum cdp_mon_phymode {
|
||||
/* autoselect */
|
||||
CDP_IEEE80211_MODE_AUTO = 0,
|
||||
/* 5GHz, OFDM */
|
||||
CDP_IEEE80211_MODE_11A = 1,
|
||||
/* 2GHz, CCK */
|
||||
CDP_IEEE80211_MODE_11B = 2,
|
||||
/* 2GHz, OFDM */
|
||||
CDP_IEEE80211_MODE_11G = 3,
|
||||
/* 2GHz, GFSK */
|
||||
CDP_IEEE80211_MODE_FH = 4,
|
||||
/* 5GHz, OFDM, 2x clock dynamic turbo */
|
||||
CDP_IEEE80211_MODE_TURBO_A = 5,
|
||||
/* 2GHz, OFDM, 2x clock dynamic turbo */
|
||||
CDP_IEEE80211_MODE_TURBO_G = 6,
|
||||
/* 5Ghz, HT20 */
|
||||
CDP_IEEE80211_MODE_11NA_HT20 = 7,
|
||||
/* 2Ghz, HT20 */
|
||||
CDP_IEEE80211_MODE_11NG_HT20 = 8,
|
||||
/* 5Ghz, HT40 (ext ch +1) */
|
||||
CDP_IEEE80211_MODE_11NA_HT40PLUS = 9,
|
||||
/* 5Ghz, HT40 (ext ch -1) */
|
||||
CDP_IEEE80211_MODE_11NA_HT40MINUS = 10,
|
||||
/* 2Ghz, HT40 (ext ch +1) */
|
||||
CDP_IEEE80211_MODE_11NG_HT40PLUS = 11,
|
||||
/* 2Ghz, HT40 (ext ch -1) */
|
||||
CDP_IEEE80211_MODE_11NG_HT40MINUS = 12,
|
||||
/* 2Ghz, Auto HT40 */
|
||||
CDP_IEEE80211_MODE_11NG_HT40 = 13,
|
||||
/* 5Ghz, Auto HT40 */
|
||||
CDP_IEEE80211_MODE_11NA_HT40 = 14,
|
||||
/* 5Ghz, VHT20 */
|
||||
CDP_IEEE80211_MODE_11AC_VHT20 = 15,
|
||||
/* 5Ghz, VHT40 (Ext ch +1) */
|
||||
CDP_IEEE80211_MODE_11AC_VHT40PLUS = 16,
|
||||
/* 5Ghz VHT40 (Ext ch -1) */
|
||||
CDP_IEEE80211_MODE_11AC_VHT40MINUS = 17,
|
||||
/* 5Ghz, VHT40 */
|
||||
CDP_IEEE80211_MODE_11AC_VHT40 = 18,
|
||||
/* 5Ghz, VHT80 */
|
||||
CDP_IEEE80211_MODE_11AC_VHT80 = 19,
|
||||
/* 5Ghz, VHT160 */
|
||||
CDP_IEEE80211_MODE_11AC_VHT160 = 20,
|
||||
/* 5Ghz, VHT80_80 */
|
||||
CDP_IEEE80211_MODE_11AC_VHT80_80 = 21,
|
||||
};
|
||||
|
||||
enum {
|
||||
CDP_PKT_TYPE_OFDM = 0,
|
||||
CDP_PKT_TYPE_CCK,
|
||||
CDP_PKT_TYPE_HT,
|
||||
CDP_PKT_TYPE_VHT,
|
||||
CDP_PKT_TYPE_HE,
|
||||
CDP_PKT_TYPE_EHT,
|
||||
CDP_PKT_TYPE_NO_SUP,
|
||||
CDP_PKT_TYPE_MAX,
|
||||
};
|
||||
|
||||
enum {
|
||||
CDP_SGI_0_8_US = 0,
|
||||
CDP_SGI_0_4_US,
|
||||
CDP_SGI_1_6_US,
|
||||
CDP_SGI_3_2_US,
|
||||
};
|
||||
|
||||
enum {
|
||||
CDP_RX_TYPE_SU = 0,
|
||||
CDP_RX_TYPE_MU_MIMO,
|
||||
CDP_RX_TYPE_MU_OFDMA,
|
||||
CDP_RX_TYPE_MU_OFDMA_MIMO,
|
||||
CDP_RX_TYPE_MAX,
|
||||
};
|
||||
|
||||
enum {
|
||||
CDP_MU_TYPE_DL = 0,
|
||||
CDP_MU_TYPE_UL,
|
||||
CDP_MU_TYPE_MAX,
|
||||
};
|
||||
|
||||
/*
* Bandwidth types
*/
|
||||
enum CMN_BW_TYPES {
|
||||
CMN_BW_20MHZ,
|
||||
CMN_BW_40MHZ,
|
||||
CMN_BW_80MHZ,
|
||||
CMN_BW_160MHZ,
|
||||
CMN_BW_80_80MHZ,
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
CMN_BW_320MHZ,
|
||||
#endif
|
||||
CMN_BW_CNT,
|
||||
CMN_BW_IDLE = 0xFF, /*default BW state */
|
||||
};
|
||||
|
||||
enum cdp_punctured_modes {
|
||||
NO_PUNCTURE,
|
||||
#ifdef WLAN_FEATURE_11BE
|
||||
PUNCTURED_20MHZ,
|
||||
PUNCTURED_40MHZ,
|
||||
PUNCTURED_80MHZ,
|
||||
PUNCTURED_120MHZ,
|
||||
#endif
|
||||
PUNCTURED_MODE_CNT,
|
||||
};
|
||||
|
||||
struct cdp_mon_status {
|
||||
/* bss color value 1-63 used for update on ppdu_desc bsscolor */
|
||||
uint8_t bsscolor;
|
||||
int rs_numchains;
|
||||
int rs_flags;
|
||||
#define IEEE80211_RX_FCS_ERROR 0x01
|
||||
#define IEEE80211_RX_MIC_ERROR 0x02
|
||||
#define IEEE80211_RX_DECRYPT_ERROR 0x04
|
||||
/* holes in flags here between, ATH_RX_XXXX to IEEE80211_RX_XXX */
|
||||
#define IEEE80211_RX_KEYMISS 0x200
|
||||
#define IEEE80211_RX_PN_ERROR 0x400
|
||||
int rs_rssi; /* RSSI (noise floor adjusted) */
|
||||
int rs_abs_rssi; /* absolute RSSI */
|
||||
int rs_datarate; /* data rate received */
|
||||
int rs_rateieee;
|
||||
int rs_ratephy1;
|
||||
int rs_ratephy2;
|
||||
int rs_ratephy3;
|
||||
|
||||
/* Keep the same as ATH_MAX_ANTENNA */
|
||||
#define IEEE80211_MAX_ANTENNA 3
|
||||
/* RSSI (noise floor adjusted) */
|
||||
u_int8_t rs_rssictl[IEEE80211_MAX_ANTENNA];
|
||||
/* RSSI (noise floor adjusted) */
|
||||
u_int8_t rs_rssiextn[IEEE80211_MAX_ANTENNA];
|
||||
/* rs_rssi is valid or not */
|
||||
u_int8_t rs_isvalidrssi;
|
||||
|
||||
enum cdp_mon_phymode rs_phymode;
|
||||
int rs_freq;
|
||||
|
||||
union {
|
||||
u_int8_t data[8];
|
||||
u_int64_t tsf;
|
||||
} rs_tstamp;
|
||||
|
||||
/*
|
||||
* Detail channel structure of recv frame.
|
||||
* It could be NULL if not available
|
||||
*/
|
||||
|
||||
|
||||
#ifdef ATH_SUPPORT_AOW
|
||||
u_int16_t rs_rxseq; /* WLAN Sequence number */
|
||||
#endif
|
||||
#ifdef ATH_VOW_EXT_STATS
|
||||
/* Lower 16 bits holds the udp checksum offset in the data pkt */
|
||||
u_int32_t vow_extstats_offset;
|
||||
/* Higher 16 bits contains offset in the data pkt at which vow
|
||||
* ext stats are embedded
|
||||
*/
|
||||
#endif
|
||||
u_int8_t rs_isaggr;
|
||||
u_int8_t rs_isapsd;
|
||||
int16_t rs_noisefloor;
|
||||
u_int16_t rs_channel;
|
||||
#ifdef ATH_SUPPORT_TxBF
|
||||
u_int32_t rs_rpttstamp; /* txbf report time stamp*/
|
||||
#endif
|
||||
|
||||
/* The following counts are meant to assist in stats calculation.
|
||||
* These variables are incremented only in specific situations, and
|
||||
* should not be relied upon for any purpose other than the original
|
||||
* stats related purpose they have been introduced for.
|
||||
*/
|
||||
|
||||
u_int16_t rs_cryptodecapcount; /* Crypto bytes decapped/demic'ed. */
|
||||
u_int8_t rs_padspace; /* No. of padding bytes present after
|
||||
header in wbuf. */
|
||||
u_int8_t rs_qosdecapcount; /* QoS/HTC bytes decapped. */
|
||||
|
||||
/* End of stats calculation related counts. */
|
||||
|
||||
/*
|
||||
* uint8_t rs_lsig[IEEE80211_LSIG_LEN];
|
||||
* uint8_t rs_htsig[IEEE80211_HTSIG_LEN];
|
||||
* uint8_t rs_servicebytes[IEEE80211_SB_LEN];
|
||||
* uint8_t rs_fcs_error;
|
||||
*/
|
||||
|
||||
/* cdp convergence monitor mode status */
|
||||
union {
|
||||
u_int8_t cdp_data[8];
|
||||
u_int64_t cdp_tsf;
|
||||
} cdp_rs_tstamp;
|
||||
|
||||
uint8_t cdp_rs_pream_type;
|
||||
uint32_t cdp_rs_user_rssi;
|
||||
uint8_t cdp_rs_stbc;
|
||||
uint8_t cdp_rs_sgi;
|
||||
uint32_t cdf_rs_rate_mcs;
|
||||
uint32_t cdp_rs_reception_type;
|
||||
uint32_t cdp_rs_bw;
|
||||
uint32_t cdp_rs_nss;
|
||||
uint8_t cdp_rs_fcs_err;
|
||||
bool cdp_rs_rxdma_err;
|
||||
};
|
||||
|
||||
enum {
|
||||
CDP_MON_PPDU_START = 0,
|
||||
CDP_MON_PPDU_END,
|
||||
};
|
||||
|
||||
#ifdef QCA_UNDECODED_METADATA_SUPPORT
|
||||
/**
* enum cdp_mon_phyrx_abort_reason_code - PHY error code to store the reason
* why the PHY generated an abort request.
|
||||
*/
|
||||
enum cdp_mon_phyrx_abort_reason_code {
|
||||
CDP_PHYRX_ERR_PHY_OFF = 0,
|
||||
CDP_PHYRX_ERR_SYNTH_OFF,
|
||||
CDP_PHYRX_ERR_OFDMA_TIMING,
|
||||
CDP_PHYRX_ERR_OFDMA_SIGNAL_PARITY,
|
||||
CDP_PHYRX_ERR_OFDMA_RATE_ILLEGAL,
|
||||
CDP_PHYRX_ERR_OFDMA_LENGTH_ILLEGAL,
|
||||
CDP_PHYRX_ERR_OFDMA_RESTART,
|
||||
CDP_PHYRX_ERR_OFDMA_SERVICE,
|
||||
CDP_PHYRX_ERR_PPDU_OFDMA_POWER_DROP,
|
||||
CDP_PHYRX_ERR_CCK_BLOKKER,
|
||||
CDP_PHYRX_ERR_CCK_TIMING = 10,
|
||||
CDP_PHYRX_ERR_CCK_HEADER_CRC,
|
||||
CDP_PHYRX_ERR_CCK_RATE_ILLEGAL,
|
||||
CDP_PHYRX_ERR_CCK_LENGTH_ILLEGAL,
|
||||
CDP_PHYRX_ERR_CCK_RESTART,
|
||||
CDP_PHYRX_ERR_CCK_SERVICE,
|
||||
CDP_PHYRX_ERR_CCK_POWER_DROP,
|
||||
CDP_PHYRX_ERR_HT_CRC_ERR,
|
||||
CDP_PHYRX_ERR_HT_LENGTH_ILLEGAL,
|
||||
CDP_PHYRX_ERR_HT_RATE_ILLEGAL,
|
||||
CDP_PHYRX_ERR_HT_ZLF = 20,
|
||||
CDP_PHYRX_ERR_FALSE_RADAR_EXT,
|
||||
CDP_PHYRX_ERR_GREEN_FIELD,
|
||||
CDP_PHYRX_ERR_BW_GT_DYN_BW,
|
||||
CDP_PHYRX_ERR_HT_LSIG_RATE_MISMATCH,
|
||||
CDP_PHYRX_ERR_VHT_CRC_ERROR,
|
||||
CDP_PHYRX_ERR_VHT_SIGA_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_VHT_LSIG_LEN_INVALID,
|
||||
CDP_PHYRX_ERR_VHT_NDP_OR_ZLF,
|
||||
CDP_PHYRX_ERR_VHT_NSYM_LT_ZERO,
|
||||
CDP_PHYRX_ERR_VHT_RX_EXTRA_SYMBOL_MISMATCH = 30,
|
||||
CDP_PHYRX_ERR_VHT_RX_SKIP_GROUP_ID0,
|
||||
CDP_PHYRX_ERR_VHT_RX_SKIP_GROUP_ID1TO62,
|
||||
CDP_PHYRX_ERR_VHT_RX_SKIP_GROUP_ID63,
|
||||
CDP_PHYRX_ERR_OFDM_LDPC_DECODER_DISABLED,
|
||||
CDP_PHYRX_ERR_DEFER_NAP,
|
||||
CDP_PHYRX_ERR_FDOMAIN_TIMEOUT,
|
||||
CDP_PHYRX_ERR_LSIG_REL_CHECK,
|
||||
CDP_PHYRX_ERR_BT_COLLISION,
|
||||
CDP_PHYRX_ERR_UNSUPPORTED_MU_FEEDBACK,
|
||||
CDP_PHYRX_ERR_PPDU_TX_INTERRUPT_RX = 40,
|
||||
CDP_PHYRX_ERR_UNSUPPORTED_CBF,
|
||||
CDP_PHYRX_ERR_OTHER,
|
||||
CDP_PHYRX_ERR_HE_SIGA_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_SIGA_CRC_ERROR,
|
||||
CDP_PHYRX_ERR_HE_SIGB_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_SIGB_CRC_ERROR,
|
||||
CDP_PHYRX_ERR_HE_MU_MODE_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_NDP_OR_ZLF,
|
||||
CDP_PHYRX_ERR_HE_NSYM_LT_ZERO,
|
||||
CDP_PHYRX_ERR_HE_RU_PARAMS_UNSUPPORTED = 50,
|
||||
CDP_PHYRX_ERR_HE_NUM_USERS_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_SOUNDING_PARAMS_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_EXT_SU_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_TRIG_UNSUPPORTED,
|
||||
CDP_PHYRX_ERR_HE_LSIG_LEN_INVALID = 55,
|
||||
CDP_PHYRX_ERR_HE_LSIG_RATE_MISMATCH,
|
||||
CDP_PHYRX_ERR_OFDMA_SIGNAL_RELIABILITY,
|
||||
CDP_PHYRX_ERR_HT_NSYM_LT_ZERO,
|
||||
CDP_PHYRX_ERR_VHT_LSIG_RATE_MISMATCH,
|
||||
CDP_PHYRX_ERR_VHT_PAID_GID_MISMATCH = 60,
|
||||
CDP_PHYRX_ERR_VHT_UNSUPPORTED_BW,
|
||||
CDP_PHYRX_ERR_VHT_GI_DISAM_MISMATCH,
|
||||
CDP_PHYRX_ERR_RX_WDG_TIMEOUT = 63,
|
||||
CDP_PHYRX_ERR_MAX
|
||||
};
|
||||
#endif
|
||||
|
||||
#define MAX_PPDU_ID_HIST 128
|
||||
|
||||
/**
|
||||
* struct cdp_pdev_mon_stats - monitor mode pdev statistics
|
||||
* @status_ppdu_state: state on PPDU start and end
|
||||
* @status_ppdu_start: status ring PPDU start TLV count
|
||||
* @status_ppdu_end: status ring PPDU end TLV count
|
||||
* @status_ppdu_compl: status ring matching start and end count on PPDU
|
||||
* @status_ppdu_start_mis: status ring missing start TLV count on PPDU
|
||||
* @status_ppdu_end_mis: status ring missing end TLV count on PPDU
|
||||
* @mpdu_cnt_fcs_ok: MPDU ok count per pkt and reception type DL-UL and user
|
||||
* @mpdu_cnt_fcs_err: MPDU err count per pkt and reception type DL-UL and user
|
||||
* @ppdu_eht_type_mode: PPDU count per type compression mode and DL-UL
|
||||
* @end_user_stats_cnt: PPDU end user TLV count
|
||||
* @start_user_info_cnt: PPDU start user info TLV count
|
||||
* @status_ppdu_done: status ring PPDU done TLV count
|
||||
* @dest_ppdu_done: destination ring PPDU count
|
||||
* @dest_mpdu_done: destination ring MPDU count
|
||||
* @dest_mpdu_drop:
|
||||
* @dup_mon_linkdesc_cnt: duplicate link descriptor indications from HW
|
||||
* @dup_mon_buf_cnt: duplicate buffer indications from HW
|
||||
* @dup_mon_sw_desc: Duplicate sw desc from HW
|
||||
* @stat_ring_ppdu_id_hist:
|
||||
* @dest_ring_ppdu_id_hist:
|
||||
* @ppdu_id_hist_idx:
|
||||
* @mon_rx_dest_stuck:
|
||||
* @tlv_tag_status_err: status not correct in the tlv tag
|
||||
* @status_buf_done_war: Number of status ring buffers for which DMA not done
|
||||
* WAR is applied.
|
||||
* @mon_rx_bufs_replenished_dest: Rx buffers replenish count
|
||||
* @mon_rx_bufs_reaped_dest: Rx buffer reap count
|
||||
* @ppdu_id_mismatch: counter to track ppdu id mismatch in
|
||||
* monitor status and monitor destination ring
|
||||
* @ppdu_id_match: counter to track ppdu id match in
|
||||
* monitor status and monitor destination ring
|
||||
* @status_ppdu_drop: Number of ppdu dropped from monitor status ring
|
||||
* @dest_ppdu_drop: Number of ppdu dropped from monitor destination ring
|
||||
* @mon_link_desc_invalid: msdu link desc invalid count
|
||||
* @mon_rx_desc_invalid: rx_desc invalid count
|
||||
* @mon_nbuf_sanity_err:
|
||||
* @mpdu_ppdu_id_mismatch_drop: mpdu's ppdu id did not match destination
|
||||
* ring ppdu id
|
||||
* @mpdu_decap_type_invalid: mpdu decap type invalid count
|
||||
* @rx_undecoded_count: Received undecoded frame count
|
||||
* @rx_undecoded_error: Rx undecoded errors
|
||||
* @rx_hdr_not_received: Rx HDR not received for MPDU
|
||||
* @invalid_dma_length: Invalid length received for packet buffer
|
||||
* @parent_buf_alloc: Number of parent nbufs allocated for MPDU
|
||||
* @parent_buf_free: Number of parent nbuf freed
|
||||
* @pkt_buf_count: Number of packet buffers received
|
||||
* @mpdus_buf_to_stack: Number of MPDUs delivered to stack
|
||||
* @status_buf_count: Number of status buffer received
|
||||
* @empty_desc_ppdu: Number of empty desc received
|
||||
* @total_ppdu_info_enq: Number of PPDUs enqueued to wq
|
||||
* @total_ppdu_info_drop: Number of PPDUs dropped
|
||||
* @total_ppdu_info_alloc: Number of PPDU info allocated
|
||||
* @total_ppdu_info_free: Number of PPDU info freed
|
||||
* @ppdu_drop_cnt: Total PPDU drop count
|
||||
* @mpdu_drop_cnt: Total MPDU drop count
|
||||
* @end_of_ppdu_drop_cnt: Total end of ppdu drop count
|
||||
* @tlv_drop_cnt: TLV drop count
|
||||
* @rx_hdr_invalid_cnt: Rx header invalid count
|
||||
* @null_status_desc: NULL status desc count
|
||||
* @null_pkt_desc: NULL packet desc count
|
||||
* @desc_magic_mismatch: desc magic number mismatch count
* @null_pkt_addr: NULL packet address count
|
||||
* @pending_desc_count: Pending desc_count during pdev deinit
|
||||
*/
|
||||
struct cdp_pdev_mon_stats {
|
||||
#ifndef REMOVE_MON_DBG_STATS
|
||||
uint32_t status_ppdu_state;
|
||||
uint32_t status_ppdu_start;
|
||||
uint32_t status_ppdu_end;
|
||||
uint32_t status_ppdu_compl;
|
||||
uint32_t status_ppdu_start_mis;
|
||||
uint32_t status_ppdu_end_mis;
|
||||
#endif
|
||||
uint32_t mpdu_cnt_fcs_ok[CDP_PKT_TYPE_MAX][CDP_RX_TYPE_MAX]
|
||||
[CDP_MU_TYPE_MAX][CDP_MU_SNIF_USER_MAX];
|
||||
uint32_t mpdu_cnt_fcs_err[CDP_PKT_TYPE_MAX][CDP_RX_TYPE_MAX]
|
||||
[CDP_MU_TYPE_MAX][CDP_MU_SNIF_USER_MAX];
|
||||
uint32_t ppdu_eht_type_mode[CDP_EHT_TYPE_MODE_MAX][CDP_MU_TYPE_MAX];
|
||||
uint32_t end_user_stats_cnt;
|
||||
uint32_t start_user_info_cnt;
|
||||
uint32_t status_ppdu_done;
|
||||
uint32_t dest_ppdu_done;
|
||||
uint32_t dest_mpdu_done;
|
||||
uint32_t dest_mpdu_drop;
|
||||
uint32_t dup_mon_linkdesc_cnt;
|
||||
uint32_t dup_mon_buf_cnt;
|
||||
uint32_t dup_mon_sw_desc;
|
||||
uint32_t stat_ring_ppdu_id_hist[MAX_PPDU_ID_HIST];
|
||||
uint32_t dest_ring_ppdu_id_hist[MAX_PPDU_ID_HIST];
|
||||
uint32_t ppdu_id_hist_idx;
|
||||
uint32_t mon_rx_dest_stuck;
|
||||
uint32_t tlv_tag_status_err;
|
||||
uint32_t status_buf_done_war;
|
||||
uint32_t mon_rx_bufs_replenished_dest;
|
||||
uint32_t mon_rx_bufs_reaped_dest;
|
||||
uint32_t ppdu_id_mismatch;
|
||||
uint32_t ppdu_id_match;
|
||||
uint32_t status_ppdu_drop;
|
||||
uint32_t dest_ppdu_drop;
|
||||
uint32_t mon_link_desc_invalid;
|
||||
uint32_t mon_rx_desc_invalid;
|
||||
uint32_t mon_nbuf_sanity_err;
|
||||
uint32_t mpdu_ppdu_id_mismatch_drop;
|
||||
uint32_t mpdu_decap_type_invalid;
|
||||
#ifdef QCA_UNDECODED_METADATA_SUPPORT
|
||||
uint32_t rx_undecoded_count;
|
||||
uint32_t rx_undecoded_error[CDP_PHYRX_ERR_MAX];
|
||||
#endif
|
||||
uint32_t rx_hdr_not_received;
|
||||
uint32_t invalid_dma_length;
|
||||
uint32_t parent_buf_alloc;
|
||||
uint32_t parent_buf_free;
|
||||
uint32_t pkt_buf_count;
|
||||
uint32_t mpdus_buf_to_stack;
|
||||
uint32_t status_buf_count;
|
||||
uint32_t empty_desc_ppdu;
|
||||
uint32_t total_ppdu_info_enq;
|
||||
uint32_t total_ppdu_info_drop;
|
||||
uint32_t total_ppdu_info_alloc;
|
||||
uint32_t total_ppdu_info_free;
|
||||
uint32_t ppdu_drop_cnt;
|
||||
uint32_t mpdu_drop_cnt;
|
||||
uint32_t end_of_ppdu_drop_cnt;
|
||||
uint32_t tlv_drop_cnt;
|
||||
uint32_t rx_hdr_invalid_cnt;
|
||||
uint32_t null_status_desc;
|
||||
uint32_t null_pkt_desc;
|
||||
uint32_t desc_magic_mismatch;
|
||||
uint32_t null_pkt_addr;
|
||||
uint32_t pending_desc_count;
|
||||
};
|
||||
|
||||
#ifdef QCA_SUPPORT_LITE_MONITOR
|
||||
/**
|
||||
* struct cdp_lite_mon_filter_config - lite mon set/get filter config
|
||||
* @direction: direction tx/rx
|
||||
* @disable: disables lite mon
|
||||
* @level: MSDU/MPDU/PPDU levels
|
||||
* @metadata: meta information to be added
|
||||
* @mgmt_filter: mgmt filter for modes fp,md,mo
|
||||
* @ctrl_filter: ctrl filter for modes fp,md,mo
|
||||
* @data_filter: data filter for modes fp,md,mo
|
||||
* @len: mgmt/ctrl/data frame lens
|
||||
* @debug: debug options
|
||||
* @vdev_id: output vdev id
|
||||
* @legacy_filter_enabled: legacy filter currently enabled
|
||||
*/
|
||||
struct cdp_lite_mon_filter_config {
|
||||
uint8_t direction;
|
||||
uint8_t disable;
|
||||
uint8_t level;
|
||||
uint8_t metadata;
|
||||
uint16_t mgmt_filter[CDP_MON_FRM_FILTER_MODE_MAX];
|
||||
uint16_t ctrl_filter[CDP_MON_FRM_FILTER_MODE_MAX];
|
||||
uint16_t data_filter[CDP_MON_FRM_FILTER_MODE_MAX];
|
||||
uint16_t len[CDP_MON_FRM_TYPE_MAX];
|
||||
uint8_t debug;
|
||||
uint8_t vdev_id;
|
||||
uint8_t legacy_filter_enabled;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_lite_mon_peer_config - lite mon set peer config
|
||||
* @direction: direction tx/rx
|
||||
* @action: add/del
|
||||
* @vdev_id: peer vdev id
|
||||
* @mac: peer mac
|
||||
*/
|
||||
struct cdp_lite_mon_peer_config {
|
||||
uint8_t direction;
|
||||
uint8_t action;
|
||||
uint8_t vdev_id;
|
||||
uint8_t mac[QDF_MAC_ADDR_SIZE];
|
||||
};
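/*
 * Illustrative sketch of how this struct is filled: add one client mac to the
 * rx lite monitor peer list via cdp_set_lite_mon_peer_config(), which is
 * declared in cdp_txrx_mon.h. This caller-side example belongs in a .c file
 * that includes that header; the vdev id and pdev id are placeholders.
 */
static inline QDF_STATUS
example_add_lite_mon_peer(ol_txrx_soc_handle soc, const uint8_t *mac)
{
	struct cdp_lite_mon_peer_config peer = {0};

	peer.direction = CDP_LITE_MON_DIRECTION_RX;
	peer.action = CDP_LITE_MON_PEER_ADD;
	peer.vdev_id = 0;			/* placeholder vdev */
	qdf_mem_copy(peer.mac, mac, QDF_MAC_ADDR_SIZE);

	return cdp_set_lite_mon_peer_config(soc, &peer, 0);
}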
|
||||
|
||||
/**
|
||||
* struct cdp_lite_mon_peer_info - lite mon get peer config
|
||||
* @direction: direction tx/rx
|
||||
* @count: no of peers
|
||||
* @mac: peer macs
|
||||
*/
|
||||
struct cdp_lite_mon_peer_info {
|
||||
uint8_t direction;
|
||||
uint8_t count;
|
||||
uint8_t mac[CDP_LITE_MON_PEER_MAX][QDF_MAC_ADDR_SIZE];
|
||||
};
|
||||
#endif
|
||||
/* channel operating width */
|
||||
enum cdp_channel_width {
|
||||
CHAN_WIDTH_20 = 0,
|
||||
CHAN_WIDTH_40,
|
||||
CHAN_WIDTH_80,
|
||||
CHAN_WIDTH_160,
|
||||
CHAN_WIDTH_80P80,
|
||||
CHAN_WIDTH_5,
|
||||
CHAN_WIDTH_10,
|
||||
CHAN_WIDTH_165,
|
||||
CHAN_WIDTH_160P160,
|
||||
CHAN_WIDTH_320,
|
||||
|
||||
CHAN_WIDTH_MAX,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_rssi_temp_off_param_dp
|
||||
* @rssi_temp_offset: Temperature-based RSSI offset, sent every 30 secs
|
||||
*/
|
||||
|
||||
struct cdp_rssi_temp_off_param_dp {
|
||||
int32_t rssi_temp_offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_rssi_dbm_conv_param_dp
|
||||
* @curr_bw: Current bandwidth
|
||||
* @curr_rx_chainmask: Current rx chainmask
|
||||
* @xbar_config: 4 bytes, used for BB to RF Chain mapping
|
||||
* @xlna_bypass_offset: Low noise amplifier bypass offset
|
||||
* @xlna_bypass_threshold: Low noise amplifier bypass threshold
|
||||
* @nf_hw_dbm: HW noise floor in dBm per chain, per 20MHz subband
|
||||
*/
|
||||
struct cdp_rssi_dbm_conv_param_dp {
|
||||
uint32_t curr_bw;
|
||||
uint32_t curr_rx_chainmask;
|
||||
uint32_t xbar_config;
|
||||
int32_t xlna_bypass_offset;
|
||||
int32_t xlna_bypass_threshold;
|
||||
int8_t nf_hw_dbm[CDP_MAX_NUM_ANTENNA][CDP_MAX_20MHZ_SEGS];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cdp_rssi_db2dbm_param_dp
|
||||
* @pdev_id: pdev_id
|
||||
* @rssi_temp_off_present: to check temp offset values present or not
|
||||
* @rssi_dbm_info_present: to check rssi dbm conversion parameters
|
||||
* present or not
|
||||
* @temp_off_param: cdp_rssi_temp_off_param_dp structure value
|
||||
* @rssi_dbm_param: cdp_rssi_dbm_conv_param_dp structure value
|
||||
*/
|
||||
struct cdp_rssi_db2dbm_param_dp {
|
||||
uint32_t pdev_id;
|
||||
bool rssi_temp_off_present;
|
||||
bool rssi_dbm_info_present;
|
||||
struct cdp_rssi_temp_off_param_dp temp_off_param;
|
||||
struct cdp_rssi_dbm_conv_param_dp rssi_dbm_param;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum cdp_mon_reap_source - trigger source of the reap timer of
|
||||
* monitor status ring
|
||||
* @CDP_MON_REAP_SOURCE_PKTLOG: pktlog
|
||||
* @CDP_MON_REAP_SOURCE_CFR: CFR
|
||||
* @CDP_MON_REAP_SOURCE_EMESH: easy mesh
|
||||
* @CDP_MON_REAP_SOURCE_NUM: total number of the sources
|
||||
* @CDP_MON_REAP_SOURCE_ANY: any of the sources
|
||||
*/
|
||||
enum cdp_mon_reap_source {
|
||||
CDP_MON_REAP_SOURCE_PKTLOG,
|
||||
CDP_MON_REAP_SOURCE_CFR,
|
||||
CDP_MON_REAP_SOURCE_EMESH,
|
||||
|
||||
/* keep last */
|
||||
CDP_MON_REAP_SOURCE_NUM,
|
||||
CDP_MON_REAP_SOURCE_ANY,
|
||||
};
|
||||
#endif
|
@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_mscs.h
|
||||
* Define the host data path MSCS API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_MSCS_H_
|
||||
#define _CDP_TXRX_MSCS_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
#ifdef WLAN_SUPPORT_MSCS
|
||||
/**
|
||||
* cdp_mscs_peer_lookup_n_get_priority() - find MSCS enabled peer for this mac
|
||||
* address and validate priority
|
||||
* @soc: SoC handle
|
||||
* @src_mac: source mac address of peer
|
||||
* @dst_mac: destination mac address of peer
|
||||
* @nbuf: nbuf pointer
|
||||
*
|
||||
* This function checks whether an MSCS-enabled peer exists for this mac
* address and whether the nbuf priority is valid per the user priority bitmap.
|
||||
*
|
||||
* Return: 0 on success, 1 on failure
|
||||
*/
|
||||
static inline int
|
||||
cdp_mscs_peer_lookup_n_get_priority(ol_txrx_soc_handle soc,
|
||||
uint8_t *src_mac, uint8_t *dst_mac,
|
||||
qdf_nbuf_t nbuf)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->mscs_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (soc->ops->mscs_ops->mscs_peer_lookup_n_get_priority)
|
||||
return soc->ops->mscs_ops->mscs_peer_lookup_n_get_priority(soc,
|
||||
src_mac, dst_mac, nbuf);
|
||||
return 0;
|
||||
}
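/*
 * Illustrative usage sketch: a tx classification hook that keeps the nbuf
 * priority only when an MSCS-enabled peer validates it and otherwise resets
 * it. The "reset to 0" fallback is an assumption for the example, not
 * behaviour mandated by this API.
 */
static inline void
example_apply_mscs_priority(ol_txrx_soc_handle soc, uint8_t *src_mac,
			    uint8_t *dst_mac, qdf_nbuf_t nbuf)
{
	if (cdp_mscs_peer_lookup_n_get_priority(soc, src_mac, dst_mac, nbuf))
		qdf_nbuf_set_priority(nbuf, 0);	/* assumed fallback priority */
}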
|
||||
#endif
|
||||
#endif
|
68
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_OCB_H_
|
||||
#define _CDP_TXRX_OCB_H_
|
||||
#include <cdp_txrx_mob_def.h>
|
||||
#include "cdp_txrx_handle.h"
|
||||
/**
|
||||
* cdp_set_ocb_chan_info() - set OCB channel info to vdev.
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev_id corresponding to vdev start
|
||||
* @ocb_set_chan: OCB channel information to be set in vdev.
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_set_ocb_chan_info(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
struct ol_txrx_ocb_set_chan ocb_set_chan)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ocb_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->ocb_ops->set_ocb_chan_info)
|
||||
soc->ops->ocb_ops->set_ocb_chan_info(soc, vdev_id,
|
||||
ocb_set_chan);
|
||||
|
||||
}
|
||||
/**
|
||||
* cdp_get_ocb_chan_info() - return handle to vdev ocb_channel_info
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev_id corresponding to vdev start
|
||||
*
|
||||
* Return: handle to struct ol_txrx_ocb_chan_info
|
||||
*/
|
||||
static inline struct ol_txrx_ocb_chan_info *
|
||||
cdp_get_ocb_chan_info(ol_txrx_soc_handle soc, uint8_t vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ocb_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (soc->ops->ocb_ops->get_ocb_chan_info)
|
||||
return soc->ops->ocb_ops->get_ocb_chan_info(soc, vdev_id);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
#endif /* _CDP_TXRX_OCB_H_ */
|
2647
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h
Normal file
File diff suppressed because it is too large
@ -0,0 +1,596 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_peer_ops.h
|
||||
* Define the host data path peer API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_PEER_H_
|
||||
#define _CDP_TXRX_PEER_H_
|
||||
#include <cdp_txrx_ops.h>
|
||||
#include "cdp_txrx_handle.h"
|
||||
|
||||
/**
|
||||
* cdp_peer_register() - Register peer into physical device
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: data path device instance id
|
||||
* @sta_desc: peer description
|
||||
*
|
||||
* Register peer into physical device
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS registration success
|
||||
* QDF_STATUS_E_NOSUPPORT not support this feature
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_peer_register(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
struct ol_txrx_desc_type *sta_desc)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->register_peer)
|
||||
return soc->ops->peer_ops->register_peer(soc, pdev_id,
|
||||
sta_desc);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_clear_peer() - remove peer from physical device
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: data path device instance id
|
||||
* @peer_addr: peer mac address
|
||||
*
|
||||
* remove peer from physical device
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS peer removed successfully
|
||||
* QDF_STATUS_E_NOSUPPORT not support this feature
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_clear_peer(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
struct qdf_mac_addr peer_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->clear_peer)
|
||||
return soc->ops->peer_ops->clear_peer(soc, pdev_id, peer_addr);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_register_ocb_peer() - register ocb peer from physical device
|
||||
* @soc: data path soc handle
|
||||
* @mac_addr: mac address for ocb self peer
|
||||
*
|
||||
* register ocb peer from physical device
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS registration success
|
||||
* QDF_STATUS_E_NOSUPPORT not support this feature
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_peer_register_ocb_peer(ol_txrx_soc_handle soc,
|
||||
uint8_t *mac_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->register_ocb_peer)
|
||||
return soc->ops->peer_ops->register_ocb_peer(mac_addr);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_find_peer_exist() - Find if peer already exists
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: data path device instance id
|
||||
* @peer_addr: peer mac address
|
||||
*
|
||||
* Return: true or false
|
||||
*/
|
||||
static inline bool
|
||||
cdp_find_peer_exist(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint8_t *peer_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->find_peer_exist)
|
||||
return soc->ops->peer_ops->find_peer_exist(soc, pdev_id,
|
||||
peer_addr);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_find_peer_exist_on_vdev() - Find if duplicate peer exists
|
||||
* on the given vdev
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: data path virtual interface id
|
||||
* @peer_addr: peer mac address
|
||||
*
|
||||
* Return: true or false
|
||||
*/
|
||||
static inline bool
|
||||
cdp_find_peer_exist_on_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint8_t *peer_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->find_peer_exist_on_vdev)
|
||||
return soc->ops->peer_ops->find_peer_exist_on_vdev(soc, vdev_id,
|
||||
peer_addr);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_find_peer_exist_on_other_vdev() - Find if duplicate peer exists
|
||||
* on other than the given vdev
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: data path virtual interface id
|
||||
* @peer_addr: peer mac address
|
||||
* @max_bssid: max number of bssids
|
||||
*
|
||||
* Return: true or false
|
||||
*/
|
||||
static inline bool
|
||||
cdp_find_peer_exist_on_other_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint8_t *peer_addr, uint16_t max_bssid)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->find_peer_exist_on_other_vdev)
|
||||
return soc->ops->peer_ops->find_peer_exist_on_other_vdev(
|
||||
soc, vdev_id,
|
||||
peer_addr,
|
||||
max_bssid);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_state_update() - update peer local state
|
||||
* @soc: data path soc handle
|
||||
* @peer_addr: peer mac address
|
||||
* @state: new peer local state
|
||||
*
|
||||
* update peer local state
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS state updated successfully
|
||||
* QDF_STATUS_E_NOSUPPORT not support this feature
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_peer_state_update(ol_txrx_soc_handle soc, uint8_t *peer_addr,
|
||||
enum ol_txrx_peer_state state)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->peer_state_update)
|
||||
return soc->ops->peer_ops->peer_state_update(soc, peer_addr,
|
||||
state);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_state_get() - Get local peer state
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @peer_mac: peer mac addr
|
||||
* @slowpath: call from slow path or not
|
||||
*
|
||||
* Get local peer state
|
||||
*
|
||||
* Return: peer status
|
||||
*/
|
||||
static inline int
|
||||
cdp_peer_state_get(ol_txrx_soc_handle soc, uint8_t vdev_id,
		   uint8_t *peer_mac, bool slowpath)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->get_peer_state)
|
||||
return soc->ops->peer_ops->get_peer_state(soc, vdev_id,
|
||||
peer_mac,
|
||||
slowpath);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_get_vdevid() - Get virtual interface id which peer registered
|
||||
* @soc: data path soc handle
|
||||
* @peer_mac: peer mac address
|
||||
* @vdev_id: virtual interface id which peer registered
|
||||
*
|
||||
* Get virtual interface id which peer registered
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS vdev id retrieved successfully
|
||||
* QDF_STATUS_E_NOSUPPORT not support this feature
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_peer_get_vdevid(ol_txrx_soc_handle soc,
|
||||
uint8_t *peer_mac, uint8_t *vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->get_vdevid)
|
||||
return soc->ops->peer_ops->get_vdevid(soc, peer_mac, vdev_id);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
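/*
 * Illustrative usage sketch: resolve the vdev a peer is registered on before
 * issuing a per-vdev command. The silent bail-out on failure is an example
 * policy, not something this header prescribes.
 */
static inline void
example_lookup_peer_vdev(ol_txrx_soc_handle soc, uint8_t *peer_mac)
{
	uint8_t vdev_id;

	if (cdp_peer_get_vdevid(soc, peer_mac, &vdev_id) != QDF_STATUS_SUCCESS)
		return;

	/* vdev_id now identifies the interface the peer was registered on */
}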
|
||||
|
||||
/**
|
||||
* cdp_peer_get_vdev_by_peer_addr() - Get vdev instance by local peer address
|
||||
* @soc: data path soc handle
|
||||
* @pdev: data path device instance
|
||||
* @peer_addr: peer mac address
|
||||
*
|
||||
* Get virtual interface instance by local peer mac address
|
||||
*
|
||||
* Return: Virtual interface instance
|
||||
* NULL in case cannot find
|
||||
*/
|
||||
static inline struct cdp_vdev
|
||||
*cdp_peer_get_vdev_by_peer_addr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
|
||||
struct qdf_mac_addr peer_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->get_vdev_by_peer_addr)
|
||||
return soc->ops->peer_ops->get_vdev_by_peer_addr(pdev,
|
||||
peer_addr);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_get_peer_mac_addr() - Get peer mac address
|
||||
* @soc: data path soc handle
|
||||
* @peer: peer instance
|
||||
*
|
||||
* Get peer mac address
|
||||
*
|
||||
* Return: peer mac address pointer
|
||||
* NULL in case cannot find
|
||||
*/
|
||||
static inline uint8_t
|
||||
*cdp_peer_get_peer_mac_addr(ol_txrx_soc_handle soc, void *peer)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->peer_get_peer_mac_addr)
|
||||
return soc->ops->peer_ops->peer_get_peer_mac_addr(peer);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_update_ibss_add_peer_num_of_vdev() - update the number of peers
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface instance id
|
||||
* @peer_num_delta: number of peer should be updated
|
||||
*
|
||||
* update the number of peers
|
||||
*
|
||||
* Return: updated number of peers
|
||||
* 0 fail
|
||||
*/
|
||||
static inline int16_t
|
||||
cdp_peer_update_ibss_add_peer_num_of_vdev(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id,
|
||||
int16_t peer_num_delta)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev)
|
||||
return soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev(
|
||||
soc, vdev_id,
|
||||
peer_num_delta);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_copy_mac_addr_raw() - copy peer mac address
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface instance id
|
||||
* @bss_addr: mac address should be copied
|
||||
*
|
||||
* copy peer mac address
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_copy_mac_addr_raw(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, uint8_t *bss_addr)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->copy_mac_addr_raw)
|
||||
return soc->ops->peer_ops->copy_mac_addr_raw(soc, vdev_id,
|
||||
bss_addr);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_add_last_real_peer() - Add peer with last peer marking
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: data path device instance id
|
||||
* @vdev_id: virtual interface instance id
|
||||
*
|
||||
* Add the last real peer to the vdev
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_add_last_real_peer(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint8_t vdev_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->add_last_real_peer)
|
||||
return soc->ops->peer_ops->add_last_real_peer(soc, pdev_id,
|
||||
vdev_id);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_is_vdev_restore_last_peer() - restore last peer
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @peer_mac: peer mac address
|
||||
*
|
||||
* restore last peer
|
||||
*
|
||||
* Return: true, restore success
|
||||
* false, restore fail
|
||||
*/
|
||||
static inline bool
|
||||
cdp_peer_is_vdev_restore_last_peer(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->is_vdev_restore_last_peer)
|
||||
return soc->ops->peer_ops->is_vdev_restore_last_peer(soc,
|
||||
vdev_id,
|
||||
peer_mac);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_update_last_real_peer() - update last real peer
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: data path device instance id
|
||||
* @vdev_id: virtual interface id
|
||||
* @restore_last_peer: restore last peer or not
|
||||
*
|
||||
* update last real peer
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_update_last_real_peer(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint8_t vdev_id, bool restore_last_peer)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->update_last_real_peer)
|
||||
return soc->ops->peer_ops->update_last_real_peer(
|
||||
soc, pdev_id, vdev_id,
|
||||
restore_last_peer);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_detach_force_delete() - Detach and delete a peer's data object
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: data path virtual interface id
|
||||
* @peer_mac: peer mac address
|
||||
*
|
||||
* Detach a peer and force the peer object to be removed. It is called during
|
||||
* roaming scenario when the firmware has already deleted a peer.
|
||||
* Peer object is freed immediately to avoid duplicate peers during roam sync
|
||||
* indication processing.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void cdp_peer_detach_force_delete(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id,
|
||||
uint8_t *peer_mac)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->peer_detach_force_delete)
|
||||
return soc->ops->peer_ops->peer_detach_force_delete(soc,
|
||||
vdev_id,
|
||||
peer_mac);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* is_cdp_peer_detach_force_delete_supported() - To check if force delete
|
||||
* operation is supported
|
||||
* @soc: pointer to SOC handle
|
||||
*
|
||||
* Some of the platforms support force delete operation and some of them
|
||||
* don't. This API returns true if API which handles force delete operation
|
||||
* is registered and false otherwise.
|
||||
*
|
||||
* Return: true if API which handles force delete operation is registered
|
||||
* false in all other cases
|
||||
*/
|
||||
static inline bool
|
||||
is_cdp_peer_detach_force_delete_supported(ol_txrx_soc_handle soc)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->peer_detach_force_delete)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_set_peer_as_tdls() - To set peer as tdls peer
|
||||
* @soc: pointer to SOC handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @peer_mac: peer mac address
|
||||
* @val: true or false
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_set_peer_as_tdls(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac, bool val)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->set_peer_as_tdls_peer)
|
||||
soc->ops->peer_ops->set_peer_as_tdls_peer(soc, vdev_id,
|
||||
peer_mac, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_set_tdls_offchan_enabled() - Set tdls offchan operation as enabled
|
||||
* @soc: pointer to SOC handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @peer_mac: peer mac address
|
||||
* @val: true or false
|
||||
*
|
||||
* update tdls_offchan_enabled
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_set_tdls_offchan_enabled(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac, bool val)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->set_tdls_offchan_enabled)
|
||||
soc->ops->peer_ops->set_tdls_offchan_enabled(soc, vdev_id,
|
||||
peer_mac, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_peer_flush_frags() - Flush frags on peer
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: virtual interface id
|
||||
* @peer_mac: peer mac addr
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
cdp_peer_flush_frags(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->peer_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->peer_ops->peer_flush_frags)
|
||||
soc->ops->peer_ops->peer_flush_frags(soc, vdev_id, peer_mac);
|
||||
}
|
||||
#endif /* _CDP_TXRX_PEER_H_ */
|
@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017,2019-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_pflow.h
|
||||
* Define the host data path peer flow API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_PFLOW_H_
|
||||
#define _CDP_TXRX_PFLOW_H_
|
||||
|
||||
#include <cdp_txrx_stats_struct.h>
|
||||
#include "cdp_txrx_ops.h"
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
static inline uint32_t cdp_pflow_update_pdev_params
|
||||
(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
enum _dp_param_t param, uint32_t val, void *ctx)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->pflow_ops ||
|
||||
!soc->ops->pflow_ops->pflow_update_pdev_params)
|
||||
return 0;
|
||||
|
||||
return soc->ops->pflow_ops->pflow_update_pdev_params
|
||||
(soc, pdev_id, param, val, ctx);
|
||||
}
|
||||
#endif
|
52
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_PMF_H_
|
||||
#define _CDP_TXRX_PMF_H_
|
||||
|
||||
/**
|
||||
* cdp_get_pn_info() - Returns pn info from peer
|
||||
* @soc: data path soc handle
|
||||
* @peer_mac: peer mac address
|
||||
* @vdev_id: virtual device/interface id
|
||||
* @last_pn_valid: return last_rmf_pn_valid value from peer.
|
||||
* @last_pn: return last_rmf_pn value from peer.
|
||||
* @rmf_pn_replays: return rmf_pn_replays value from peer.
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_get_pn_info(ol_txrx_soc_handle soc, uint8_t *peer_mac, uint8_t vdev_id,
|
||||
uint8_t **last_pn_valid, uint64_t **last_pn,
|
||||
uint32_t **rmf_pn_replays)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->pmf_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->pmf_ops->get_pn_info)
|
||||
return soc->ops->pmf_ops->get_pn_info(soc, peer_mac, vdev_id,
|
||||
last_pn_valid,
|
||||
last_pn, rmf_pn_replays);
|
||||
|
||||
return;
|
||||
}
|
||||
#endif /* _CDP_TXRX_PMF_H_ */
|
229
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_ppe.h
Normal file
@ -0,0 +1,229 @@
|
||||
/*
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_PPE_H_
|
||||
#define _CDP_TXRX_PPE_H_
|
||||
|
||||
/**
|
||||
* cdp_ppesds_vp_setup_fw_recovery() - Setup DS VP on FW recovery.
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev id
|
||||
* @profile_idx: DS profile index.
|
||||
*
|
||||
 * return: QDF_STATUS indicating whether the DS VP setup was completed.
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_ppesds_vp_setup_fw_recovery(struct cdp_soc_t *soc,
|
||||
uint8_t vdev_id,
|
||||
uint16_t profile_idx)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_vp_setup_recovery)
|
||||
return soc->ops->ppeds_ops->ppeds_vp_setup_recovery(soc,
|
||||
vdev_id,
|
||||
profile_idx);
|
||||
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_update_dev_stats() - Update dev stats for PPE-DS mode.
|
||||
* @soc: data path soc handle
|
||||
* @vp_params: VP params
|
||||
* @vdev_id: vdev id
|
||||
* @stats: stats pointer from ppe
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
static inline
|
||||
void cdp_ppesds_update_dev_stats(struct cdp_soc_t *soc,
|
||||
struct cdp_ds_vp_params *vp_params,
|
||||
uint16_t vdev_id, void *stats)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_stats_sync)
|
||||
return soc->ops->ppeds_ops->ppeds_stats_sync(soc,
|
||||
vdev_id,
|
||||
vp_params,
|
||||
stats);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_entry_attach() - attach the ppe vp interface.
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev id
|
||||
* @vpai: PPE VP opaque
|
||||
* @ppe_vp_num: Allocated VP Port number
|
||||
* @vp_params: VP params
|
||||
*
|
||||
 * return: QDF_STATUS indicating whether the VP entry was allocated.
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_ppesds_entry_attach(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
void *vpai, int32_t *ppe_vp_num,
|
||||
struct cdp_ds_vp_params *vp_params)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_entry_attach)
|
||||
return soc->ops->ppeds_ops->ppeds_entry_attach(soc, vdev_id,
|
||||
vpai,
|
||||
ppe_vp_num,
|
||||
vp_params);
|
||||
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_entry_detach() - Detach the PPE VP interface.
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev ID
|
||||
* @vp_params: VP params
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
static inline
|
||||
void cdp_ppesds_entry_detach(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
struct cdp_ds_vp_params *vp_params)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_entry_detach)
|
||||
return soc->ops->ppeds_ops->ppeds_entry_detach(soc,
|
||||
vdev_id,
|
||||
vp_params);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppeds_attached() - Check whether ppeds attached
|
||||
* @soc: data path soc handle
|
||||
*
|
||||
 * return: true if PPE-DS is attached, false otherwise.
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_ppeds_attached(struct cdp_soc_t *soc)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
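/*
 * Example usage (illustrative sketch only): callers are expected to gate
 * the other PPE-DS wrappers on cdp_ppeds_attached(). The helper name and
 * its arguments are assumptions made for the example.
 */
static inline QDF_STATUS example_ppeds_vp_alloc(struct cdp_soc_t *soc,
						uint8_t vdev_id, void *vpai,
						int32_t *ppe_vp_num,
						struct cdp_ds_vp_params *vp_params)
{
	if (!cdp_ppeds_attached(soc))
		return QDF_STATUS_E_NOSUPPORT;

	return cdp_ppesds_entry_attach(soc, vdev_id, vpai, ppe_vp_num,
				       vp_params);
}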
|
||||
|
||||
/**
|
||||
* cdp_ppesds_set_int_pri2tid() - Set the INT_PRI to TID
|
||||
* @soc: data path soc handle
|
||||
* @pri2tid: PRI2TID table
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
static inline
|
||||
void cdp_ppesds_set_int_pri2tid(struct cdp_soc_t *soc,
|
||||
uint8_t *pri2tid)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_set_int_pri2tid)
|
||||
return soc->ops->ppeds_ops->ppeds_set_int_pri2tid(soc, pri2tid);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_update_int_pri2tid() - Update the INT_PRI to TID
|
||||
* @soc: data path soc handle
|
||||
* @pri: Priority index
|
||||
* @tid: TID mapped to the input priority
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
static inline
|
||||
void cdp_ppesds_update_int_pri2tid(struct cdp_soc_t *soc,
|
||||
uint8_t pri, uint8_t tid)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_update_int_pri2tid)
|
||||
return soc->ops->ppeds_ops->ppeds_update_int_pri2tid(soc, pri,
|
||||
tid);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_entry_dump() - Dump the PPE VP entries
|
||||
* @soc: data path soc handle
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
static inline
|
||||
void cdp_ppesds_entry_dump(struct cdp_soc_t *soc)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_entry_dump)
|
||||
soc->ops->ppeds_ops->ppeds_entry_dump(soc);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_ppesds_enable_pri2tid() - Enable PPE VP PRI2TID table
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: vdev id
|
||||
* @val: Boolean value to enable/disable
|
||||
*
|
||||
* return: QDF_STATUS
|
||||
*/
|
||||
static inline
|
||||
QDF_STATUS cdp_ppesds_enable_pri2tid(struct cdp_soc_t *soc,
|
||||
uint8_t vdev_id, bool val)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->ppeds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (soc->ops->ppeds_ops->ppeds_enable_pri2tid)
|
||||
return soc->ops->ppeds_ops->ppeds_enable_pri2tid(soc, vdev_id,
|
||||
val);
|
||||
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
}
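/*
 * Example usage (illustrative sketch only): program an INT_PRI to TID table
 * and then enable the PRI2TID lookup on one vdev. The 8-entry identity
 * mapping and the helper name are assumptions made for the example.
 */
static inline QDF_STATUS example_ppeds_pri2tid_setup(struct cdp_soc_t *soc,
						     uint8_t vdev_id)
{
	uint8_t pri2tid[8] = {0, 1, 2, 3, 4, 5, 6, 7};

	cdp_ppesds_set_int_pri2tid(soc, pri2tid);

	return cdp_ppesds_enable_pri2tid(soc, vdev_id, true);
}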
|
||||
#endif /* _CDP_TXRX_PPE_H_ */
|
64
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017, 2019, 2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_raw.h
|
||||
* Define the host data path raw mode API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_RAW_H_
|
||||
#define _CDP_TXRX_RAW_H_
|
||||
|
||||
#include "cdp_txrx_handle.h"
|
||||
#include "cdp_txrx_ops.h"
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
/**
|
||||
* cdp_rawsim_get_astentry() - finds the ast entry for the packet
|
||||
* @soc: soc handle
|
||||
* @vdev_id: id of the data virtual device object
|
||||
* @pnbuf: pointer to nbuf
|
||||
* @raw_ast: pointer to fill ast information
|
||||
*
|
||||
 * Finds the ast entry, i.e. the 4th address, for the packet based on the
|
||||
* details in the netbuf.
|
||||
*
|
||||
* Return: 0 on success, -1 on error, 1 if more nbufs need to be consumed.
|
||||
*/
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_rawsim_get_astentry(ol_txrx_soc_handle soc, uint8_t vdev_id,
|
||||
qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast)
|
||||
{
|
||||
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->raw_ops ||
|
||||
!soc->ops->raw_ops->rsim_get_astentry)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->raw_ops->rsim_get_astentry(soc, vdev_id,
|
||||
pnbuf, raw_ast);
|
||||
}
|
||||
|
||||
#endif
|
487
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_sawf.h
Normal file
@ -0,0 +1,487 @@
|
||||
/*
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_SAWF_H_
|
||||
#define _CDP_TXRX_SAWF_H_
|
||||
|
||||
#include <cdp_txrx_cmn_struct.h>
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_svcid_map(ol_txrx_soc_handle soc,
|
||||
uint8_t *mac, uint8_t svc_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_def_queues_map_req) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_def_queues_map_req(soc, mac, svc_id);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_unmap(ol_txrx_soc_handle soc,
|
||||
uint8_t *mac, uint8_t svc_id)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_def_queues_unmap_req) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_def_queues_unmap_req(soc, mac, svc_id);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_get_map_conf(ol_txrx_soc_handle soc,
|
||||
uint8_t *mac)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_def_queues_get_map_report) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_def_queues_get_map_report(soc, mac);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_get_msduq_info(ol_txrx_soc_handle soc, uint8_t *mac)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_get_peer_msduq_info) {
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_get_peer_msduq_info(soc, mac);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SAWF
|
||||
/**
|
||||
* cdp_get_peer_sawf_delay_stats() - Call to get SAWF delay stats
|
||||
* @soc: soc handle
|
||||
* @svc_id: service class ID
|
||||
* @mac: peer mac address
|
||||
* @data: opaque pointer
|
||||
*
|
||||
* return: status Success/Failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_get_peer_sawf_delay_stats(ol_txrx_soc_handle soc, uint32_t svc_id,
|
||||
uint8_t *mac, void *data)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->txrx_get_peer_sawf_delay_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->txrx_get_peer_sawf_delay_stats(soc, svc_id,
|
||||
mac, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_get_peer_sawf_tx_stats() - Call to get SAWF Tx stats
|
||||
* @soc: soc handle
|
||||
* @svc_id: service class ID
|
||||
* @mac: peer mac address
|
||||
* @data: opaque pointer
|
||||
*
|
||||
* return: status Success/Failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_get_peer_sawf_tx_stats(ol_txrx_soc_handle soc, uint32_t svc_id,
|
||||
uint8_t *mac, void *data)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->txrx_get_peer_sawf_tx_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->txrx_get_peer_sawf_tx_stats(soc, svc_id,
|
||||
mac, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_sawf_mpdu_stats_req() - Call to subscribe to MPDU stats TLV
|
||||
* @soc: soc handle
|
||||
* @enable: 1: enable 0: disable
|
||||
*
|
||||
* return: status Success/Failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_mpdu_stats_req(ol_txrx_soc_handle soc, uint8_t enable)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_mpdu_stats_req)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->sawf_mpdu_stats_req(soc, enable);
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_sawf_mpdu_details_stats_req() - Call to subscribe to MPDU details stats TLV
|
||||
* @soc: soc handle
|
||||
* @enable: 1: enable 0: disable
|
||||
*
|
||||
* return: status Success/Failure
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_mpdu_details_stats_req(ol_txrx_soc_handle soc, uint8_t enable)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_mpdu_details_stats_req)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->sawf_mpdu_details_stats_req(soc, enable);
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_sawf_set_mov_avg_params() - Set moving average params
|
||||
* @soc: SOC handle
|
||||
 * @num_pkt: Number of packets per window used to calculate the moving average
|
||||
 * @num_win: Number of windows used to calculate the moving average
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_set_mov_avg_params(ol_txrx_soc_handle soc,
|
||||
uint32_t num_pkt,
|
||||
uint32_t num_win)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->txrx_sawf_set_mov_avg_params)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->txrx_sawf_set_mov_avg_params(num_pkt,
|
||||
num_win);
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_sawf_set_sla_params() - Set SLA params
|
||||
* @soc: SOC handle
|
||||
 * @num_pkt: Number of packets used to detect an SLA breach
|
||||
 * @time_secs: Time in seconds to detect a breach
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_set_sla_params(ol_txrx_soc_handle soc,
|
||||
uint32_t num_pkt,
|
||||
uint32_t time_secs)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->txrx_sawf_set_sla_params)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->txrx_sawf_set_sla_params(num_pkt,
|
||||
time_secs);
|
||||
}
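/*
 * Example usage (illustrative sketch only): drive the two knobs above
 * together. The packet counts, window count and time-out below are
 * placeholder values, not recommended settings.
 */
static inline QDF_STATUS example_sawf_sla_tuning(ol_txrx_soc_handle soc)
{
	QDF_STATUS status;

	/* Average over 10 windows of 100 packets each (example values) */
	status = cdp_sawf_set_mov_avg_params(soc, 100, 10);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Flag an SLA breach after 1000 packets or 10 seconds (example) */
	return cdp_sawf_set_sla_params(soc, 1000, 10);
}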
|
||||
|
||||
/**
|
||||
 * cdp_sawf_init_telemtery_params() - Initialize telemetry params
|
||||
* @soc: SOC handle
|
||||
*
|
||||
 * Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_init_telemtery_params(ol_txrx_soc_handle soc)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->txrx_sawf_init_telemtery_params)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->txrx_sawf_init_telemtery_params();
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_get_throughput_stats(ol_txrx_soc_handle soc, void *arg,
|
||||
uint64_t *in_bytes, uint64_t *in_cnt,
|
||||
uint64_t *tx_bytes, uint64_t *tx_cnt,
|
||||
uint8_t tid, uint8_t msduq)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->telemetry_get_throughput_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->telemetry_get_throughput_stats(
|
||||
arg, in_bytes, in_cnt, tx_bytes,
|
||||
tx_cnt, tid, msduq);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_get_mpdu_stats(ol_txrx_soc_handle soc, void *arg,
|
||||
uint64_t *svc_int_pass, uint64_t *svc_int_fail,
|
||||
uint64_t *burst_pass, uint64_t *burst_fail,
|
||||
uint8_t tid, uint8_t msduq)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->telemetry_get_mpdu_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->telemetry_get_mpdu_stats(
|
||||
arg, svc_int_pass, svc_int_fail, burst_pass,
|
||||
burst_fail, tid, msduq);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_get_drop_stats(ol_txrx_soc_handle soc, void *arg,
|
||||
uint64_t *pass, uint64_t *drop,
|
||||
uint64_t *drop_ttl,
|
||||
uint8_t tid, uint8_t msduq)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->telemetry_get_drop_stats)
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
|
||||
return soc->ops->sawf_ops->telemetry_get_drop_stats(
|
||||
arg, pass, drop, drop_ttl, tid, msduq);
|
||||
}
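/*
 * Example usage (illustrative sketch only): poll the three telemetry
 * getters above for one TID/MSDU queue. The helper name is an assumption;
 * arg is whatever opaque telemetry context the caller already tracks.
 */
static inline void example_sawf_poll_telemetry(ol_txrx_soc_handle soc,
					       void *arg, uint8_t tid,
					       uint8_t msduq)
{
	uint64_t in_bytes = 0, in_cnt = 0, tx_bytes = 0, tx_cnt = 0;
	uint64_t svc_pass = 0, svc_fail = 0, burst_pass = 0, burst_fail = 0;
	uint64_t pass = 0, drop = 0, drop_ttl = 0;

	cdp_get_throughput_stats(soc, arg, &in_bytes, &in_cnt,
				 &tx_bytes, &tx_cnt, tid, msduq);
	cdp_get_mpdu_stats(soc, arg, &svc_pass, &svc_fail,
			   &burst_pass, &burst_fail, tid, msduq);
	cdp_get_drop_stats(soc, arg, &pass, &drop, &drop_ttl, tid, msduq);
}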
|
||||
|
||||
/**
|
||||
 * cdp_sawf_peer_config_ul() - Configure uplink QoS parameters
|
||||
* @soc: SOC handle
|
||||
* @mac_addr: MAC address
|
||||
* @tid: TID
|
||||
* @service_interval: Service Interval
|
||||
* @burst_size: Burst Size
|
||||
* @min_tput: Min throughput
|
||||
* @max_latency: Max latency
|
||||
* @add_or_sub: Add or Sub parameters
|
||||
* @peer_id: peer id
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_config_ul(ol_txrx_soc_handle soc, uint8_t *mac_addr, uint8_t tid,
|
||||
uint32_t service_interval, uint32_t burst_size,
|
||||
uint32_t min_tput, uint32_t max_latency,
|
||||
uint8_t add_or_sub, uint16_t peer_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->peer_config_ul) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->peer_config_ul(soc, mac_addr, tid,
|
||||
service_interval, burst_size,
|
||||
min_tput, max_latency,
|
||||
add_or_sub, peer_id);
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_sawf_peer_flow_count() - Peer flow count in SAWF
|
||||
* @soc: SOC handle
|
||||
* @mac_addr: MAC address
|
||||
* @svc_id: Service Class ID
|
||||
* @direction: Indication of forward or reverse service class match
|
||||
* @start_or_stop: Indication of start or stop
|
||||
* @peer_mac: Peer MAC address
|
||||
* @peer_id: peer id
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_peer_flow_count(ol_txrx_soc_handle soc, uint8_t *mac_addr,
|
||||
uint8_t svc_id, uint8_t direction,
|
||||
uint8_t start_or_stop, uint8_t *peer_mac,
|
||||
uint16_t peer_id)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_peer_flow_count) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_peer_flow_count
|
||||
(soc, mac_addr, svc_id, direction, start_or_stop, peer_mac,
|
||||
peer_id);
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_swaf_peer_sla_configuration() - Check if SLA is configured for a peer
|
||||
* @soc: SOC handle
|
||||
* @mac_addr: peer mac address
|
||||
* @sla_mask: pointer to SLA mask
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_swaf_peer_sla_configuration(ol_txrx_soc_handle soc, uint8_t *mac_addr,
|
||||
uint16_t *sla_mask)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->swaf_peer_sla_configuration) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->swaf_peer_sla_configuration(soc, mac_addr,
|
||||
sla_mask);
|
||||
}
|
||||
|
||||
#else
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_mpdu_stats_req(ol_txrx_soc_handle soc, uint8_t enable)
|
||||
{
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_mpdu_details_stats_req(ol_txrx_soc_handle soc, uint8_t enable)
|
||||
{
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_get_peer_sawf_delay_stats(ol_txrx_soc_handle soc, uint32_t svc_id,
|
||||
uint8_t *mac, void *data)
|
||||
{
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_get_peer_sawf_tx_stats(ol_txrx_soc_handle soc, uint32_t svc_id,
|
||||
uint8_t *mac, void *data)
|
||||
{
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_swaf_peer_sla_configuration(ol_txrx_soc_handle soc, uint8_t *mac_addr,
|
||||
uint16_t *sla_mask)
|
||||
{
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_11BE_MLO_3_LINK_TX
|
||||
static inline
|
||||
uint16_t cdp_sawf_get_peer_msduq(ol_txrx_soc_handle soc,
|
||||
struct net_device *netdev, uint8_t *dest_mac,
|
||||
uint32_t dscp_pcp, bool pcp)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->get_peer_msduq) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->get_peer_msduq
|
||||
(netdev, dest_mac, dscp_pcp, pcp);
|
||||
}
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_sawf_3_link_peer_flow_count(ol_txrx_soc_handle soc, uint8_t *mac_addr,
|
||||
uint16_t peer_id, uint32_t mark_metadata)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->sawf_ops ||
|
||||
!soc->ops->sawf_ops->sawf_3_link_peer_flow_count) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
return soc->ops->sawf_ops->sawf_3_link_peer_flow_count
|
||||
(soc, mac_addr, peer_id, mark_metadata);
|
||||
}
|
||||
#endif
|
||||
#endif /* _CDP_TXRX_SAWF_H_ */
|
53
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_scs.h
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _CDP_TXRX_SCS_H_
|
||||
#define _CDP_TXRX_SCS_H_
|
||||
|
||||
#ifdef WLAN_SUPPORT_SCS
|
||||
#include "cdp_txrx_handle.h"
|
||||
|
||||
/**
|
||||
* cdp_scs_peer_lookup_n_rule_match() - Find peer and check if SCS rule
|
||||
* is applicable for the peer or not
|
||||
*
|
||||
* @soc: soc handle
|
||||
* @rule_id: scs rule id
|
||||
* @dst_mac_addr: destination mac addr for peer lookup
|
||||
*
|
||||
* Return: bool true on success and false on failure
|
||||
*/
|
||||
static inline
|
||||
bool cdp_scs_peer_lookup_n_rule_match(ol_txrx_soc_handle soc,
|
||||
uint32_t rule_id, uint8_t *dst_mac_addr)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!soc->ops->scs_ops ||
|
||||
!soc->ops->scs_ops->scs_peer_lookup_n_rule_match)
|
||||
return false;
|
||||
|
||||
return soc->ops->scs_ops->scs_peer_lookup_n_rule_match(soc, rule_id,
|
||||
dst_mac_addr);
|
||||
}
|
||||
#endif
|
||||
#endif
|
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017,2019,2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_stats.h
|
||||
* Define the host data path statistics API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_STATS_H_
|
||||
#define _CDP_TXRX_STATS_H_
|
||||
#include <cdp_txrx_ops.h>
|
||||
#include <cdp_txrx_cmn.h>
|
||||
|
||||
static inline QDF_STATUS
|
||||
cdp_clear_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, uint8_t bitmap)
|
||||
{
|
||||
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!soc->ops->mob_stats_ops ||
|
||||
!soc->ops->mob_stats_ops->clear_stats)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
return soc->ops->mob_stats_ops->clear_stats(soc, pdev_id, bitmap);
|
||||
}
|
||||
|
||||
static inline int
|
||||
cdp_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, char *buffer,
|
||||
unsigned int buf_len)
|
||||
{
|
||||
if (!soc || !soc->ops) {
|
||||
dp_cdp_debug("Invalid Instance");
|
||||
QDF_BUG(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!soc->ops->mob_stats_ops ||
|
||||
!soc->ops->mob_stats_ops->stats)
|
||||
return 0;
|
||||
|
||||
return soc->ops->mob_stats_ops->stats(vdev_id, buffer, buf_len);
|
||||
}
|
||||
|
||||
#endif /* _CDP_TXRX_STATS_H_ */
|
File diff suppressed because it is too large
@ -0,0 +1,131 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_tx_delay.h
|
||||
* Define the host data path histogram API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_COMPUTE_TX_DELAY_H_
|
||||
#define _CDP_TXRX_COMPUTE_TX_DELAY_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
/**
|
||||
* cdp_tx_delay() - get tx packet delay
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
* @queue_delay_microsec: tx packet delay within queue, usec
|
||||
* @tx_delay_microsec: tx packet delay, usec
|
||||
* @category: packet category
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_tx_delay(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint32_t *queue_delay_microsec, uint32_t *tx_delay_microsec,
|
||||
int category)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->delay_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->delay_ops->tx_delay)
|
||||
return soc->ops->delay_ops->tx_delay(soc, pdev_id,
|
||||
queue_delay_microsec, tx_delay_microsec, category);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_tx_delay_hist() - get tx packet delay histogram
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
 * @bin_values: array to be filled with tx delay histogram bin values
|
||||
* @category: packet category
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_tx_delay_hist(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint16_t *bin_values, int category)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->delay_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->delay_ops->tx_delay_hist)
|
||||
return soc->ops->delay_ops->tx_delay_hist(soc, pdev_id,
|
||||
bin_values, category);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_tx_packet_count() - get tx packet count
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
* @out_packet_count: packet count
|
||||
* @out_packet_loss_count: packet loss count
|
||||
* @category: packet category
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_tx_packet_count(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint16_t *out_packet_count, uint16_t *out_packet_loss_count,
|
||||
int category)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->delay_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->delay_ops->tx_packet_count)
|
||||
return soc->ops->delay_ops->tx_packet_count(soc, pdev_id,
|
||||
out_packet_count, out_packet_loss_count, category);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_tx_set_compute_interval() - set tx packet stat compute interval
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
* @interval: compute interval
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_tx_set_compute_interval(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
uint32_t interval)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->delay_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->delay_ops->tx_set_compute_interval)
|
||||
return soc->ops->delay_ops->tx_set_compute_interval(soc,
|
||||
pdev_id,
|
||||
interval);
|
||||
return;
|
||||
}
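/*
 * Example usage (illustrative sketch only): fix the compute interval once,
 * then periodically read back the per-category delay and packet counters.
 * The interval and the helper name are assumptions made for the example.
 */
static inline void example_tx_delay_poll(ol_txrx_soc_handle soc,
					 uint8_t pdev_id, int category)
{
	uint32_t queue_delay_us = 0, tx_delay_us = 0;
	uint16_t pkt_count = 0, pkt_loss = 0;

	cdp_tx_set_compute_interval(soc, pdev_id, 5000); /* example interval */

	cdp_tx_delay(soc, pdev_id, &queue_delay_us, &tx_delay_us, category);
	cdp_tx_packet_count(soc, pdev_id, &pkt_count, &pkt_loss, category);
}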
|
||||
#endif /* _CDP_TXRX_COMPUTE_TX_DELAY_H_ */
|
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_tx_throttle.h
|
||||
* Define the host data path transmit throttle API
|
||||
* functions called by the host control SW and the OS interface
|
||||
* module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_TX_THROTTLE_H_
|
||||
#define _CDP_TXRX_TX_THROTTLE_H_
|
||||
#include <cdp_txrx_ops.h>
|
||||
#include "cdp_txrx_handle.h"
|
||||
/**
|
||||
* cdp_throttle_init_period() - init tx throttle period
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
* @period: throttle period
|
||||
* @dutycycle_level: duty cycle level
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_throttle_init_period(ol_txrx_soc_handle soc, uint8_t pdev_id,
|
||||
int period, uint8_t *dutycycle_level)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->throttle_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->throttle_ops->throttle_init_period)
|
||||
return soc->ops->throttle_ops->throttle_init_period(
|
||||
soc, pdev_id, period, dutycycle_level);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
 * cdp_throttle_set_level() - set tx throttle level
|
||||
* @soc: data path soc handle
|
||||
* @pdev_id: id of data path pdev handle
|
||||
* @level: throttle level
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
static inline void
|
||||
cdp_throttle_set_level(ol_txrx_soc_handle soc, uint8_t pdev_id, int level)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->throttle_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (soc->ops->throttle_ops->throttle_set_level)
|
||||
return soc->ops->throttle_ops->throttle_set_level(soc, pdev_id,
|
||||
level);
|
||||
return;
|
||||
}
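/*
 * Example usage (illustrative sketch only): initialise the throttle period
 * with a duty-cycle table and then pick a level. The period, table size and
 * table contents are placeholder assumptions, not recommended values.
 */
static inline void example_tx_throttle_setup(ol_txrx_soc_handle soc,
					     uint8_t pdev_id)
{
	uint8_t dutycycle_level[4] = {100, 75, 50, 25}; /* example table */

	cdp_throttle_init_period(soc, pdev_id, 100, dutycycle_level);
	cdp_throttle_set_level(soc, pdev_id, 0);
}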
|
||||
|
||||
#endif /* _CDP_TXRX_TX_THROTTLE_H_ */
|
104
qcom/opensource/wlan/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h
Normal file
@ -0,0 +1,104 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: cdp_txrx_wds.h
|
||||
* Define the host data path WDS API functions
|
||||
* called by the host control SW and the OS interface module
|
||||
*/
|
||||
#ifndef _CDP_TXRX_WDS_H_
|
||||
#define _CDP_TXRX_WDS_H_
|
||||
#include "cdp_txrx_handle.h"
|
||||
|
||||
/**
|
||||
* cdp_set_wds_rx_policy() - set the wds rx filter policy of the device
|
||||
* @soc: psoc object
|
||||
* @vdev_id: id of the data virtual device object
|
||||
* @val: the wds rx policy bitmask
|
||||
*
|
||||
* This flag sets the wds rx policy on the vdev. Rx frames not compliant
|
||||
* with the policy will be dropped.
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_set_wds_rx_policy(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id,
|
||||
u_int32_t val)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->wds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (soc->ops->wds_ops->txrx_set_wds_rx_policy)
|
||||
soc->ops->wds_ops->txrx_set_wds_rx_policy(soc, vdev_id, val);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_set_wds_tx_policy_update() - set the wds tx filter policy of the device
|
||||
* @soc: psoc object
|
||||
* @vdev_id: id of the data virtual device object
|
||||
* @peer_mac: peer mac address
|
||||
* @wds_tx_ucast: the wds unicast tx policy bitmask
|
||||
* @wds_tx_mcast: the wds multicast tx policy bitmask
|
||||
*
|
||||
 * This flag sets the wds tx policy on the vdev. Tx frames not compliant
|
||||
* with the policy will be dropped.
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static inline QDF_STATUS
|
||||
cdp_set_wds_tx_policy_update(ol_txrx_soc_handle soc,
|
||||
uint8_t vdev_id, uint8_t *peer_mac,
|
||||
int wds_tx_ucast, int wds_tx_mcast)
|
||||
{
|
||||
if (!soc || !soc->ops || !soc->ops->wds_ops) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s invalid instance", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (soc->ops->wds_ops->txrx_wds_peer_tx_policy_update)
|
||||
soc->ops->wds_ops->txrx_wds_peer_tx_policy_update(
|
||||
soc, vdev_id, peer_mac, wds_tx_ucast,
|
||||
wds_tx_mcast);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_vdev_set_wds() - Set/unset wds_enable flag in vdev
|
||||
* @soc: data path soc handle
|
||||
* @vdev_id: id of data path vap handle
|
||||
* @val: value to be set in wds_en flag
|
||||
*
|
||||
* This flag enables WDS source port learning feature on a vdev
|
||||
*
|
||||
* Return: 1 on success
|
||||
*/
|
||||
static inline int
|
||||
cdp_vdev_set_wds(ol_txrx_soc_handle soc, uint8_t vdev_id, uint32_t val)
|
||||
{
|
||||
	if (!soc || !soc->ops || !soc->ops->wds_ops)
		return 0;

	if (soc->ops->wds_ops->vdev_set_wds)
|
||||
return soc->ops->wds_ops->vdev_set_wds(soc, vdev_id, val);
|
||||
return 0;
|
||||
}
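/*
 * Example usage (illustrative sketch only): enable WDS source port learning
 * on a vdev and then apply an rx filter policy. The policy bitmask below is
 * a placeholder value.
 */
static inline void example_wds_setup(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
	if (!cdp_vdev_set_wds(soc, vdev_id, 1))
		return;

	cdp_set_wds_rx_policy(soc, vdev_id, 0x3); /* placeholder bitmask */
}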
|
||||
#endif
|
4034
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.c
Normal file
File diff suppressed because it is too large
1164
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h
Normal file
File diff suppressed because it is too large
2555
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.c
Normal file
File diff suppressed because it is too large
1012
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.h
Normal file
File diff suppressed because it is too large
2033
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c
Normal file
File diff suppressed because it is too large
374
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.h
Normal file
@ -0,0 +1,374 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#ifndef __DP_BE_TX_H
|
||||
#define __DP_BE_TX_H
|
||||
/**
|
||||
* DOC: dp_be_tx.h
|
||||
*
|
||||
* BE specific TX Datapath header file. Need not be exposed to common DP code.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <dp_types.h>
|
||||
#include "dp_be.h"
|
||||
|
||||
struct __attribute__((__packed__)) dp_tx_comp_peer_id {
|
||||
uint16_t peer_id:13,
|
||||
ml_peer_valid:1,
|
||||
reserved:2;
|
||||
};
|
||||
|
||||
/* Invalid TX Bank ID value */
|
||||
#define DP_BE_INVALID_BANK_ID -1
|
||||
|
||||
/* Extraction of msdu queue information from per packet sawf metadata */
|
||||
#define DP_TX_HLOS_TID_GET(_var) \
|
||||
(((_var) & 0x0e) >> 1)
|
||||
#define DP_TX_FLOW_OVERRIDE_GET(_var) \
|
||||
((_var >> 3) & 0x1)
|
||||
#define DP_TX_WHO_CLFY_INF_SEL_GET(_var) \
|
||||
(((_var) & 0x30) >> 4)
|
||||
#define DP_TX_FLOW_OVERRIDE_ENABLE 0x1
|
||||
|
||||
#define DP_TX_FAST_DESC_SIZE 28
|
||||
#define DP_TX_L3_L4_CSUM_ENABLE 0x1f
|
||||
|
||||
#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
|
||||
static inline uint16_t
|
||||
dp_tx_comp_adjust_peer_id_be(struct dp_soc *soc, uint16_t peer_id)
|
||||
{
|
||||
struct dp_tx_comp_peer_id *tx_peer_id =
|
||||
(struct dp_tx_comp_peer_id *)&peer_id;
|
||||
|
||||
return (tx_peer_id->peer_id |
|
||||
(tx_peer_id->ml_peer_valid << soc->peer_id_shift));
|
||||
}
|
||||
/**
|
||||
* dp_tx_comp_get_peer_id_be() - Get peer ID from TX Comp Desc
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @tx_comp_hal_desc: TX comp ring descriptor
|
||||
*
|
||||
* Return: Peer ID
|
||||
*/
|
||||
static inline uint16_t dp_tx_comp_get_peer_id_be(struct dp_soc *soc,
|
||||
void *tx_comp_hal_desc)
|
||||
{
|
||||
uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
|
||||
|
||||
return dp_tx_comp_adjust_peer_id_be(soc, peer_id);
|
||||
}
|
||||
#else
|
||||
static inline uint16_t
|
||||
dp_tx_comp_adjust_peer_id_be(struct dp_soc *soc, uint16_t peer_id)
|
||||
{
|
||||
return peer_id;
|
||||
}
|
||||
static inline uint16_t dp_tx_comp_get_peer_id_be(struct dp_soc *soc,
|
||||
void *tx_comp_hal_desc)
|
||||
{
|
||||
return hal_tx_comp_get_peer_id(tx_comp_hal_desc);
|
||||
|
||||
}
|
||||
#endif
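/*
 * Worked example (illustrative only): how a raw 16-bit completion peer id
 * splits into the dp_tx_comp_peer_id fields above. The sample value is
 * arbitrary and the bit layout assumes the usual little-endian bit-field
 * packing that dp_tx_comp_adjust_peer_id_be() itself relies on.
 */
static inline void example_decode_tx_comp_peer_id(uint16_t raw)
{
	struct dp_tx_comp_peer_id *comp = (struct dp_tx_comp_peer_id *)&raw;

	/* e.g. raw = 0x2005 -> peer_id = 5, ml_peer_valid = 1, reserved = 0 */
	(void)comp->peer_id;
	(void)comp->ml_peer_valid;
}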
|
||||
|
||||
/**
|
||||
* dp_tx_hw_enqueue_be() - Enqueue to TCL HW for transmit for BE target
|
||||
* @soc: DP Soc Handle
|
||||
* @vdev: DP vdev handle
|
||||
* @tx_desc: Tx Descriptor Handle
|
||||
* @fw_metadata: Metadata to send to Target Firmware along with frame
|
||||
* @metadata: Handle that holds exception path meta data
|
||||
* @msdu_info: msdu_info containing information about TX buffer
|
||||
*
|
||||
* Gets the next free TCL HW DMA descriptor and sets up required parameters
|
||||
* from software Tx descriptor
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS: success
|
||||
* QDF_STATUS_E_RESOURCES: Error return
|
||||
*/
|
||||
QDF_STATUS dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
|
||||
struct dp_tx_desc_s *tx_desc,
|
||||
uint16_t fw_metadata,
|
||||
struct cdp_tx_exception_metadata *metadata,
|
||||
struct dp_tx_msdu_info_s *msdu_info);
|
||||
|
||||
#ifdef QCA_DP_TX_NBUF_LIST_FREE
|
||||
/**
|
||||
* dp_tx_fast_send_be() - Transmit a frame on a given VAP
|
||||
* @soc_hdl: DP soc handle
|
||||
* @vdev_id: id of DP vdev handle
|
||||
* @nbuf: skb
|
||||
*
|
||||
* Entry point for Core Tx layer (DP_TX) invoked from
|
||||
* hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
|
||||
* cases
|
||||
*
|
||||
* Return: NULL on success,
|
||||
* nbuf when it fails to send
|
||||
*/
|
||||
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
qdf_nbuf_t nbuf);
|
||||
#else
|
||||
static inline qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
qdf_nbuf_t nbuf)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_tx_comp_get_params_from_hal_desc_be() - Get TX desc from HAL comp desc
|
||||
* @soc: DP soc handle
|
||||
* @tx_comp_hal_desc: HAL TX Comp Descriptor
|
||||
* @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
|
||||
void *tx_comp_hal_desc,
|
||||
struct dp_tx_desc_s **r_tx_desc);
|
||||
|
||||
/**
|
||||
* dp_tx_process_htt_completion_be() - Tx HTT Completion Indication Handler
|
||||
* @soc: Handle to DP soc structure
|
||||
* @tx_desc: software descriptor head pointer
|
||||
* @status: Tx completion status from HTT descriptor
|
||||
* @ring_id: ring number
|
||||
*
|
||||
* This function will process HTT Tx indication messages from Target
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_process_htt_completion_be(struct dp_soc *soc,
|
||||
struct dp_tx_desc_s *tx_desc,
|
||||
uint8_t *status,
|
||||
uint8_t ring_id);
|
||||
|
||||
/**
|
||||
* dp_tx_init_bank_profiles() - Init TX bank profiles
|
||||
* @soc: DP soc handle
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS or QDF error code.
|
||||
*/
|
||||
QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *soc);
|
||||
|
||||
/**
|
||||
* dp_tx_deinit_bank_profiles() - De-Init TX bank profiles
|
||||
* @soc: DP soc handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_deinit_bank_profiles(struct dp_soc_be *soc);
|
||||
|
||||
/**
|
||||
* dp_tx_get_bank_profile() - get TX bank profile for vdev
|
||||
* @soc: DP soc handle
|
||||
* @be_vdev: BE vdev pointer
|
||||
*
|
||||
* Return: bank profile allocated to vdev or DP_BE_INVALID_BANK_ID
|
||||
*/
|
||||
int dp_tx_get_bank_profile(struct dp_soc_be *soc,
|
||||
struct dp_vdev_be *be_vdev);
|
||||
|
||||
/**
|
||||
* dp_tx_put_bank_profile() - release TX bank profile for vdev
|
||||
* @soc: DP soc handle
|
||||
* @be_vdev: pointer to be_vdev structure
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_put_bank_profile(struct dp_soc_be *soc, struct dp_vdev_be *be_vdev);
|
||||
|
||||
/**
|
||||
* dp_tx_update_bank_profile() - release existing and allocate new bank profile
|
||||
* @be_soc: DP soc handle
|
||||
* @be_vdev: pointer to be_vdev structure
|
||||
*
|
||||
* The function releases the existing bank profile allocated to the vdev and
|
||||
* looks for a new bank profile based on updated dp_vdev TX params.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
|
||||
struct dp_vdev_be *be_vdev);
|
||||
|
||||
/**
|
||||
* dp_tx_desc_pool_init_be() - Initialize Tx Descriptor pool(s)
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @num_elem: number of descriptor in pool
|
||||
* @pool_id: pool ID to allocate
|
||||
* @spcl_tx_desc: if special desc
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS - success, others - failure
|
||||
*/
|
||||
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
|
||||
uint32_t num_elem,
|
||||
uint8_t pool_id,
|
||||
bool spcl_tx_desc);
|
||||
/**
|
||||
* dp_tx_desc_pool_deinit_be() - De-initialize Tx Descriptor pool(s)
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @tx_desc_pool: Tx descriptor pool handler
|
||||
* @pool_id: pool ID to deinit
|
||||
* @spcl_tx_desc: if special desc
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
|
||||
struct dp_tx_desc_pool_s *tx_desc_pool,
|
||||
uint8_t pool_id, bool spcl_tx_desc);
|
||||
|
||||
#ifdef WLAN_SUPPORT_PPEDS
|
||||
/**
|
||||
* dp_ppeds_tx_comp_handler()- Handle tx completions for ppe2tcl ring
|
||||
* @be_soc: Handle to DP Soc structure
|
||||
* @quota: Max number of tx completions to process
|
||||
*
|
||||
* Return: Number of tx completions processed
|
||||
*/
|
||||
int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota);
|
||||
|
||||
/**
|
||||
 * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @peer_id: Peer ID in the descriptor
|
||||
*
|
||||
* Return: NONE
|
||||
*/
|
||||
|
||||
static inline
|
||||
void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id);
|
||||
|
||||
#endif
|
||||
#ifdef WLAN_FEATURE_11BE_MLO
|
||||
/**
|
||||
* dp_tx_mlo_mcast_handler_be() - Tx handler for Mcast packets
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @vdev: DP vdev handle
|
||||
* @nbuf: nbuf to be enqueued
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
qdf_nbuf_t nbuf);
|
||||
|
||||
/**
|
||||
* dp_tx_mlo_is_mcast_primary_be() - Function to check for primary mcast vdev
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @vdev: DP vdev handle
|
||||
*
|
||||
* Return: True if vdev is mcast primary
|
||||
* False for all other cases
|
||||
*/
|
||||
bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev);
|
||||
#ifdef WLAN_MCAST_MLO
|
||||
#ifdef WLAN_MLO_MULTI_CHIP
|
||||
#ifdef CONFIG_MLO_SINGLE_DEV
|
||||
/**
|
||||
* dp_tx_mlo_mcast_send_be() - Tx send handler for mlo mcast enhance
|
||||
* @soc: DP soc handle
|
||||
* @vdev: DP vdev handle
|
||||
* @nbuf: skb
|
||||
* @tx_exc_metadata: Handle that holds exception path meta data
|
||||
*
|
||||
* Return: NULL for success
|
||||
* nbuf for failure
|
||||
*/
|
||||
|
||||
qdf_nbuf_t dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
|
||||
qdf_nbuf_t nbuf,
|
||||
struct cdp_tx_exception_metadata
|
||||
*tx_exc_metadata);
|
||||
#endif
|
||||
/**
|
||||
* dp_tx_mlo_mcast_pkt_send() - handler to send MLO Mcast packets
|
||||
* @be_vdev: Handle to DP be_vdev structure
|
||||
* @ptnr_vdev: DP ptnr_vdev handle
|
||||
* @arg: nbuf to be enqueued
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
|
||||
struct dp_vdev *ptnr_vdev,
|
||||
void *arg);
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
|
||||
/**
|
||||
* dp_tx_comp_nf_handler() - Tx completion ring Near full scenario handler
|
||||
* @int_ctx: Interrupt context
|
||||
* @soc: Datapath SoC handle
|
||||
* @hal_ring_hdl: TX completion ring handle
|
||||
* @ring_id: TX completion ring number
|
||||
* @quota: Quota of the work to be done
|
||||
*
|
||||
* Return: work done
|
||||
*/
|
||||
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
|
||||
hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
|
||||
uint32_t quota);
|
||||
#else
|
||||
static inline
|
||||
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
|
||||
hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
|
||||
uint32_t quota)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
|
||||
|
||||
/**
|
||||
* dp_tx_compute_tx_delay_be() - Compute HW Tx completion delay
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @vdev: vdev
|
||||
* @ts: Tx completion status
|
||||
* @delay_us: Delay to be calculated in microseconds
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
struct hal_tx_completion_status *ts,
|
||||
uint32_t *delay_us);
|
||||
|
||||
/**
|
||||
* dp_tx_desc_pool_alloc_be() - Allocate TX descriptor pool
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @num_elem: Number of elements to allocate
|
||||
* @pool_id: TCL descriptor pool ID
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id);
|
||||
|
||||
/**
|
||||
* dp_tx_desc_pool_free_be() - Free TX descriptor pool
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @pool_id: TCL descriptor pool ID
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id);
|
||||
#endif
|
1803	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/be/mlo/dp_mlo.c	(new file; diff suppressed because it is too large)
@ -0,0 +1,262 @@
|
||||
/*
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#ifndef __DP_MLO_H
|
||||
#define __DP_MLO_H
|
||||
|
||||
#include <dp_types.h>
|
||||
#include <dp_peer.h>
|
||||
|
||||
/* Max number of chips that can participate in MLO */
|
||||
#define DP_MAX_MLO_CHIPS WLAN_MAX_MLO_CHIPS
|
||||
|
||||
/* Max number of peers supported */
|
||||
#define DP_MAX_MLO_PEER 512
|
||||
|
||||
/* Max number of destination chip IDs supported */
|
||||
#define DP_MLO_MAX_DEST_CHIP_ID 4
|
||||
|
||||
/*
|
||||
* NB: intentionally not using kernel-doc comment because the kernel-doc
|
||||
* script does not handle the TAILQ_HEAD macro
|
||||
* struct dp_mlo_ctxt - datapath MLO context
|
||||
*
|
||||
* @ctrl_ctxt: opaque handle of cp mlo mgr
|
||||
* @ml_soc_list: list of socs which are mlo enabled. This also maintains
|
||||
* mlo_chip_id to dp_soc mapping
|
||||
* @ml_soc_cnt: number of SOCs
|
||||
* @ml_soc_list_lock: lock to protect ml_soc_list
|
||||
* @mld_peer_hash: peer hash table for ML peers
|
||||
* (looks up the ML peer associated with a given MAC address)
|
||||
* @mld_peer_hash_lock: lock to protect mld_peer_hash
|
||||
* @toeplitz_hash_ipv4: Toeplitz hash key (seed) used for IPv4 RX hashing
|
||||
* @toeplitz_hash_ipv6: Toeplitz hash key (seed) used for IPv6 RX hashing
|
||||
* @link_to_pdev_map: link to pdev mapping
|
||||
* @rx_fst: pointer to rx_fst handle
|
||||
* @rx_fst_ref_cnt: ref count of rx_fst
|
||||
* @grp_umac_reset_ctx: UMAC reset context at mlo group level
|
||||
* @mlo_dev_list: list of MLO device context
|
||||
* @mlo_dev_list_lock: lock to protect MLO device ctxt
|
||||
*/
|
||||
struct dp_mlo_ctxt {
|
||||
struct cdp_ctrl_mlo_mgr *ctrl_ctxt;
|
||||
struct dp_soc *ml_soc_list[DP_MAX_MLO_CHIPS];
|
||||
uint8_t ml_soc_cnt;
|
||||
qdf_spinlock_t ml_soc_list_lock;
|
||||
struct {
|
||||
uint32_t mask;
|
||||
uint32_t idx_bits;
|
||||
|
||||
TAILQ_HEAD(, dp_peer) * bins;
|
||||
} mld_peer_hash;
|
||||
|
||||
qdf_spinlock_t mld_peer_hash_lock;
|
||||
uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
|
||||
uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
|
||||
struct dp_pdev_be *link_to_pdev_map[WLAN_MAX_MLO_CHIPS *
|
||||
WLAN_MAX_MLO_LINKS_PER_SOC];
|
||||
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||
struct dp_soc_mlo_umac_reset_ctx grp_umac_reset_ctx;
|
||||
#endif
|
||||
/* MLO device ctxt list */
|
||||
TAILQ_HEAD(, dp_mlo_dev_ctxt) mlo_dev_list;
|
||||
qdf_spinlock_t mlo_dev_list_lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* dp_mlo_ctx_to_cdp() - typecast dp mlo context to CDP context
|
||||
* @mlo_ctxt: DP MLO context
|
||||
*
|
||||
* Return: struct cdp_mlo_ctxt pointer
|
||||
*/
|
||||
static inline
|
||||
struct cdp_mlo_ctxt *dp_mlo_ctx_to_cdp(struct dp_mlo_ctxt *mlo_ctxt)
|
||||
{
|
||||
return (struct cdp_mlo_ctxt *)mlo_ctxt;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdp_mlo_ctx_to_dp() - typecast CDP MLO context to DP MLO context
|
||||
* @mlo_ctxt: CDP MLO context
|
||||
*
|
||||
* Return: struct dp_mlo_ctxt pointer
|
||||
*/
|
||||
static inline
|
||||
struct dp_mlo_ctxt *cdp_mlo_ctx_to_dp(struct cdp_mlo_ctxt *mlo_ctxt)
|
||||
{
|
||||
return (struct dp_mlo_ctxt *)mlo_ctxt;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_soc_mlo_fill_params() - update SOC mlo params
|
||||
* @soc: DP soc
|
||||
* @params: soc attach params
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_soc_mlo_fill_params(struct dp_soc *soc,
|
||||
struct cdp_soc_attach_params *params);
|
||||
|
||||
/**
|
||||
* dp_pdev_mlo_fill_params() - update PDEV mlo params
|
||||
* @pdev: DP PDEV
|
||||
* @params: PDEV attach params
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
|
||||
struct cdp_pdev_attach_params *params);
|
||||
|
||||
/**
|
||||
* dp_mlo_get_soc_ref_by_chip_id() - Get DP soc from DP ML context.
|
||||
* @ml_ctxt: DP ML context handle
|
||||
* @chip_id: MLO chip id
|
||||
*
|
||||
* This API will increment a reference count for DP soc. Caller has
|
||||
* to take care of decrementing the refcount.
|
||||
*
|
||||
* Return: dp_soc
|
||||
*/
|
||||
struct dp_soc*
|
||||
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t chip_id);
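Illustrative lookup sketch (not part of the patch); using dp_soc_unref_delete() as the matching release call is an assumption based on the datapath's usual ref-count convention.

/* Illustrative: look up a partner SOC by MLO chip id and drop the
 * reference once done.
 */
static void example_visit_partner_soc(struct dp_mlo_ctxt *ml_ctxt,
				      uint8_t chip_id)
{
	struct dp_soc *soc;

	soc = dp_mlo_get_soc_ref_by_chip_id(ml_ctxt, chip_id);
	if (!soc)
		return;

	/* ... use soc ... */

	dp_soc_unref_delete(soc);	/* assumed release counterpart */
}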
|
||||
|
||||
/**
|
||||
* dp_mlo_get_rx_hash_key() - Get Rx hash key from MLO context
|
||||
* @soc: DP SOC
|
||||
* @lro_hash: Hash params
|
||||
*
|
||||
*/
|
||||
void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
|
||||
struct cdp_lro_hash_config *lro_hash);
|
||||
|
||||
/**
|
||||
* dp_mlo_rx_fst_deref() - decrement rx_fst
|
||||
* @soc: dp soc
|
||||
*
|
||||
* Return: rx_fst reference count after decrement
|
||||
*/
|
||||
uint8_t dp_mlo_rx_fst_deref(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_mlo_rx_fst_ref() - increment ref of rx_fst
|
||||
* @soc: dp soc
|
||||
*
|
||||
*/
|
||||
void dp_mlo_rx_fst_ref(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_mlo_get_rx_fst() - Get Rx FST from MLO context
|
||||
* @soc: DP SOC
|
||||
*
|
||||
* Return: struct dp_rx_fst pointer
|
||||
*/
|
||||
struct dp_rx_fst *dp_mlo_get_rx_fst(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_mlo_set_rx_fst() - Set Rx FST in MLO context
|
||||
* @soc: DP SOC
|
||||
* @fst: pointer dp_rx_fst
|
||||
*
|
||||
*/
|
||||
void dp_mlo_set_rx_fst(struct dp_soc *soc, struct dp_rx_fst *fst);
|
||||
|
||||
/**
|
||||
* dp_mlo_update_link_to_pdev_map() - map link-id to pdev mapping
|
||||
* @soc: DP SOC
|
||||
* @pdev: DP PDEV
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_mlo_update_link_to_pdev_unmap() - unmap link-id to pdev mapping
|
||||
* @soc: DP SOC
|
||||
* @pdev: DP PDEV
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_mlo_get_delta_tsf2_wrt_mlo_offset() - Get delta between mlo timestamp
|
||||
* offset and delta tsf2
|
||||
* @soc: DP SOC
|
||||
* @hw_link_id: link id
|
||||
*
|
||||
* Return: int32_t
|
||||
*/
|
||||
int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
|
||||
uint8_t hw_link_id);
|
||||
|
||||
/**
|
||||
* dp_mlo_get_delta_tqm_wrt_mlo_offset() - Get delta between mlo timestamp
|
||||
* offset and delta tqm
|
||||
* @soc: DP SOC
|
||||
*
|
||||
* Return: int32_t
|
||||
*/
|
||||
int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_get_interface_stats_be() - get vdev stats for ath interface
|
||||
* @soc_hdl: CDP SoC handle
|
||||
* @vdev_id: vdev Id
|
||||
* @buf: buffer for vdev stats
|
||||
* @is_aggregate: for aggregation
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
void *buf, bool is_aggregate);
|
||||
|
||||
/*
|
||||
* dp_mlo_debug_print_ptnr_info() - print partner info
|
||||
* @vdev: DP VDEV
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev);
|
||||
|
||||
/*
|
||||
* dp_mlo_get_chip_id() - return MLO chip id
|
||||
* @soc: DP soc
|
||||
*
|
||||
* Return: chip_id
|
||||
*/
|
||||
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
|
||||
|
||||
/*
|
||||
* dp_mlo_link_peer_hash_find_by_chip_id() - find the MLO link peer in the
* given chip's peer hash table, matching vdev_id and MAC address
|
||||
* @soc: partner soc handle in MLO
|
||||
* @peer_mac_addr: peer mac address
|
||||
* @mac_addr_is_aligned: is mac addr aligned
|
||||
* @vdev_id: vdev_id
|
||||
* @chip_id: mlo_chip_id
|
||||
* @mod_id: id of module requesting reference
|
||||
*
|
||||
* Return: peer on success,
*         NULL on failure
|
||||
*/
|
||||
struct dp_peer *
|
||||
dp_mlo_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
|
||||
uint8_t *peer_mac_addr,
|
||||
int mac_addr_is_aligned,
|
||||
uint8_t vdev_id,
|
||||
uint8_t chip_id,
|
||||
enum dp_mod_id mod_id);
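Usage sketch (illustration only, not part of the patch); dp_peer_unref_delete() with the same mod_id as the release counterpart is an assumption.

/* Illustrative: find an MLO link peer on a specific chip and release the
 * module reference afterwards.
 */
static void example_find_link_peer(struct dp_soc *soc, uint8_t *mac,
				   uint8_t vdev_id, uint8_t chip_id)
{
	struct dp_peer *peer;

	peer = dp_mlo_link_peer_hash_find_by_chip_id(soc, mac,
						     0 /* unaligned mac */,
						     vdev_id, chip_id,
						     DP_MOD_ID_CDP);
	if (!peer)
		return;

	/* ... inspect peer ... */

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);	/* assumed counterpart */
}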
|
||||
#endif /* __DP_MLO_H */
|
@ -0,0 +1,99 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "dp_types.h"
|
||||
#include "cdp_txrx_cmn_reg.h"
|
||||
|
||||
void dp_configure_arch_ops(struct dp_soc *soc);
|
||||
qdf_size_t dp_get_soc_context_size(uint16_t device_id);
|
||||
|
||||
#ifdef CONFIG_LITHIUM
|
||||
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops);
|
||||
qdf_size_t dp_get_soc_context_size_li(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BERYLLIUM
|
||||
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
|
||||
qdf_size_t dp_get_soc_context_size_be(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_RHINE
|
||||
void dp_initialize_arch_ops_rh(struct dp_arch_ops *arch_ops);
|
||||
qdf_size_t dp_get_soc_context_size_rh(void);
|
||||
#endif
|
||||
|
||||
static void dp_initialize_default_arch_ops(struct dp_arch_ops *arch_ops)
|
||||
{
|
||||
/* assign dummy functions for arch_ops which are architecture specific */
|
||||
}
|
||||
|
||||
qdf_size_t dp_get_soc_context_size(uint16_t device_id)
{
|
||||
switch (cdp_get_arch_type_from_devid(device_id)) {
|
||||
#ifdef CONFIG_LITHIUM
|
||||
case CDP_ARCH_TYPE_LI:
|
||||
return dp_get_soc_context_size_li();
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BERYLLIUM
|
||||
case CDP_ARCH_TYPE_BE:
|
||||
return dp_get_soc_context_size_be();
|
||||
#endif
|
||||
#ifdef CONFIG_RHINE
|
||||
case CDP_ARCH_TYPE_RH:
|
||||
return dp_get_soc_context_size_rh();
|
||||
#endif
|
||||
|
||||
default:
|
||||
QDF_BUG(0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dp_configure_arch_ops(struct dp_soc *soc)
|
||||
{
|
||||
dp_initialize_default_arch_ops(&soc->arch_ops);
|
||||
|
||||
switch (cdp_get_arch_type_from_devid(soc->device_id)) {
|
||||
#ifdef CONFIG_LITHIUM
|
||||
case CDP_ARCH_TYPE_LI:
|
||||
dp_initialize_arch_ops_li(&soc->arch_ops);
|
||||
break;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BERYLLIUM
|
||||
case CDP_ARCH_TYPE_BE:
|
||||
dp_initialize_arch_ops_be(&soc->arch_ops);
|
||||
break;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_RHINE
|
||||
case CDP_ARCH_TYPE_RH:
|
||||
dp_initialize_arch_ops_rh(&soc->arch_ops);
|
||||
break;
|
||||
#endif
|
||||
|
||||
default:
|
||||
QDF_BUG(0);
|
||||
}
|
||||
}
|
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: dp_cal_client_api.h
|
||||
* define timer to update DP stats
|
||||
*/
|
||||
#ifndef _DP_CAL_CLIENT_H_
|
||||
#define _DP_CAL_CLIENT_H_
|
||||
|
||||
#include<cdp_txrx_stats_struct.h>
|
||||
#include <qdf_timer.h>
|
||||
#include <qdf_mem.h>
|
||||
#include <cdp_txrx_handle.h>
|
||||
|
||||
/* timer will run every 1 sec */
|
||||
#define DP_CAL_CLIENT_TIME 1000
|
||||
|
||||
struct cal_client {
|
||||
qdf_timer_t cal_client_timer;
|
||||
void (*iterate_update_peer_list)(struct cdp_pdev *ctx);
|
||||
struct cdp_pdev *pdev_hdl;
|
||||
};
|
||||
|
||||
void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx,
|
||||
struct cdp_pdev *pdev, qdf_device_t osdev,
|
||||
void (*iterate_peer_list)(struct cdp_pdev *));
|
||||
void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx);
|
||||
void dp_cal_client_timer_start(void *ctx);
|
||||
void dp_cal_client_timer_stop(void *ctx);
|
||||
void dp_cal_client_stats_timer_fn(void *pdev_hdl);
|
||||
void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats);
|
||||
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *stats_intf,
|
||||
struct cdp_calibr_stats *calibr_stats);
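A minimal attach/start sketch (illustration, not part of the patch) using only the declarations above; the iterate callback body is a placeholder.

/* Illustrative: set up the calibration client timer for a pdev. */
static void example_iterate_peers(struct cdp_pdev *pdev)
{
	/* placeholder: walk peers and refresh their calibration stats */
}

static void example_cal_client_start(struct cdp_cal_client **cal_ctx,
				     struct cdp_pdev *pdev,
				     qdf_device_t osdev)
{
	dp_cal_client_attach(cal_ctx, pdev, osdev, example_iterate_peers);
	/* timer fires every DP_CAL_CLIENT_TIME ms */
	dp_cal_client_timer_start(*cal_ctx);
}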
|
||||
|
||||
#ifndef ATH_SUPPORT_EXT_STAT
|
||||
void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx,
|
||||
struct cdp_pdev *pdev, qdf_device_t osdev,
|
||||
void (*iterate_peer_list)(struct cdp_pdev *))
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_timer_start(void *ctx)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_timer_stop(void *ctx)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_stats_timer_fn(void *pdev_hdl)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *stats_intf,
|
||||
struct cdp_calibr_stats *calibr_stats)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_DP_CAL_CLIENT_H_*/
|
333	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_hist.c	(new file)
@ -0,0 +1,333 @@
|
||||
/*
|
||||
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <qdf_util.h>
|
||||
#include <qdf_mem.h>
|
||||
#include <cdp_txrx_hist_struct.h>
|
||||
#include "dp_hist.h"
|
||||
|
||||
#ifndef WLAN_CONFIG_TX_DELAY
|
||||
/*
|
||||
* dp_hist_sw_enq_dbucket: Software enqueue delay bucket in ms
|
||||
* @index_0 = 0_1 ms
|
||||
* @index_1 = 1_2 ms
|
||||
* @index_2 = 2_3 ms
|
||||
* @index_3 = 3_4 ms
|
||||
* @index_4 = 4_5 ms
|
||||
* @index_5 = 5_6 ms
|
||||
* @index_6 = 6_7 ms
|
||||
* @index_7 = 7_8 ms
|
||||
* @index_8 = 8_9 ms
|
||||
* @index_9 = 9_10 ms
|
||||
* @index_10 = 10_11 ms
|
||||
* @index_11 = 11_12 ms
|
||||
* @index_12 = 12+ ms
|
||||
*/
|
||||
static uint16_t dp_hist_sw_enq_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
|
||||
|
||||
/*
|
||||
* dp_hist_fw2hw_dbucket: HW enqueue to completion delay bucket in ms
|
||||
* @index_0 = 0_10 ms
|
||||
* @index_1 = 10_20 ms
|
||||
* @index_2 = 20_30 ms
|
||||
* @index_3 = 30_40 ms
|
||||
* @index_4 = 40_50 ms
|
||||
* @index_5 = 50_60 ms
|
||||
* @index_6 = 60_70 ms
|
||||
* @index_7 = 70_80 ms
|
||||
* @index_8 = 80_90 ms
|
||||
* @index_9 = 90_100 ms
|
||||
* @index_10 = 100_250 ms
|
||||
* @index_11 = 250_500 ms
|
||||
* @index_12 = 500+ ms
|
||||
*/
|
||||
static uint16_t dp_hist_fw2hw_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
|
||||
#else
|
||||
/*
|
||||
* dp_hist_sw_enq_dbucket: Software enqueue delay bucket in us
|
||||
* @index_0 = 0_250 us
|
||||
* @index_1 = 250_500 us
|
||||
* @index_2 = 500_750 us
|
||||
* @index_3 = 750_1000 us
|
||||
* @index_4 = 1000_1500 us
|
||||
* @index_5 = 1500_2000 us
|
||||
* @index_6 = 2000_2500 us
|
||||
* @index_7 = 2500_5000 us
|
||||
* @index_8 = 5000_6000 us
|
||||
* @index_9 = 6000_7000 us
|
||||
* @index_10 = 7000_8000 us
|
||||
* @index_11 = 8000_9000 us
|
||||
* @index_12 = 9000+ us
|
||||
*/
|
||||
static uint16_t dp_hist_sw_enq_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 250, 500, 750, 1000, 1500, 2000, 2500, 5000, 6000, 7000, 8000, 9000};
|
||||
|
||||
/*
|
||||
* dp_hist_fw2hw_dbucket: HW enqueue to completion delay bucket in us
|
||||
* @index_0 = 0_250 us
|
||||
* @index_1 = 250_500 us
|
||||
* @index_2 = 500_750 us
|
||||
* @index_3 = 750_1000 us
|
||||
* @index_4 = 1000_1500 us
|
||||
* @index_5 = 1500_2000 us
|
||||
* @index_6 = 2000_2500 us
|
||||
* @index_7 = 2500_5000 us
|
||||
* @index_8 = 5000_6000 us
|
||||
* @index_9 = 6000_7000 us
|
||||
* @index_10 = 7000_8000 us
|
||||
* @index_11 = 8000_9000 us
|
||||
* @index_12 = 9000+ us
|
||||
*/
|
||||
|
||||
static uint16_t dp_hist_fw2hw_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 250, 500, 750, 1000, 1500, 2000, 2500, 5000, 6000, 7000, 8000, 9000};
|
||||
#endif
|
||||
|
||||
/*
|
||||
* dp_hist_reap2stack_bucket: Reap to stack bucket
|
||||
* @index_0 = 0_5 ms
|
||||
* @index_1 = 5_10 ms
|
||||
* @index_2 = 10_15 ms
|
||||
* @index_3 = 15_20 ms
|
||||
* @index_4 = 20_25 ms
|
||||
* @index_5 = 25_30 ms
|
||||
* @index_6 = 30_35 ms
|
||||
* @index_7 = 35_40 ms
|
||||
* @index_8 = 40_45 ms
|
||||
* @index_9 = 45_50 ms
* @index_10 = 50_55 ms
* @index_11 = 55_60 ms
|
||||
* @index_12 = 60+ ms
|
||||
*/
|
||||
static uint16_t dp_hist_reap2stack_bucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
|
||||
|
||||
/*
|
||||
* dp_hist_hw_tx_comp_dbucket: tx hw completion delay bucket in us
|
||||
* @index_0 = 0_250 us
|
||||
* @index_1 = 250_500 us
|
||||
* @index_2 = 500_750 us
|
||||
* @index_3 = 750_1000 us
|
||||
* @index_4 = 1000_1500 us
|
||||
* @index_5 = 1500_2000 us
|
||||
* @index_6 = 2000_2500 us
|
||||
* @index_7 = 2500_5000 us
|
||||
* @index_8 = 5000_6000 us
|
||||
* @index_9 = 6000_7000 us
|
||||
* @index_10 = 7000_8000 us
|
||||
* @index_11 = 8000_9000 us
|
||||
* @index_12 = 9000+ us
|
||||
*/
|
||||
static uint16_t dp_hist_hw_tx_comp_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 250, 500, 750, 1000, 1500, 2000, 2500, 5000, 6000, 7000, 8000, 9000};
|
||||
|
||||
static const char *dp_hist_hw_tx_comp_dbucket_str[CDP_HIST_BUCKET_MAX + 1] = {
|
||||
"0 to 250 us", "250 to 500 us",
|
||||
"500 to 750 us", "750 to 1000 us",
|
||||
"1000 to 1500 us", "1500 to 2000 us",
|
||||
"2000 to 2500 us", "2500 to 5000 us",
|
||||
"5000 to 6000 us", "6000 to 7000 ms",
|
||||
"7000 to 8000 us", "8000 to 9000 us", "9000+ us"
|
||||
};
|
||||
|
||||
const char *dp_hist_tx_hw_delay_str(uint8_t index)
|
||||
{
|
||||
if (index > CDP_HIST_BUCKET_MAX)
|
||||
return "Invalid index";
|
||||
return dp_hist_hw_tx_comp_dbucket_str[index];
|
||||
}
|
||||
|
||||
/*
|
||||
* dp_hist_delay_percentile_dbucket: tx hw completion delay bucket in delay
|
||||
* bound percentile
|
||||
* @index_0 = 0_10
|
||||
* @index_1 = 10_20
|
||||
* @index_2 = 20_30
|
||||
* @index_3 = 30_40
|
||||
* @index_4 = 40_50
|
||||
* @index_5 = 50_60
|
||||
* @index_6 = 60_70
|
||||
* @index_7 = 70_80
|
||||
* @index_8 = 80_90
|
||||
* @index_9 = 90_100
|
||||
* @index_10 = 100_150
|
||||
* @index_11 = 150_200
|
||||
* @index_12 = 200+
|
||||
*/
|
||||
static uint16_t dp_hist_delay_percentile_dbucket[CDP_HIST_BUCKET_MAX] = {
|
||||
0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150, 200};
|
||||
|
||||
static
|
||||
const char *dp_hist_delay_percentile_dbucket_str[CDP_HIST_BUCKET_MAX + 1] = {
|
||||
"0 to 10%", "10 to 20%",
|
||||
"20 to 30%", "30 to 40%",
|
||||
"40 to 50%", "50 to 60%",
|
||||
"60 to 70%", "70 to 80%",
|
||||
"80 to 90% ", "90 to 100%",
|
||||
"100 to 150% ", "150 to 200%", "200+%"
|
||||
};
|
||||
|
||||
const char *dp_hist_delay_percentile_str(uint8_t index)
|
||||
{
|
||||
if (index > CDP_HIST_BUCKET_MAX)
|
||||
return "Invalid index";
|
||||
return dp_hist_delay_percentile_dbucket_str[index];
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_hist_find_bucket_idx() - Find the bucket index
|
||||
* @bucket_array: Bucket array
|
||||
* @value: Value to be bucketed
|
||||
*
|
||||
* Return: The bucket index
|
||||
*/
|
||||
static int dp_hist_find_bucket_idx(int16_t *bucket_array, int value)
|
||||
{
|
||||
uint8_t idx = CDP_HIST_BUCKET_0;
|
||||
|
||||
for (; idx < (CDP_HIST_BUCKET_MAX - 1); idx++) {
|
||||
if (value < bucket_array[idx + 1])
|
||||
break;
|
||||
}
|
||||
|
||||
return idx;
|
||||
}
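Worked example (editor's illustration, not part of the patch): with the dp_hist_hw_tx_comp_dbucket boundaries above, a 1200 us completion delay lands in bucket 4, the "1000 to 1500 us" range, because the scan stops at the first boundary with value < bucket[idx + 1].

/* Illustrative only: bucket a 1200 us HW completion delay. */
static void example_bucket_lookup(void)
{
	int idx = dp_hist_find_bucket_idx(&dp_hist_hw_tx_comp_dbucket[0], 1200);
	const char *range = dp_hist_tx_hw_delay_str(idx);

	/* idx == 4 and range == "1000 to 1500 us" */
	(void)range;
}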
|
||||
|
||||
/**
|
||||
* dp_hist_fill_buckets() - Fill the histogram frequency buckets
|
||||
* @hist_bucket: Histogram buckets
|
||||
* @value: Value to be bucketed
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static void dp_hist_fill_buckets(struct cdp_hist_bucket *hist_bucket, int value)
|
||||
{
|
||||
enum cdp_hist_types hist_type;
|
||||
int idx = CDP_HIST_BUCKET_MAX;
|
||||
|
||||
if (qdf_unlikely(!hist_bucket))
|
||||
return;
|
||||
|
||||
hist_type = hist_bucket->hist_type;
|
||||
|
||||
/* Identify the bucket and update its frequency count. */
|
||||
switch (hist_type) {
|
||||
case CDP_HIST_TYPE_SW_ENQEUE_DELAY:
|
||||
idx = dp_hist_find_bucket_idx(&dp_hist_sw_enq_dbucket[0],
|
||||
value);
|
||||
break;
|
||||
case CDP_HIST_TYPE_HW_COMP_DELAY:
|
||||
idx = dp_hist_find_bucket_idx(&dp_hist_fw2hw_dbucket[0],
|
||||
value);
|
||||
break;
|
||||
case CDP_HIST_TYPE_REAP_STACK:
|
||||
idx = dp_hist_find_bucket_idx(
|
||||
&dp_hist_reap2stack_bucket[0], value);
|
||||
break;
|
||||
case CDP_HIST_TYPE_HW_TX_COMP_DELAY:
|
||||
idx = dp_hist_find_bucket_idx(
|
||||
&dp_hist_hw_tx_comp_dbucket[0], value);
|
||||
break;
|
||||
case CDP_HIST_TYPE_DELAY_PERCENTILE:
|
||||
idx = dp_hist_find_bucket_idx(
|
||||
&dp_hist_delay_percentile_dbucket[0], value);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (idx == CDP_HIST_BUCKET_MAX)
|
||||
return;
|
||||
|
||||
hist_bucket->freq[idx]++;
|
||||
}
|
||||
|
||||
void dp_hist_update_stats(struct cdp_hist_stats *hist_stats, int value)
|
||||
{
|
||||
if (qdf_unlikely(!hist_stats))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Fill the histogram buckets according to the delay
|
||||
*/
|
||||
dp_hist_fill_buckets(&hist_stats->hist, value);
|
||||
|
||||
/*
|
||||
* Compute the min, max and average. Average computed is weighted
|
||||
* average
|
||||
*/
|
||||
if (value < hist_stats->min)
|
||||
hist_stats->min = value;
|
||||
|
||||
if (value > hist_stats->max)
|
||||
hist_stats->max = value;
|
||||
|
||||
if (qdf_unlikely(!hist_stats->avg))
|
||||
hist_stats->avg = value;
|
||||
else
|
||||
hist_stats->avg = (hist_stats->avg + value) / 2;
|
||||
}
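A minimal caller sketch (illustration, not part of the patch) pairing dp_hist_init() with per-sample updates; both entry points are declared in dp_hist.h.

/* Illustrative: initialize a delay histogram and feed it samples. */
static void example_delay_histogram(struct cdp_hist_stats *stats,
				    const int *delays, int num)
{
	int i;

	dp_hist_init(stats, CDP_HIST_TYPE_SW_ENQEUE_DELAY);
	for (i = 0; i < num; i++)
		dp_hist_update_stats(stats, delays[i]);
}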
|
||||
|
||||
void dp_copy_hist_stats(struct cdp_hist_stats *src_hist_stats,
|
||||
struct cdp_hist_stats *dst_hist_stats)
|
||||
{
|
||||
uint8_t index;
|
||||
|
||||
for (index = 0; index < CDP_HIST_BUCKET_MAX; index++)
|
||||
dst_hist_stats->hist.freq[index] =
|
||||
src_hist_stats->hist.freq[index];
|
||||
dst_hist_stats->min = src_hist_stats->min;
|
||||
dst_hist_stats->max = src_hist_stats->max;
|
||||
dst_hist_stats->avg = src_hist_stats->avg;
|
||||
}
|
||||
|
||||
void dp_accumulate_hist_stats(struct cdp_hist_stats *src_hist_stats,
|
||||
struct cdp_hist_stats *dst_hist_stats)
|
||||
{
|
||||
uint8_t index, hist_stats_valid = 0;
|
||||
|
||||
for (index = 0; index < CDP_HIST_BUCKET_MAX; index++) {
|
||||
dst_hist_stats->hist.freq[index] +=
|
||||
src_hist_stats->hist.freq[index];
|
||||
if (src_hist_stats->hist.freq[index])
|
||||
hist_stats_valid = 1;
|
||||
}
|
||||
/*
|
||||
* If at least one hist-bucket has non-zero count,
|
||||
* proceed with the detailed calculation.
|
||||
*/
|
||||
if (hist_stats_valid) {
|
||||
dst_hist_stats->min = QDF_MIN(src_hist_stats->min,
|
||||
dst_hist_stats->min);
|
||||
dst_hist_stats->max = QDF_MAX(src_hist_stats->max,
|
||||
dst_hist_stats->max);
|
||||
dst_hist_stats->avg = (src_hist_stats->avg +
|
||||
dst_hist_stats->avg) >> 1;
|
||||
}
|
||||
}
|
||||
|
||||
void dp_hist_init(struct cdp_hist_stats *hist_stats,
|
||||
enum cdp_hist_types hist_type)
|
||||
{
|
||||
qdf_mem_zero(hist_stats, sizeof(*hist_stats));
|
||||
hist_stats->min = INT_MAX;
|
||||
hist_stats->hist.hist_type = hist_type;
|
||||
}
|
69	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_hist.h	(new file)
@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: dp_hist.h
|
||||
* histogram header file
|
||||
*/
|
||||
|
||||
#ifndef __DP_HIST_H_
|
||||
#define __DP_HIST_H_
|
||||
|
||||
#define HIST_AVG_WEIGHT_DENOM 4
|
||||
|
||||
/**
|
||||
* dp_hist_update_stats() - Update histogram stats
|
||||
* @hist_stats: Delay histogram
|
||||
* @value: Delay value
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_hist_update_stats(struct cdp_hist_stats *hist_stats, int value);
|
||||
|
||||
/**
|
||||
* dp_hist_init() - Initialize the histogram object
|
||||
* @hist_stats: Hist stats object
|
||||
* @hist_type: Histogram type
|
||||
*/
|
||||
void dp_hist_init(struct cdp_hist_stats *hist_stats,
|
||||
enum cdp_hist_types hist_type);
|
||||
|
||||
/**
|
||||
* dp_accumulate_hist_stats() - Accumulate the hist src to dst
|
||||
* @src_hist_stats: Source histogram stats
|
||||
* @dst_hist_stats: Destination histogram stats
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_accumulate_hist_stats(struct cdp_hist_stats *src_hist_stats,
|
||||
struct cdp_hist_stats *dst_hist_stats);
|
||||
|
||||
/**
|
||||
* dp_copy_hist_stats() - Copy the histogram stats
|
||||
* @src_hist_stats: Source histogram stats
|
||||
* @dst_hist_stats: Destination histogram stats
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_copy_hist_stats(struct cdp_hist_stats *src_hist_stats,
|
||||
struct cdp_hist_stats *dst_hist_stats);
|
||||
|
||||
const char *dp_hist_tx_hw_delay_str(uint8_t index);
|
||||
const char *dp_hist_delay_percentile_str(uint8_t index);
|
||||
#endif /* __DP_HIST_H_ */
|
5852	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c	(new file; diff suppressed because it is too large)
1256	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h	(new file; diff suppressed because it is too large)
5937	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h	(new file; diff suppressed because it is too large)
4380	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c	(new file; diff suppressed because it is too large)
749	qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h	(new file)
@ -0,0 +1,749 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_IPA_H_
|
||||
#define _DP_IPA_H_
|
||||
|
||||
#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
|
||||
/* Index into soc->tcl_data_ring[] */
|
||||
#define IPA_TCL_DATA_RING_IDX 3
|
||||
#else
|
||||
#define IPA_TCL_DATA_RING_IDX 2
|
||||
#endif
|
||||
/* Index into soc->tx_comp_ring[] */
|
||||
#define IPA_TX_COMP_RING_IDX IPA_TCL_DATA_RING_IDX
|
||||
|
||||
#ifdef IPA_OFFLOAD
|
||||
|
||||
#define DP_IPA_MAX_IFACE 3
|
||||
#define IPA_REO_DEST_RING_IDX 3
|
||||
#define IPA_REO_DEST_RING_IDX_2 7
|
||||
|
||||
#define IPA_RX_REFILL_BUF_RING_IDX 2
|
||||
|
||||
#define IPA_ALT_REO_DEST_RING_IDX 2
|
||||
#define IPA_RX_ALT_REFILL_BUF_RING_IDX 3
|
||||
|
||||
/* Adding delay before disabling ipa pipes if any Tx Completions are pending */
|
||||
#define TX_COMP_DRAIN_WAIT_MS 50
|
||||
#define TX_COMP_DRAIN_WAIT_TIMEOUT_MS 100
|
||||
|
||||
#ifdef IPA_WDI3_TX_TWO_PIPES
|
||||
#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
|
||||
/* Index into soc->tcl_data_ring[] and soc->tx_comp_ring[] */
|
||||
#define IPA_TX_ALT_RING_IDX 4
|
||||
#define IPA_TX_ALT_COMP_RING_IDX IPA_TX_ALT_RING_IDX
|
||||
#elif defined(QCA_WIFI_QCN9224)
|
||||
#define IPA_TX_ALT_RING_IDX 3
|
||||
#define IPA_TX_ALT_COMP_RING_IDX IPA_TX_ALT_RING_IDX
|
||||
#else /* !KIWI */
|
||||
#define IPA_TX_ALT_RING_IDX 1
|
||||
/*
|
||||
* must be same as IPA_TX_ALT_RING_IDX as tcl and wbm ring
|
||||
* are initialized with same index as a pair.
|
||||
*/
|
||||
#define IPA_TX_ALT_COMP_RING_IDX 1
|
||||
#endif /* KIWI */
|
||||
|
||||
#define IPA_SESSION_ID_SHIFT 1
|
||||
#endif /* IPA_WDI3_TX_TWO_PIPES */
|
||||
|
||||
/**
|
||||
* struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware
|
||||
* @eth: ether II header
|
||||
*/
|
||||
struct dp_ipa_uc_tx_hdr {
|
||||
struct ethhdr eth;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct dp_ipa_uc_tx_vlan_hdr - full tx header registered to IPA hardware
|
||||
* @eth: ether II header
|
||||
*/
|
||||
struct dp_ipa_uc_tx_vlan_hdr {
|
||||
struct vlan_ethhdr eth;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware
|
||||
* @eth: ether II header
|
||||
*/
|
||||
struct dp_ipa_uc_rx_hdr {
|
||||
struct ethhdr eth;
|
||||
} __packed;
|
||||
|
||||
#define DP_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct dp_ipa_uc_tx_hdr)
|
||||
#define DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN sizeof(struct dp_ipa_uc_tx_vlan_hdr)
|
||||
#define DP_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct dp_ipa_uc_rx_hdr)
|
||||
/* 28 <bytes of rx_msdu_end_tlv> + 16 <bytes of attn tlv> +
|
||||
* 52 <bytes of rx_mpdu_start_tlv> + <L2 Header>
|
||||
*/
|
||||
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST 110
|
||||
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN 114
|
||||
#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 0
|
||||
|
||||
#define DP_IPA_HDL_INVALID 0xFF
|
||||
#define DP_IPA_HDL_FIRST 0
|
||||
#define DP_IPA_HDL_SECOND 1
|
||||
#define DP_IPA_HDL_THIRD 2
|
||||
/**
|
||||
* wlan_ipa_get_hdl() - Get ipa handle from IPA component
|
||||
* @psoc: control psoc object
|
||||
* @pdev_id: pdev id
|
||||
*
|
||||
* IPA component will return the IPA handle based on pdev_id
|
||||
*
|
||||
* Return: IPA handle
|
||||
*/
|
||||
qdf_ipa_wdi_hdl_t wlan_ipa_get_hdl(void *psoc, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_get_resource() - Client request resource information
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* IPA client will request IPA UC related resource information
|
||||
* Resource information will be distributed to IPA module
|
||||
* All of the required resources should be pre-allocated
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
|
||||
* Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_iounmap_doorbell_vaddr() - unmap ipa RX db vaddr
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_op_response() - Handle OP command response from firmware
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
* @op_msg: op response message from firmware
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
uint8_t *op_msg);
|
||||
|
||||
/**
|
||||
* dp_ipa_register_op_cb() - Register OP handler function
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
* @op_cb: handler function pointer
|
||||
* @usr_ctxt: user context passed back to handler function
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
ipa_uc_op_cb_type op_cb, void *usr_ctxt);
|
||||
|
||||
/**
|
||||
* dp_ipa_deregister_op_cb() - Deregister OP handler function
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_get_stat() - Get firmware wdi status
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_tx_send_ipa_data_frame() - send IPA data frame
|
||||
* @soc_hdl: datapath soc handle
|
||||
* @vdev_id: virtual device/interface id
|
||||
* @skb: skb
|
||||
*
|
||||
* Return: NULL on success, skb on failure
|
||||
*/
|
||||
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
qdf_nbuf_t skb);
|
||||
|
||||
/**
|
||||
* dp_ipa_enable_autonomy() - Enable autonomy RX path
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Set all RX packet route to IPA REO ring
|
||||
* Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_ipa_disable_autonomy() - Disable autonomy RX path
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
*
|
||||
* Disable RX packet routing to IPA REO
|
||||
* Program Destination_Ring_Ctrl_IX_0 REO register to disable
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
|
||||
defined(CONFIG_IPA_WDI_UNIFIED_API)
|
||||
/**
|
||||
* dp_ipa_setup() - Setup and connect IPA pipes
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
* @ipa_i2w_cb: IPA to WLAN callback
|
||||
* @ipa_w2i_cb: WLAN to IPA callback
|
||||
* @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
|
||||
* @ipa_desc_size: IPA descriptor size
|
||||
* @ipa_priv: handle to the HTT instance
|
||||
* @is_rm_enabled: Is IPA RM enabled or not
|
||||
* @tx_pipe_handle: pointer to Tx pipe handle
|
||||
* @rx_pipe_handle: pointer to Rx pipe handle
|
||||
* @is_smmu_enabled: Is SMMU enabled or not
|
||||
* @sys_in: parameters to setup sys pipe in mcc mode
|
||||
* @over_gsi:
|
||||
* @hdl: IPA handle
|
||||
* @id: IPA instance id
|
||||
* @ipa_ast_notify_cb: IPA to WLAN callback for ast create and update
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
void *ipa_i2w_cb, void *ipa_w2i_cb,
|
||||
void *ipa_wdi_meter_notifier_cb,
|
||||
uint32_t ipa_desc_size, void *ipa_priv,
|
||||
bool is_rm_enabled, uint32_t *tx_pipe_handle,
|
||||
uint32_t *rx_pipe_handle,
|
||||
bool is_smmu_enabled,
|
||||
qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
|
||||
qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
|
||||
void *ipa_ast_notify_cb);
|
||||
#else /* CONFIG_IPA_WDI_UNIFIED_API */
|
||||
/**
|
||||
* dp_ipa_setup() - Setup and connect IPA pipes
|
||||
* @soc_hdl: data path soc handle
|
||||
* @pdev_id: device instance id
|
||||
* @ipa_i2w_cb: IPA to WLAN callback
|
||||
* @ipa_w2i_cb: WLAN to IPA callback
|
||||
* @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
|
||||
* @ipa_desc_size: IPA descriptor size
|
||||
* @ipa_priv: handle to the HTT instance
|
||||
* @is_rm_enabled: Is IPA RM enabled or not
|
||||
* @tx_pipe_handle: pointer to Tx pipe handle
|
||||
* @rx_pipe_handle: pointer to Rx pipe handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
void *ipa_i2w_cb, void *ipa_w2i_cb,
|
||||
void *ipa_wdi_meter_notifier_cb,
|
||||
uint32_t ipa_desc_size, void *ipa_priv,
|
||||
bool is_rm_enabled, uint32_t *tx_pipe_handle,
|
||||
uint32_t *rx_pipe_handle);
|
||||
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
|
||||
|
||||
/**
|
||||
* dp_ipa_cleanup() - Disconnect IPA pipes
|
||||
* @soc_hdl: dp soc handle
|
||||
* @pdev_id: dp pdev id
|
||||
* @tx_pipe_handle: Tx pipe handle
|
||||
* @rx_pipe_handle: Rx pipe handle
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
uint32_t tx_pipe_handle, uint32_t rx_pipe_handle,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
|
||||
/**
|
||||
* dp_ipa_setup_iface() - Setup IPA header and register interface
|
||||
* @ifname: Interface name
|
||||
* @mac_addr: Interface MAC address
|
||||
* @prod_client: IPA prod client type
|
||||
* @cons_client: IPA cons client type
|
||||
* @session_id: Session ID
|
||||
* @is_ipv6_enabled: Is IPV6 enabled or not
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
|
||||
qdf_ipa_client_type_t prod_client,
|
||||
qdf_ipa_client_type_t cons_client,
|
||||
uint8_t session_id, bool is_ipv6_enabled,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
|
||||
/**
|
||||
* dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
|
||||
* @ifname: Interface name
|
||||
* @is_ipv6_enabled: Is IPV6 enabled or not
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
|
||||
/**
|
||||
* dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
|
||||
* @soc_hdl: handle to the soc
|
||||
* @pdev_id: pdev id number, to get the handle
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
|
||||
/**
|
||||
* dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
|
||||
* @soc_hdl: handle to the soc
|
||||
* @pdev_id: pdev id number, to get the handle
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
|
||||
/**
|
||||
* dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
|
||||
* @client: Client type
|
||||
* @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
|
||||
* @hdl: IPA handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
|
||||
qdf_ipa_wdi_hdl_t hdl);
|
||||
#ifdef IPA_OPT_WIFI_DP
|
||||
QDF_STATUS dp_ipa_rx_super_rule_setup(struct cdp_soc_t *soc_hdl,
|
||||
void *flt_params);
|
||||
int dp_ipa_pcie_link_up(struct cdp_soc_t *soc_hdl);
|
||||
void dp_ipa_pcie_link_down(struct cdp_soc_t *soc_hdl);
|
||||
#endif
|
||||
|
||||
#ifdef QCA_SUPPORT_WDS_EXTENDED
|
||||
/**
|
||||
* dp_ipa_rx_wdsext_iface() - Forward RX exception packets to wdsext interface
|
||||
* @soc_hdl: data path SoC handle
|
||||
* @peer_id: Peer ID to get respective peer
|
||||
* @skb: socket buffer
|
||||
*
|
||||
* Return: bool
|
||||
*/
|
||||
bool dp_ipa_rx_wdsext_iface(struct cdp_soc_t *soc_hdl, uint8_t peer_id,
|
||||
qdf_nbuf_t skb);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_ipa_rx_intrabss_fwd() - Perform intra-bss fwd for IPA RX path
|
||||
*
|
||||
* @soc_hdl: data path soc handle
|
||||
* @vdev_id: virtual device/interface id
|
||||
* @nbuf: pointer to skb of ethernet packet received from IPA RX path
|
||||
* @fwd_success: pointer to indicate if skb succeeded in intra-bss TX
|
||||
*
|
||||
* This function performs intra-bss forwarding for WDI 3.0 IPA RX path.
|
||||
*
|
||||
* Return: true if packet is intra-bss fwd-ed and no need to pass to
|
||||
* network stack. false if packet needs to be passed to network stack.
|
||||
*/
|
||||
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
qdf_nbuf_t nbuf, bool *fwd_success);
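Illustrative RX-exception sketch (editor's illustration, not part of the patch); the stack-delivery helper is a hypothetical placeholder.

/* Illustrative: try intra-BSS forwarding first; only packets that were not
 * consumed are handed to the network stack.
 */
static void example_deliver_to_stack(qdf_nbuf_t nbuf)
{
	/* hypothetical placeholder for the OS-specific delivery path */
	qdf_nbuf_free(nbuf);	/* illustration only */
}

static void example_ipa_rx_exception(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	bool fwd_success = false;

	if (dp_ipa_rx_intrabss_fwd(soc_hdl, vdev_id, nbuf, &fwd_success))
		return;		/* consumed by the intra-BSS TX path */

	example_deliver_to_stack(nbuf);
}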
|
||||
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev);
|
||||
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_ipa_ring_resource_setup() - setup IPA ring resources
|
||||
* @soc: data path SoC handle
|
||||
* @pdev: DP pdev handle
|
||||
*
|
||||
* Return: status
|
||||
*/
|
||||
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
|
||||
struct dp_pdev *pdev);
|
||||
|
||||
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
|
||||
uint32_t *remap1, uint32_t *remap2);
|
||||
bool dp_ipa_is_mdm_platform(void);
|
||||
|
||||
/**
|
||||
* dp_ipa_handle_rx_reo_reinject() - Handle RX REO reinject skb buffer
|
||||
* @soc: soc
|
||||
* @nbuf: skb
|
||||
*
|
||||
* Return: nbuf if success and otherwise NULL
|
||||
*/
|
||||
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf);
|
||||
|
||||
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
|
||||
qdf_nbuf_t nbuf,
|
||||
uint32_t size,
|
||||
bool create,
|
||||
const char *func,
|
||||
uint32_t line);
|
||||
/**
|
||||
* dp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
|
||||
* allocated TX buffers
|
||||
* @soc_hdl: handle to the soc
|
||||
* @pdev_id: pdev id number, to get the handle
|
||||
* @func: caller function
|
||||
* @line: line number
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id, const char *func,
|
||||
uint32_t line);
|
||||
|
||||
/**
|
||||
* dp_ipa_tx_buf_smmu_unmapping() - Release SMMU mappings for IPA
|
||||
* allocated TX buffers
|
||||
* @soc_hdl: handle to the soc
|
||||
* @pdev_id: pdev id number, to get the handle
|
||||
* @func: caller function
|
||||
* @line: line number
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id, const char *func,
|
||||
uint32_t line);
|
||||
QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id,
|
||||
bool create,
|
||||
const char *func,
|
||||
uint32_t line);
|
||||
QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val);
|
||||
int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc);
|
||||
|
||||
#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
|
||||
{
|
||||
if (soc->ipa_rx_buf_map_lock_initialized)
|
||||
qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
|
||||
{
|
||||
if (soc->ipa_rx_buf_map_lock_initialized)
|
||||
qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
|
||||
return;
|
||||
|
||||
qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
|
||||
return;
|
||||
|
||||
qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
|
||||
}
|
||||
#else
|
||||
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
}
|
||||
#endif
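Illustrative caller pattern (editor's sketch, not part of the patch): the lock helpers above are meant to bracket the RX buffer SMMU mapping call declared earlier in this header; pairing them this way is an assumption.

/* Illustrative: serialize an RX buffer SMMU map/unmap against IPA pipe
 * operations using the lock helpers above.
 */
static inline QDF_STATUS
example_rx_buf_smmu_map(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t size,
			bool create)
{
	QDF_STATUS status;

	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	status = dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, size, create,
						   __func__, __LINE__);
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);

	return status;
}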
|
||||
|
||||
#ifdef IPA_WDS_EASYMESH_FEATURE
|
||||
/**
|
||||
* dp_ipa_ast_create() - Create/update AST entry in AST table
|
||||
* for learning/roaming packets from IPA
|
||||
* @soc_hdl: data path soc handle
|
||||
* @data: Structure used for updating the AST table
|
||||
*
|
||||
* Create/update AST entry in AST table for learning/roaming packets from IPA
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
|
||||
qdf_ipa_ast_info_type_t *data);
|
||||
|
||||
/**
|
||||
* dp_ipa_ast_notify_cb() - Provide ast notify cb to IPA
|
||||
* @pipe_in: WDI conn pipe in params
|
||||
* @ipa_ast_notify_cb: ipa ast notify cb
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in,
|
||||
void *ipa_ast_notify_cb)
|
||||
{
|
||||
QDF_IPA_WDI_CONN_IN_PARAMS_AST_NOTIFY(pipe_in) = ipa_ast_notify_cb;
|
||||
}
|
||||
#else
|
||||
static inline void
|
||||
dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in,
|
||||
void *ipa_ast_notify_cb)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef IPA_OPT_WIFI_DP
|
||||
static inline void dp_ipa_opt_dp_ixo_remap(uint8_t *ix0_map)
|
||||
{
|
||||
ix0_map[0] = REO_REMAP_SW1;
|
||||
ix0_map[1] = REO_REMAP_SW1;
|
||||
ix0_map[2] = REO_REMAP_SW2;
|
||||
ix0_map[3] = REO_REMAP_SW3;
|
||||
ix0_map[4] = REO_REMAP_SW4;
|
||||
ix0_map[5] = REO_REMAP_RELEASE;
|
||||
ix0_map[6] = REO_REMAP_FW;
|
||||
ix0_map[7] = REO_REMAP_FW;
|
||||
}
|
||||
#else
|
||||
static inline void dp_ipa_opt_dp_ixo_remap(uint8_t *ix0_map)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#ifdef QCA_ENHANCED_STATS_SUPPORT
|
||||
/**
|
||||
* dp_ipa_txrx_get_peer_stats - fetch peer stats
|
||||
* @soc: soc handle
|
||||
* @vdev_id: id of vdev handle
|
||||
* @peer_mac: peer mac address
|
||||
* @peer_stats: buffer to hold peer stats
|
||||
*
|
||||
* Return: status success/failure
|
||||
*/
|
||||
QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac,
|
||||
struct cdp_peer_stats *peer_stats);
|
||||
|
||||
/**
|
||||
* dp_ipa_txrx_get_vdev_stats - fetch vdev stats
|
||||
* @soc_hdl: soc handle
|
||||
* @vdev_id: id of vdev handle
|
||||
* @buf: buffer to hold vdev stats
|
||||
* @is_aggregate: for aggregation
|
||||
*
|
||||
* Return: int
|
||||
*/
|
||||
int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
void *buf, bool is_aggregate);
|
||||
|
||||
/**
|
||||
* dp_ipa_txrx_get_pdev_stats() - fetch pdev stats
|
||||
* @soc: DP soc handle
|
||||
* @pdev_id: id of DP pdev handle
|
||||
* @pdev_stats: buffer to hold pdev stats
|
||||
*
|
||||
* Return: status success/failure
|
||||
*/
|
||||
QDF_STATUS dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
|
||||
struct cdp_pdev_stats *pdev_stats);
|
||||
|
||||
/**
|
||||
* dp_ipa_update_peer_rx_stats() - update peer rx stats
|
||||
* @soc: soc handle
|
||||
* @vdev_id: vdev id
|
||||
* @peer_mac: Peer Mac Address
|
||||
* @nbuf: data nbuf
|
||||
*
|
||||
* Return: status success/failure
|
||||
*/
|
||||
QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac, qdf_nbuf_t nbuf);
|
||||
#endif
|
||||
/**
|
||||
* dp_ipa_get_wdi_version() - Get WDI version
|
||||
* @soc_hdl: data path soc handle
|
||||
* @wdi_ver: Out parameter for wdi version
|
||||
*
|
||||
* Get WDI version based on soc arch
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl, uint8_t *wdi_ver);
|
||||
|
||||
/**
|
||||
* dp_ipa_is_ring_ipa_tx() - Check if the TX ring is used by IPA
|
||||
*
|
||||
* @soc: DP SoC
|
||||
* @ring_id: TX ring id
|
||||
*
|
||||
* Return: bool
|
||||
*/
|
||||
bool dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id);
|
||||
#else
|
||||
static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc,
|
||||
struct dp_pdev *pdev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
|
||||
qdf_nbuf_t nbuf,
|
||||
uint32_t size,
|
||||
bool create,
|
||||
const char *func,
|
||||
uint32_t line)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
|
||||
uint32_t reo_ring_num)
|
||||
{
|
||||
}
|
||||
|
||||
static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
|
||||
qdf_nbuf_t nbuf)
|
||||
{
|
||||
return nbuf;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id,
|
||||
const char *func,
|
||||
uint32_t line)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id,
|
||||
const char *func,
|
||||
uint32_t line)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
|
||||
struct cdp_soc_t *soc_hdl,
|
||||
uint8_t pdev_id,
|
||||
bool create,
|
||||
const char *func,
|
||||
uint32_t line)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
#ifdef IPA_WDS_EASYMESH_FEATURE
|
||||
static inline QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
|
||||
qdf_ipa_ast_info_type_t *data)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
static inline void dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t *wdi_ver)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool
|
||||
dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
#endif /* _DP_IPA_H_ */
|
14913
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c
Normal file
File diff suppressed because it is too large
4264
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c
Normal file
File diff suppressed because it is too large
2616
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h
Normal file
File diff suppressed because it is too large
204
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "dp_types.h"
|
||||
#include "hal_reo.h"
|
||||
#include "dp_internal.h"
|
||||
#include <qdf_time.h>
|
||||
|
||||
#define dp_reo_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_REO, params)
|
||||
#define dp_reo_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_REO, params)
|
||||
#define dp_reo_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_REO, params)
|
||||
#define dp_reo_info(params...) \
|
||||
__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_REO, ## params)
|
||||
#define dp_reo_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_REO, params)
|
||||
|
||||
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
|
||||
/**
|
||||
* dp_reo_cmd_srng_event_record() - Record reo cmds posted
|
||||
* to the reo cmd ring
|
||||
* @soc: dp soc handle
|
||||
* @type: reo cmd type
|
||||
* @post_status: command error status
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static
|
||||
void dp_reo_cmd_srng_event_record(struct dp_soc *soc,
|
||||
enum hal_reo_cmd_type type,
|
||||
int post_status)
|
||||
{
|
||||
struct reo_cmd_event_history *cmd_event_history =
|
||||
&soc->stats.cmd_event_history;
|
||||
struct reo_cmd_event_record *record = cmd_event_history->cmd_record;
|
||||
int record_index;
|
||||
|
||||
record_index = (qdf_atomic_inc_return(&cmd_event_history->index)) &
|
||||
(REO_CMD_EVENT_HIST_MAX - 1);
|
||||
|
||||
record[record_index].cmd_type = type;
|
||||
record[record_index].cmd_return_status = post_status;
|
||||
record[record_index].timestamp = qdf_get_log_timestamp();
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
void dp_reo_cmd_srng_event_record(struct dp_soc *soc,
|
||||
enum hal_reo_cmd_type type,
|
||||
int post_status)
|
||||
{
|
||||
}
|
||||
#endif /*WLAN_FEATURE_DP_EVENT_HISTORY */
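/*
 * Illustrative note (not part of the original file): the record index above
 * is wrapped with "& (REO_CMD_EVENT_HIST_MAX - 1)" instead of a modulo,
 * which is only correct when REO_CMD_EVENT_HIST_MAX is a power of two. A
 * compile-time guard along these lines would make that assumption explicit
 * (QDF_COMPILE_TIME_ASSERT is assumed to be available from qdf_types.h):
 *
 *   QDF_COMPILE_TIME_ASSERT(reo_cmd_event_hist_pow2,
 *                           !(REO_CMD_EVENT_HIST_MAX &
 *                             (REO_CMD_EVENT_HIST_MAX - 1)));
 */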
|
||||
|
||||
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||
void dp_pause_reo_send_cmd(struct dp_soc *soc)
|
||||
{
|
||||
hal_unregister_reo_send_cmd(soc->hal_soc);
|
||||
}
|
||||
|
||||
void dp_resume_reo_send_cmd(struct dp_soc *soc)
|
||||
{
|
||||
hal_register_reo_send_cmd(soc->hal_soc);
|
||||
}
|
||||
|
||||
void
|
||||
dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr,
|
||||
uint32_t size)
|
||||
{
|
||||
hal_reset_rx_reo_tid_queue(soc->hal_soc, hw_qdesc_vaddr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
|
||||
struct hal_reo_cmd_params *params,
|
||||
void (*callback_fn), void *data)
|
||||
{
|
||||
struct dp_reo_cmd_info *reo_cmd;
|
||||
int num;
|
||||
|
||||
num = hal_reo_send_cmd(soc->hal_soc, soc->reo_cmd_ring.hal_srng, type,
params);

dp_reo_cmd_srng_event_record(soc, type, num);

if (num < 0) {
return QDF_STATUS_E_FAILURE;
}
|
||||
|
||||
if (callback_fn) {
|
||||
reo_cmd = qdf_mem_malloc(sizeof(*reo_cmd));
|
||||
if (!reo_cmd) {
|
||||
dp_err_log("alloc failed for REO cmd:%d!!",
|
||||
type);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
reo_cmd->cmd = num;
|
||||
reo_cmd->cmd_type = type;
|
||||
reo_cmd->handler = callback_fn;
|
||||
reo_cmd->data = data;
|
||||
qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
|
||||
TAILQ_INSERT_TAIL(&soc->rx.reo_cmd_list, reo_cmd,
|
||||
reo_cmd_list_elem);
|
||||
qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
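/*
 * Illustrative usage sketch (not part of the original file): posting a REO
 * cache flush command with a completion callback. The function names here
 * are made up for the example, and the hal_reo_cmd_params fields follow the
 * pattern used by existing callers; treat both as assumptions.
 */
static void dp_reo_example_flush_cb(struct dp_soc *soc, void *cb_ctxt,
				    union hal_reo_status *reo_status)
{
	if (reo_status->fl_cache_status.header.status != HAL_REO_CMD_SUCCESS)
		dp_reo_err("example flush failed, ctxt %pK", cb_ctxt);
}

static QDF_STATUS dp_reo_example_flush_queue(struct dp_soc *soc,
					     qdf_dma_addr_t hw_qdesc_paddr)
{
	struct hal_reo_cmd_params params = {0};

	params.std.need_status = 1;
	params.std.addr_lo = hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)hw_qdesc_paddr >> 32;

	return dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params,
			       dp_reo_example_flush_cb, NULL);
}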
|
||||
|
||||
uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, struct dp_soc *soc)
|
||||
{
|
||||
hal_ring_desc_t reo_desc;
|
||||
struct dp_reo_cmd_info *reo_cmd = NULL;
|
||||
union hal_reo_status reo_status;
|
||||
int num;
|
||||
int processed_count = 0;
|
||||
|
||||
if (dp_srng_access_start(int_ctx, soc, soc->reo_status_ring.hal_srng)) {
|
||||
return processed_count;
|
||||
}
|
||||
reo_desc = hal_srng_dst_get_next(soc->hal_soc,
|
||||
soc->reo_status_ring.hal_srng);
|
||||
|
||||
while (reo_desc) {
|
||||
uint16_t tlv = HAL_GET_TLV(reo_desc);
|
||||
QDF_STATUS status;
|
||||
|
||||
processed_count++;
|
||||
|
||||
status = hal_reo_status_update(soc->hal_soc,
|
||||
reo_desc,
|
||||
&reo_status, tlv, &num);
|
||||
if (status != QDF_STATUS_SUCCESS)
|
||||
goto next;
|
||||
|
||||
qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
|
||||
TAILQ_FOREACH(reo_cmd, &soc->rx.reo_cmd_list,
|
||||
reo_cmd_list_elem) {
|
||||
if (reo_cmd->cmd == num) {
|
||||
TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd,
|
||||
reo_cmd_list_elem);
|
||||
break;
|
||||
}
|
||||
}
|
||||
qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
|
||||
|
||||
if (reo_cmd) {
|
||||
reo_cmd->handler(soc, reo_cmd->data,
|
||||
&reo_status);
|
||||
qdf_mem_free(reo_cmd);
|
||||
}
|
||||
|
||||
next:
|
||||
reo_desc = hal_srng_dst_get_next(soc->hal_soc,
soc->reo_status_ring.hal_srng);
|
||||
} /* while */
|
||||
|
||||
dp_srng_access_end(int_ctx, soc, soc->reo_status_ring.hal_srng);
|
||||
return processed_count;
|
||||
}
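/*
 * Note on the status handling above: each status TLV carries the command
 * number that hal_reo_send_cmd() returned when the command was posted, so
 * the matching dp_reo_cmd_info is removed from reo_cmd_list under
 * reo_cmd_lock and its callback runs exactly once per posted command.
 */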
|
||||
|
||||
void dp_reo_cmdlist_destroy(struct dp_soc *soc)
|
||||
{
|
||||
struct dp_reo_cmd_info *reo_cmd = NULL;
|
||||
struct dp_reo_cmd_info *tmp_cmd = NULL;
|
||||
union hal_reo_status reo_status;
|
||||
|
||||
reo_status.queue_status.header.status =
|
||||
HAL_REO_CMD_DRAIN;
|
||||
|
||||
qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
|
||||
TAILQ_FOREACH_SAFE(reo_cmd, &soc->rx.reo_cmd_list,
|
||||
reo_cmd_list_elem, tmp_cmd) {
|
||||
TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd,
|
||||
reo_cmd_list_elem);
|
||||
reo_cmd->handler(soc, reo_cmd->data, &reo_status);
|
||||
qdf_mem_free(reo_cmd);
|
||||
}
|
||||
qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
|
||||
}
|
||||
|
||||
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||
void dp_cleanup_reo_cmd_module(struct dp_soc *soc)
|
||||
{
|
||||
dp_reo_cmdlist_destroy(soc);
|
||||
dp_reo_desc_freelist_destroy(soc);
|
||||
}
|
||||
#endif
|
888
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rings.h
Normal file
@ -0,0 +1,888 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_RINGS_H_
|
||||
#define _DP_RINGS_H_
|
||||
|
||||
#include <dp_types.h>
|
||||
#include <dp_internal.h>
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include <dp_mon.h>
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
|
||||
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
|
||||
struct dp_intr *intr_ctx)
|
||||
{
|
||||
if (intr_ctx->rx_mon_ring_mask)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
|
||||
struct dp_intr *intr_ctx)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef QCA_HOST_MODE_WIFI_DISABLED
|
||||
|
||||
/**
|
||||
* dp_srng_get_cpu() - Get the smp processor id for srng processing
|
||||
*
|
||||
* Return: smp processor id
|
||||
*/
|
||||
static inline int dp_srng_get_cpu(void)
|
||||
{
|
||||
return qdf_get_cpu();
|
||||
}
|
||||
|
||||
#else /* QCA_HOST_MODE_WIFI_DISABLED */
|
||||
|
||||
/**
|
||||
* dp_srng_get_cpu() - Get the smp processor id for srng processing
|
||||
*
|
||||
* Return: smp processor id
|
||||
*/
|
||||
static inline int dp_srng_get_cpu(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
|
||||
|
||||
/**
|
||||
* dp_interrupt_timer() - timer poll for interrupts
|
||||
* @arg: SoC Handle
|
||||
*
|
||||
* Return:
|
||||
*
|
||||
*/
|
||||
void dp_interrupt_timer(void *arg);
|
||||
|
||||
/**
|
||||
* dp_soc_print_inactive_objects() - prints inactive peer and vdev list
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
*/
|
||||
void dp_soc_print_inactive_objects(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_get_tx_pending() - read pending tx
|
||||
* @pdev_handle: Datapath PDEV handle
|
||||
*
|
||||
* Return: outstanding tx
|
||||
*/
|
||||
int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle);
|
||||
|
||||
/**
|
||||
* dp_find_missing_tx_comp() - check for leaked descriptor in tx path
|
||||
* @soc: DP SOC context
|
||||
*
|
||||
* Parse through descriptors in all pools and validate magic number and
|
||||
* completion time. Trigger self recovery if magic value is corrupted.
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
void dp_find_missing_tx_comp(struct dp_soc *soc);
|
||||
|
||||
void dp_enable_verbose_debug(struct dp_soc *soc);
|
||||
|
||||
QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer);
|
||||
|
||||
uint32_t dp_service_srngs_wrapper(void *dp_ctx, uint32_t dp_budget, int cpu);
|
||||
|
||||
void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
|
||||
int *irq_id_map, int *num_irq);
|
||||
void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type, int ring_num);
|
||||
void
|
||||
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type, int ring_num,
|
||||
int num_entries);
|
||||
|
||||
int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget);
|
||||
|
||||
/**
|
||||
* dp_service_lmac_rings()- timer to reap lmac rings
|
||||
* @arg: SoC Handle
|
||||
*
|
||||
* Return:
|
||||
*
|
||||
*/
|
||||
void dp_service_lmac_rings(void *arg);
|
||||
|
||||
/**
|
||||
* dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
|
||||
* @dp_ctx: DP SOC handle
|
||||
* @dp_budget: Number of frames/descriptors that can be processed in one shot
|
||||
* @cpu: CPU on which this instance is running
|
||||
*
|
||||
* Return: remaining budget/quota for the soc device
|
||||
*/
|
||||
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu);
|
||||
|
||||
/**
|
||||
* dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
|
||||
* @soc: DP soc handle
|
||||
*
|
||||
* Set the appropriate interrupt mode flag in the soc
|
||||
*/
|
||||
void dp_soc_set_interrupt_mode(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
|
||||
* @ring_num: ring num of the ring being queried
|
||||
* @grp_mask: the grp_mask array for the ring type in question.
|
||||
*
|
||||
* The grp_mask array is indexed by group number and the bit fields correspond
|
||||
* to ring numbers. We are finding which interrupt group a ring belongs to.
|
||||
*
|
||||
* Return: the index in the grp_mask array with the ring number.
|
||||
* -QDF_STATUS_E_NOENT if no entry is found
|
||||
*/
|
||||
static inline int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
|
||||
{
|
||||
int ext_group_num;
|
||||
uint8_t mask = 1 << ring_num;
|
||||
|
||||
for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
|
||||
ext_group_num++) {
|
||||
if (mask & grp_mask[ext_group_num])
|
||||
return ext_group_num;
|
||||
}
|
||||
|
||||
return -QDF_STATUS_E_NOENT;
|
||||
}
|
||||
|
||||
/* MCL specific functions */
|
||||
#if defined(DP_CON_MON)
|
||||
|
||||
#ifdef DP_CON_MON_MSI_ENABLED
|
||||
/**
|
||||
* dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
|
||||
* @soc: pointer to dp_soc handle
|
||||
* @intr_ctx_num: interrupt context number for which mon mask is needed
|
||||
*
|
||||
* For MCL, monitor mode rings are being processed in timer contexts (polled).
|
||||
* This function is returning 0, since in interrupt mode(softirq based RX),
|
||||
* we donot want to process monitor mode rings in a softirq.
|
||||
*
|
||||
* So, in case packet log is enabled for SAP/STA/P2P modes,
|
||||
* regular interrupt processing will not process monitor mode rings. It would be
|
||||
* done in a separate timer context.
|
||||
*
|
||||
* Return: 0
|
||||
*/
|
||||
static inline uint32_t
|
||||
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
|
||||
{
|
||||
return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
|
||||
}
|
||||
#else
|
||||
/**
|
||||
* dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
|
||||
* @soc: pointer to dp_soc handle
|
||||
* @intr_ctx_num: interrupt context number for which mon mask is needed
|
||||
*
|
||||
* For MCL, monitor mode rings are being processed in timer contexts (polled).
|
||||
* This function is returning 0, since in interrupt mode(softirq based RX),
|
||||
* we donot want to process monitor mode rings in a softirq.
|
||||
*
|
||||
* So, in case packet log is enabled for SAP/STA/P2P modes,
|
||||
* regular interrupt processing will not process monitor mode rings. It would be
|
||||
* done in a separate timer context.
|
||||
*
|
||||
* Return: 0
|
||||
*/
|
||||
static inline uint32_t
|
||||
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
/**
|
||||
* dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
|
||||
* @soc: pointer to dp_soc handle
|
||||
* @intr_ctx_num: interrupt context number for which mon mask is needed
|
||||
*
|
||||
* Return: mon mask value
|
||||
*/
|
||||
static inline
|
||||
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc,
|
||||
int intr_ctx_num)
|
||||
{
|
||||
return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef DISABLE_MON_RING_MSI_CFG
|
||||
/**
|
||||
* dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
|
||||
* @soc: DP SoC context
|
||||
* @ring_type: sring type
|
||||
*
|
||||
* Return: True if msi cfg should be skipped for srng type else false
|
||||
*/
|
||||
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
|
||||
{
|
||||
if (ring_type == RXDMA_MONITOR_STATUS)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
#ifdef DP_CON_MON_MSI_ENABLED
|
||||
#ifdef WLAN_SOFTUMAC_SUPPORT
|
||||
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
|
||||
{
|
||||
if (soc->cdp_soc.ol_ops->get_con_mode &&
|
||||
soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
|
||||
if (ring_type != RXDMA_MONITOR_STATUS)
|
||||
return true;
|
||||
} else if (ring_type == RXDMA_MONITOR_STATUS &&
|
||||
!dp_mon_mode_local_pkt_capture(soc)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
|
||||
{
|
||||
if (soc->cdp_soc.ol_ops->get_con_mode &&
|
||||
soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
|
||||
if (ring_type == REO_DST || ring_type == RXDMA_DST)
|
||||
return true;
|
||||
} else if (ring_type == RXDMA_MONITOR_STATUS &&
|
||||
!dp_mon_mode_local_pkt_capture(soc)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* DP_CON_MON_MSI_ENABLED */
|
||||
#endif /* DISABLE_MON_RING_MSI_CFG */
|
||||
|
||||
/**
|
||||
* dp_is_msi_group_number_invalid() - check msi_group_number valid or not
|
||||
* @soc: dp_soc
|
||||
* @msi_group_number: MSI group number.
|
||||
* @msi_data_count: MSI data count.
|
||||
*
|
||||
* Return: true if msi_group_number is invalid.
|
||||
*/
|
||||
static inline bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
|
||||
int msi_group_number,
|
||||
int msi_data_count)
|
||||
{
|
||||
if (soc && soc->osdev && soc->osdev->dev &&
|
||||
pld_is_one_msi(soc->osdev->dev))
|
||||
return false;
|
||||
|
||||
return msi_group_number > msi_data_count;
|
||||
}
|
||||
|
||||
#ifndef WLAN_SOFTUMAC_SUPPORT
|
||||
/**
|
||||
* dp_soc_attach_poll() - Register handlers for DP interrupts
|
||||
* @txrx_soc: DP SOC handle
|
||||
*
|
||||
* Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
|
||||
* contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
|
||||
* rx_monitor_ring mask to indicate the rings that are processed by the handler.
|
||||
*
|
||||
* Return: 0 for success, nonzero for failure.
|
||||
*/
|
||||
QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc);
|
||||
|
||||
/**
|
||||
* dp_soc_interrupt_attach() - Register handlers for DP interrupts
|
||||
* @txrx_soc: DP SOC handle
|
||||
*
|
||||
* Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
|
||||
* contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
|
||||
* rx_monitor_ring mask to indicate the rings that are processed by the handler.
|
||||
*
|
||||
* Return: 0 for success. nonzero for failure.
|
||||
*/
|
||||
QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
|
||||
|
||||
/**
|
||||
* dp_hw_link_desc_ring_free() - Free h/w link desc rings
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_hw_link_desc_ring_free(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Allocate memory for WBM_IDLE_LINK srng ring if the number of
|
||||
* link descriptors is less than the max_allocated size, else
|
||||
* allocate memory for wbm_idle_scatter_buffer.
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS: success
|
||||
* QDF_STATUS_E_NO_MEM: No memory (Failure)
|
||||
*/
|
||||
QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_hw_link_desc_ring_init() - Initialize hw link desc rings
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS: success
|
||||
* QDF_STATUS_E_FAILURE: failure
|
||||
*/
|
||||
QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_ipa_hal_tx_init_alt_data_ring() - IPA hal init data rings
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc);
|
||||
|
||||
/*
|
||||
* dp_soc_reset_ring_map() - Reset cpu ring map
|
||||
* @soc: Datapath soc handler
|
||||
*
|
||||
* This api resets the default cpu ring map
|
||||
*/
|
||||
void dp_soc_reset_cpu_ring_map(struct dp_soc *soc);
|
||||
|
||||
/*
|
||||
* dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
|
||||
* ring for vlan tagged traffic
|
||||
* @dp_soc - DP Soc handle
|
||||
*
|
||||
* Return: Return void
|
||||
*/
|
||||
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc);
|
||||
|
||||
/*
|
||||
* dp_soc_reset_intr_mask() - reset interrupt mask
|
||||
* @dp_soc - DP Soc handle
|
||||
*
|
||||
* Return: Return void
|
||||
*/
|
||||
void dp_soc_reset_intr_mask(struct dp_soc *soc);
|
||||
|
||||
/*
|
||||
* dp_reo_frag_dst_set() - configure reo register to set the
|
||||
* fragment destination ring
|
||||
* @soc : Datapath soc
|
||||
* @frag_dst_ring : output parameter to set fragment destination ring
|
||||
*
|
||||
* Based on offload_radio below fragment destination rings is selected
|
||||
* 0 - TCL
|
||||
* 1 - SW1
|
||||
* 2 - SW2
|
||||
* 3 - SW3
|
||||
* 4 - SW4
|
||||
* 5 - Release
|
||||
* 6 - FW
|
||||
* 7 - alternate select
|
||||
*
|
||||
* return: void
|
||||
*/
|
||||
void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring);
|
||||
|
||||
/**
|
||||
* dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
|
||||
* @pdev: DP_PDEV handle
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void
|
||||
dp_dscp_tid_map_setup(struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
|
||||
* @pdev: DP_PDEV handle
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void
|
||||
dp_pcp_tid_map_setup(struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_soc_deinit() - Deinitialize txrx SOC
|
||||
* @txrx_soc: Opaque DP SOC handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_soc_deinit(void *txrx_soc);
|
||||
|
||||
#ifdef QCA_HOST2FW_RXBUF_RING
|
||||
void
|
||||
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
|
||||
int lmac_id);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* dp_peer_setup_wifi3() - initialize the peer
|
||||
* @soc_hdl: soc handle object
|
||||
* @vdev_id : vdev_id of vdev object
|
||||
* @peer_mac: Peer's mac address
|
||||
* @peer_setup_info: peer setup info for MLO
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
uint8_t *peer_mac,
|
||||
struct cdp_peer_setup_info *setup_info);
|
||||
|
||||
uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl);
|
||||
|
||||
/*
|
||||
* dp_set_ba_aging_timeout() - set ba aging timeout per AC
|
||||
* @txrx_soc: cdp soc handle
|
||||
* @ac: Access category
|
||||
* @value: timeout value in millisec
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
|
||||
uint8_t ac, uint32_t value);
|
||||
|
||||
/*
|
||||
* dp_get_ba_aging_timeout() - get ba aging timeout per AC
|
||||
* @txrx_soc: cdp soc handle
|
||||
* @ac: access category
|
||||
* @value: timeout value in millisec
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
|
||||
uint8_t ac, uint32_t *value);
|
||||
|
||||
/*
|
||||
* dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
|
||||
* @txrx_soc: cdp soc handle
|
||||
* @pdev_id: id of physical device object
|
||||
* @val: reo destination ring index (1 - 4)
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
|
||||
enum cdp_host_reo_dest_ring val);
|
||||
|
||||
/*
|
||||
* dp_get_pdev_reo_dest() - get the reo destination for this pdev
|
||||
* @txrx_soc: cdp soc handle
|
||||
* @pdev_id: id of physical device object
|
||||
*
|
||||
* Return: reo destination ring index
|
||||
*/
|
||||
enum cdp_host_reo_dest_ring
|
||||
dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id);
|
||||
|
||||
/**
|
||||
* dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
|
||||
* @psoc: dp soc handle
|
||||
* @pdev_id: id of DP_PDEV handle
|
||||
* @pcp: pcp value
|
||||
* @tid: tid value passed by the user
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS on success
|
||||
*/
|
||||
QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
|
||||
uint8_t pdev_id,
|
||||
uint8_t pcp, uint8_t tid);
|
||||
|
||||
/**
|
||||
* dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
|
||||
* @soc_hdl: DP soc handle
|
||||
* @vdev_id: id of DP_VDEV handle
|
||||
* @pcp: pcp value
|
||||
* @tid: tid value passed by the user
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS on success
|
||||
*/
|
||||
QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
|
||||
uint8_t vdev_id,
|
||||
uint8_t pcp, uint8_t tid);
|
||||
|
||||
/**
* dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
|
||||
* @soc: dp soc.
|
||||
* @pdev: dp pdev.
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
void
|
||||
dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_display_srng_info() - Dump the srng HP TP info
|
||||
* @soc_hdl: CDP Soc handle
|
||||
*
|
||||
* This function dumps the SW hp/tp values for the important rings.
|
||||
* HW hp/tp values are not being dumped, since it can lead to
|
||||
* READ NOC error when UMAC is in low power state. MCC does not have
|
||||
* device force wake working yet.
|
||||
*
|
||||
* Return: rings are empty
|
||||
*/
|
||||
bool dp_display_srng_info(struct cdp_soc_t *soc_hdl);
|
||||
|
||||
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
|
||||
QDF_STATUS dp_drain_txrx(struct cdp_soc_t *soc_handle, uint8_t rx_only);
|
||||
|
||||
/*
|
||||
* dp_update_ring_hptp() - update dp rings hptp
|
||||
* @soc: dp soc handler
|
||||
* @force_flush_tx: force flush the Tx ring hp
|
||||
*/
|
||||
void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* dp_flush_tcl_ring() - flush TCL ring hp
|
||||
* @pdev: dp pdev
|
||||
* @ring_id: TCL ring id
|
||||
*
|
||||
* Return: 0 on success and error code on failure
|
||||
*/
|
||||
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id);
|
||||
|
||||
#ifdef WLAN_FEATURE_STATS_EXT
|
||||
/**
|
||||
* dp_request_rx_hw_stats - request rx hardware stats
|
||||
* @soc_hdl: soc handle
|
||||
* @vdev_id: vdev id
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
|
||||
* @soc_hdl: soc handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl);
|
||||
|
||||
void dp_pdev_set_default_reo(struct dp_pdev *pdev);
|
||||
|
||||
/**
|
||||
* dp_soc_init() - Initialize txrx SOC
|
||||
* @soc: Opaque DP SOC handle
|
||||
* @htc_handle: Opaque HTC handle
|
||||
* @hif_handle: Opaque HIF handle
|
||||
*
|
||||
* Return: DP SOC handle on success, NULL on failure
|
||||
*/
|
||||
void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
|
||||
struct hif_opaque_softc *hif_handle);
|
||||
|
||||
void dp_tx_init_cmd_credit_ring(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_srng_deinit() - de-initialize soc srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
*/
|
||||
void dp_soc_srng_deinit(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_srng_init() - Initialize soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
* return: QDF_STATUS_SUCCESS on success
|
||||
* QDF_STATUS_E_FAILURE on failure
|
||||
*/
|
||||
QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_srng_free() - free soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
*/
|
||||
void dp_soc_srng_free(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_srng_alloc() - Allocate memory for soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
* return: QDF_STATUS_SUCCESS on success
|
||||
* QDF_STATUS_E_NOMEM on failure
|
||||
*/
|
||||
QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_cfg_attach() - set target specific configuration in
|
||||
* dp soc cfg.
|
||||
* @soc: dp soc handle
|
||||
*/
|
||||
void dp_soc_cfg_attach(struct dp_soc *soc);
|
||||
|
||||
#else /* WLAN_SOFTUMAC_SUPPORT */
|
||||
static inline void dp_hw_link_desc_ring_free(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_soc_reset_intr_mask(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
|
||||
* @pdev: DP_PDEV handle
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
dp_dscp_tid_map_setup(struct dp_pdev *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
|
||||
* @pdev: DP_PDEV handle
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
dp_pcp_tid_map_setup(struct dp_pdev *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef QCA_HOST2FW_RXBUF_RING
|
||||
static inline void
|
||||
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
|
||||
int lmac_id)
|
||||
{
|
||||
if ((soc->cdp_soc.ol_ops->get_con_mode &&
|
||||
soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) &&
|
||||
soc->rxdma_err_dst_ring[lmac_id].hal_srng)
|
||||
htt_srng_setup(soc->htt_handle, mac_id,
|
||||
soc->rxdma_err_dst_ring[lmac_id].hal_srng,
|
||||
RXDMA_DST);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
* dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
|
||||
* @soc: dp soc.
|
||||
* @pdev: dp pdev.
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
static inline void
|
||||
dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_pdev_set_default_reo(struct dp_pdev *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_soc_srng_deinit() - de-initialize soc srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
*/
|
||||
static inline void dp_soc_srng_deinit(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_soc_srng_init() - Initialize soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
* return: QDF_STATUS_SUCCESS on success
|
||||
* QDF_STATUS_E_FAILURE on failure
|
||||
*/
|
||||
static inline QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
|
||||
{
|
||||
dp_enable_verbose_debug(soc);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_soc_srng_free() - free soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
*/
|
||||
static inline void dp_soc_srng_free(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_soc_srng_alloc() - Allocate memory for soc level srng rings
|
||||
* @soc: Datapath soc handle
|
||||
*
|
||||
* return: QDF_STATUS_SUCCESS on success
|
||||
* QDF_STATUS_E_NOMEM on failure
|
||||
*/
|
||||
static inline QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_display_srng_info() - Dump the ring Read/Write idx info
|
||||
* @soc_hdl: CDP Soc handle
|
||||
*
|
||||
* This function dumps the SW Read/Write idx for the important rings.
|
||||
*
|
||||
* Return: rings are empty
|
||||
*/
|
||||
static inline bool dp_display_srng_info(struct cdp_soc_t *soc_hdl)
|
||||
{
|
||||
/*TODO add support display SOFTUMAC data rings info*/
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
|
||||
static inline QDF_STATUS dp_drain_txrx(struct cdp_soc_t *soc_handle,
|
||||
uint8_t rx_only)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
#endif /* WLAN_SOFTUMAC_SUPPORT */
|
||||
|
||||
#if defined(WLAN_FEATURE_NEAR_FULL_IRQ) && !defined(WLAN_SOFTUMAC_SUPPORT)
|
||||
void dp_srng_msi2_setup(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type, int ring_num, int nf_msi_grp_num);
|
||||
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
qdf_dma_addr_t msi2_addr,
|
||||
uint32_t msi2_data);
|
||||
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
|
||||
enum hal_ring_type ring_type,
|
||||
int ring_num);
|
||||
void
|
||||
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type);
|
||||
#else
|
||||
static inline void
|
||||
dp_srng_msi2_setup(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type, int ring_num, int nf_msi_grp_num)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_srng_set_msi2_ring_params(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
qdf_dma_addr_t msi2_addr,
|
||||
uint32_t msi2_data)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
|
||||
enum hal_ring_type ring_type,
|
||||
int ring_num)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
|
||||
struct hal_srng_params *ring_params,
|
||||
int ring_type)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_SUPPORT_DPDK
|
||||
/*
|
||||
* dp_soc_reset_dpdk_intr_mask() - reset interrupt mask
|
||||
* @dp_soc - DP Soc handle
|
||||
*
|
||||
* Return: Return void
|
||||
*/
|
||||
void dp_soc_reset_dpdk_intr_mask(struct dp_soc *soc);
|
||||
#else
|
||||
static inline void dp_soc_reset_dpdk_intr_mask(struct dp_soc *soc)
|
||||
{ }
|
||||
#endif
|
||||
#endif /* _DP_RINGS_H_ */
|
4468
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c
Normal file
File diff suppressed because it is too large
3543
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c
Normal file
File diff suppressed because it is too large
3601
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h
Normal file
File diff suppressed because it is too large
@ -0,0 +1,429 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "dp_rx_buffer_pool.h"
|
||||
#include "dp_ipa.h"
|
||||
|
||||
#ifndef DP_RX_BUFFER_POOL_SIZE
|
||||
#define DP_RX_BUFFER_POOL_SIZE 128
|
||||
#endif
|
||||
|
||||
#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
|
||||
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
|
||||
#endif
|
||||
|
||||
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
|
||||
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
|
||||
{
|
||||
struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
|
||||
struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||
struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
|
||||
qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
|
||||
bool consumed = false;
|
||||
|
||||
if (!bufpool->is_initialized || !pdev)
|
||||
return consumed;
|
||||
|
||||
/* process only buffers of RXDMA ring */
|
||||
if (soc->wlan_cfg_ctx->rxdma1_enable)
|
||||
return consumed;
|
||||
|
||||
first_nbuf = nbuf;
|
||||
|
||||
while (nbuf) {
|
||||
next_nbuf = qdf_nbuf_next(nbuf);
|
||||
|
||||
if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
|
||||
DP_RX_BUFFER_POOL_SIZE))
|
||||
break;
|
||||
|
||||
refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
|
||||
RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment,
|
||||
FALSE);
|
||||
|
||||
/* Failed to allocate new nbuf, reset and place it back
|
||||
* in to the pool.
|
||||
*/
|
||||
if (!refill_nbuf) {
|
||||
DP_STATS_INC(pdev,
|
||||
rx_buffer_pool.num_bufs_consumed, 1);
|
||||
consumed = true;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Successful allocation!! */
|
||||
DP_STATS_INC(pdev,
|
||||
rx_buffer_pool.num_bufs_alloc_success, 1);
|
||||
qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
|
||||
refill_nbuf);
|
||||
nbuf = next_nbuf;
|
||||
}
|
||||
|
||||
nbuf = first_nbuf;
|
||||
if (consumed) {
|
||||
/* Free the MSDU/scattered MSDU */
|
||||
while (nbuf) {
|
||||
next_nbuf = qdf_nbuf_next(nbuf);
|
||||
dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
|
||||
nbuf = next_nbuf;
|
||||
}
|
||||
}
|
||||
|
||||
return consumed;
|
||||
}
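/*
 * Note on the return value above: when this function returns true, the
 * original nbuf chain has already been recycled into the emergency pool or
 * freed, so the caller must not hand it further up the rx path; on false,
 * the caller still owns the chain.
 */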
|
||||
|
||||
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
|
||||
{
|
||||
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
|
||||
struct rx_desc_pool *rx_desc_pool;
|
||||
struct rx_buff_pool *buff_pool;
|
||||
|
||||
if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
|
||||
mac_id = dp_pdev->lmac_id;
|
||||
|
||||
rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||
buff_pool = &soc->rx_buff_pool[mac_id];
|
||||
|
||||
if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
|
||||
DP_RX_BUFFER_POOL_SIZE) ||
|
||||
!buff_pool->is_initialized)
|
||||
return qdf_nbuf_free(nbuf);
|
||||
|
||||
qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment);
|
||||
qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
|
||||
}
|
||||
|
||||
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
|
||||
{
|
||||
struct rx_desc_pool *rx_desc_pool;
|
||||
struct rx_refill_buff_pool *buff_pool;
|
||||
qdf_device_t dev;
|
||||
qdf_nbuf_t nbuf;
|
||||
QDF_STATUS ret;
|
||||
int count, i;
|
||||
uint16_t num_refill;
|
||||
uint16_t total_num_refill;
|
||||
uint16_t total_count = 0;
|
||||
uint16_t head, tail;
|
||||
|
||||
if (!soc)
|
||||
return;
|
||||
|
||||
dev = soc->osdev;
|
||||
buff_pool = &soc->rx_refill_buff_pool;
|
||||
rx_desc_pool = &soc->rx_desc_buf[0];
|
||||
if (!buff_pool->is_initialized)
|
||||
return;
|
||||
|
||||
head = buff_pool->head;
|
||||
tail = buff_pool->tail;
|
||||
if (tail > head)
|
||||
total_num_refill = (tail - head - 1);
|
||||
else
|
||||
total_num_refill = (buff_pool->max_bufq_len - head +
|
||||
tail - 1);
|
||||
|
||||
while (total_num_refill) {
|
||||
if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
|
||||
num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
|
||||
else
|
||||
num_refill = total_num_refill;
|
||||
|
||||
count = 0;
|
||||
for (i = 0; i < num_refill; i++) {
|
||||
nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
|
||||
RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment,
|
||||
FALSE);
|
||||
if (qdf_unlikely(!nbuf))
|
||||
continue;
|
||||
|
||||
ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
|
||||
QDF_DMA_FROM_DEVICE,
|
||||
rx_desc_pool->buf_size);
|
||||
if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
|
||||
qdf_nbuf_free(nbuf);
|
||||
continue;
|
||||
}
|
||||
|
||||
dp_audio_smmu_map(dev,
|
||||
qdf_mem_paddr_from_dmaaddr(dev,
|
||||
QDF_NBUF_CB_PADDR(nbuf)),
|
||||
QDF_NBUF_CB_PADDR(nbuf),
|
||||
rx_desc_pool->buf_size);
|
||||
|
||||
buff_pool->buf_elem[head++] = nbuf;
|
||||
head &= (buff_pool->max_bufq_len - 1);
|
||||
count++;
|
||||
}
|
||||
|
||||
if (count) {
|
||||
buff_pool->head = head;
|
||||
total_num_refill -= count;
|
||||
total_count += count;
|
||||
}
|
||||
}
|
||||
|
||||
DP_STATS_INC(buff_pool->dp_pdev,
|
||||
rx_refill_buff_pool.num_bufs_refilled,
|
||||
total_count);
|
||||
}
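/*
 * Illustrative helper (not part of the original file): the two-branch
 * head/tail arithmetic above computes the number of free slots in the
 * circular refill queue while keeping one slot unused to distinguish a full
 * queue from an empty one. Assuming the same max_bufq_len ring size, a
 * compact equivalent is:
 */
static inline uint16_t
dp_rx_refill_pool_example_free_slots(uint16_t head, uint16_t tail,
				     uint16_t max_bufq_len)
{
	return (uint16_t)(tail - head + max_bufq_len - 1) % max_bufq_len;
}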
|
||||
|
||||
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
|
||||
{
|
||||
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||
qdf_nbuf_t nbuf = NULL;
|
||||
uint16_t head, tail;
|
||||
|
||||
head = buff_pool->head;
|
||||
tail = buff_pool->tail;
|
||||
|
||||
if (head == tail)
|
||||
return NULL;
|
||||
|
||||
nbuf = buff_pool->buf_elem[tail++];
|
||||
tail &= (buff_pool->max_bufq_len - 1);
|
||||
buff_pool->tail = tail;
|
||||
|
||||
return nbuf;
|
||||
}
|
||||
|
||||
qdf_nbuf_t
|
||||
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
uint32_t num_available_buffers)
|
||||
{
|
||||
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
|
||||
struct rx_buff_pool *buff_pool;
|
||||
struct dp_srng *dp_rxdma_srng;
|
||||
qdf_nbuf_t nbuf;
|
||||
|
||||
nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
|
||||
if (qdf_likely(nbuf)) {
|
||||
DP_STATS_INC(dp_pdev,
|
||||
rx_refill_buff_pool.num_bufs_allocated, 1);
|
||||
return nbuf;
|
||||
}
|
||||
|
||||
if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
|
||||
mac_id = dp_pdev->lmac_id;
|
||||
|
||||
buff_pool = &soc->rx_buff_pool[mac_id];
|
||||
dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
|
||||
|
||||
nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
|
||||
RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment,
|
||||
FALSE);
|
||||
|
||||
if (!buff_pool->is_initialized)
|
||||
return nbuf;
|
||||
|
||||
if (qdf_likely(nbuf)) {
|
||||
buff_pool->nbuf_fail_cnt = 0;
|
||||
return nbuf;
|
||||
}
|
||||
|
||||
buff_pool->nbuf_fail_cnt++;
|
||||
|
||||
/* Allocate buffer from the buffer pool */
|
||||
if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
|
||||
(num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
|
||||
nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
|
||||
if (nbuf)
|
||||
DP_STATS_INC(dp_pdev,
|
||||
rx_buffer_pool.num_pool_bufs_replenish, 1);
|
||||
}
|
||||
|
||||
return nbuf;
|
||||
}
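/*
 * Note on the fallback above: the emergency queue is only dequeued after
 * qdf_nbuf_alloc() has failed a consecutive number of times
 * (nbuf_fail_cnt reaching the allocation threshold) or when fewer than a
 * tenth of the RXDMA refill ring entries are reported available, so the
 * small reserve is kept for genuine low-memory conditions.
 */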
|
||||
|
||||
QDF_STATUS
|
||||
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
|
||||
{
|
||||
QDF_STATUS ret = QDF_STATUS_SUCCESS;
|
||||
|
||||
if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
|
||||
ret = qdf_nbuf_map_nbytes_single(soc->osdev,
|
||||
(nbuf_frag_info_t->virt_addr).nbuf,
|
||||
QDF_DMA_FROM_DEVICE,
|
||||
rx_desc_pool->buf_size);
|
||||
if (QDF_IS_STATUS_SUCCESS(ret))
|
||||
dp_audio_smmu_map(soc->osdev,
|
||||
qdf_mem_paddr_from_dmaaddr(soc->osdev,
|
||||
QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
|
||||
QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
|
||||
rx_desc_pool->buf_size);
|
||||
}
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
|
||||
{
|
||||
struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||
qdf_nbuf_t nbuf;
|
||||
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||
QDF_STATUS ret;
|
||||
uint16_t head = 0;
|
||||
int i;
|
||||
|
||||
if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
|
||||
dp_err("RX refill buffer pool support is disabled");
|
||||
buff_pool->is_initialized = false;
|
||||
return;
|
||||
}
|
||||
|
||||
buff_pool->max_bufq_len =
|
||||
wlan_cfg_get_rx_refill_buf_pool_size(soc->wlan_cfg_ctx);
|
||||
|
||||
buff_pool->buf_elem = qdf_mem_malloc(buff_pool->max_bufq_len *
|
||||
sizeof(qdf_nbuf_t));
|
||||
if (!buff_pool->buf_elem) {
|
||||
dp_err("Failed to allocate memory for RX refill buf element");
|
||||
buff_pool->is_initialized = false;
|
||||
return;
|
||||
}
|
||||
|
||||
buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
|
||||
buff_pool->tail = 0;
|
||||
|
||||
for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
|
||||
nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
|
||||
RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment, FALSE);
|
||||
if (!nbuf)
|
||||
continue;
|
||||
|
||||
ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
|
||||
QDF_DMA_FROM_DEVICE,
|
||||
rx_desc_pool->buf_size);
|
||||
if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
|
||||
qdf_nbuf_free(nbuf);
|
||||
continue;
|
||||
}
|
||||
|
||||
dp_audio_smmu_map(soc->osdev,
|
||||
qdf_mem_paddr_from_dmaaddr(soc->osdev,
|
||||
QDF_NBUF_CB_PADDR(nbuf)),
|
||||
QDF_NBUF_CB_PADDR(nbuf),
|
||||
rx_desc_pool->buf_size);
|
||||
|
||||
buff_pool->buf_elem[head] = nbuf;
|
||||
head++;
|
||||
}
|
||||
|
||||
buff_pool->head = head;
|
||||
|
||||
dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
|
||||
buff_pool->max_bufq_len,
|
||||
buff_pool->head);
|
||||
|
||||
buff_pool->is_initialized = true;
|
||||
}
|
||||
|
||||
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
|
||||
{
|
||||
struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||
struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
|
||||
qdf_nbuf_t nbuf;
|
||||
int i;
|
||||
|
||||
dp_rx_refill_buff_pool_init(soc, mac_id);
|
||||
|
||||
if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
|
||||
dp_info("RX buffer pool support is disabled");
|
||||
buff_pool->is_initialized = false;
|
||||
return;
|
||||
}
|
||||
|
||||
if (buff_pool->is_initialized)
|
||||
return;
|
||||
|
||||
qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);
|
||||
|
||||
for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
|
||||
nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
|
||||
RX_BUFFER_RESERVATION,
|
||||
rx_desc_pool->buf_alignment, FALSE);
|
||||
if (!nbuf)
|
||||
continue;
|
||||
qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
|
||||
nbuf);
|
||||
}
|
||||
|
||||
dp_info("RX buffer pool required allocation: %u actual allocation: %u",
|
||||
DP_RX_BUFFER_POOL_SIZE,
|
||||
qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
|
||||
|
||||
buff_pool->is_initialized = true;
|
||||
}
|
||||
|
||||
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
|
||||
{
|
||||
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||
struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||
qdf_nbuf_t nbuf;
|
||||
uint32_t count = 0;
|
||||
|
||||
if (!buff_pool->is_initialized)
|
||||
return;
|
||||
|
||||
while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
|
||||
dp_audio_smmu_unmap(soc->osdev,
|
||||
QDF_NBUF_CB_PADDR(nbuf),
|
||||
rx_desc_pool->buf_size);
|
||||
qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
|
||||
QDF_DMA_BIDIRECTIONAL,
|
||||
rx_desc_pool->buf_size);
|
||||
qdf_nbuf_free(nbuf);
|
||||
count++;
|
||||
}
|
||||
|
||||
dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
|
||||
count, buff_pool->head, buff_pool->tail);
|
||||
|
||||
qdf_mem_free(buff_pool->buf_elem);
|
||||
buff_pool->is_initialized = false;
|
||||
}
|
||||
|
||||
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
|
||||
{
|
||||
struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
|
||||
qdf_nbuf_t nbuf;
|
||||
|
||||
dp_rx_refill_buff_pool_deinit(soc, mac_id);
|
||||
|
||||
if (!buff_pool->is_initialized)
|
||||
return;
|
||||
|
||||
dp_info("buffers in the RX buffer pool during deinit: %u",
|
||||
qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
|
||||
|
||||
while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
|
||||
qdf_nbuf_free(nbuf);
|
||||
|
||||
buff_pool->is_initialized = false;
|
||||
}
|
||||
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
|
@ -0,0 +1,237 @@
|
||||
/*
|
||||
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_RX_BUFFER_POOL_H_
|
||||
#define _DP_RX_BUFFER_POOL_H_
|
||||
|
||||
#include "dp_types.h"
|
||||
#include "qdf_nbuf.h"
|
||||
#include "qdf_module.h"
|
||||
#include "athdefs.h"
|
||||
#include "wlan_cfg.h"
|
||||
#include "dp_internal.h"
|
||||
#include "dp_rx.h"
|
||||
|
||||
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
|
||||
/**
|
||||
* dp_rx_buffer_pool_init() - Initialize emergency buffer pool
|
||||
* @soc: SoC handle
|
||||
* @mac_id: MAC ID
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id);
|
||||
|
||||
/**
|
||||
* dp_rx_buffer_pool_deinit() - De-Initialize emergency buffer pool
|
||||
* @soc: SoC handle
|
||||
* @mac_id: MAC ID
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id);
|
||||
|
||||
/**
|
||||
* dp_rx_buffer_pool_refill() - Process the rx nbuf list and
|
||||
* refill the emergency buffer pool
|
||||
* @soc: SoC handle
|
||||
* @nbuf: RX buffer
|
||||
* @mac_id: MAC ID
|
||||
*
|
||||
* Return: Whether the rx nbuf is consumed into the pool or not.
|
||||
*/
|
||||
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id);
|
||||
|
||||
/**
|
||||
* dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
|
||||
* back into the pool
|
||||
* @soc: SoC handle
|
||||
* @nbuf: RX buffer
|
||||
* @mac_id: MAC ID
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
|
||||
u8 mac_id);
|
||||
|
||||
/**
|
||||
* dp_rx_buffer_pool_nbuf_alloc() - Allocate nbuf for buffer replenish,
|
||||
* give nbuf from the pool if allocation fails
|
||||
* @soc: SoC handle
|
||||
* @mac_id: MAC ID
|
||||
* @rx_desc_pool: RX descriptor pool
|
||||
* @num_available_buffers: number of available buffers in the ring.
|
||||
*
|
||||
* Return: nbuf
|
||||
*/
|
||||
qdf_nbuf_t dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
uint32_t num_available_buffers);
|
||||
|
||||
/**
|
||||
* dp_rx_buffer_pool_nbuf_map() - Map nbuff for buffer replenish
|
||||
* @soc: SoC handle
|
||||
* @rx_desc_pool: RX descriptor pool
|
||||
* @nbuf_frag_info_t: nbuf frag info
|
||||
*
|
||||
* Return: nbuf
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
struct dp_rx_nbuf_frag_info *nbuf_frag_info_t);
|
||||
|
||||
/**
|
||||
* dp_rx_schedule_refill_thread() - Schedule RX refill thread to enqueue
|
||||
* buffers in refill pool
|
||||
* @soc: SoC handle
|
||||
*
|
||||
*/
|
||||
static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
|
||||
{
|
||||
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||
uint16_t head = buff_pool->head;
|
||||
uint16_t tail = buff_pool->tail;
|
||||
uint16_t num_refill;
|
||||
|
||||
if (!buff_pool->is_initialized)
|
||||
return;
|
||||
|
||||
if (tail > head)
|
||||
num_refill = (tail - head - 1);
|
||||
else
|
||||
num_refill = (buff_pool->max_bufq_len - head + tail - 1);
|
||||
|
||||
if (soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread &&
|
||||
num_refill >= DP_RX_REFILL_THRD_THRESHOLD)
|
||||
soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(
|
||||
dp_soc_to_cdp_soc_t(soc));
|
||||
}
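/*
 * Worked example (illustrative): with max_bufq_len = 2048, head = 100 and
 * tail = 52, num_refill evaluates to 2048 - 100 + 52 - 1 = 1999, so the
 * refill thread is scheduled as long as DP_RX_REFILL_THRD_THRESHOLD does
 * not exceed that value and the dp_rx_sched_refill_thread callback is
 * registered.
 */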
|
||||
#else
/**
 * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	soc->rx_buff_pool[mac_id].is_initialized = false;
}

/**
 * dp_rx_buffer_pool_deinit() - De-initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
}

/**
 * dp_rx_buffer_pool_refill() - Process the rx nbuf list and
 *				refill the emergency buffer pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: Whether the rx nbuf is consumed into the pool or not.
 */
static inline
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	return false;
}

/**
 * dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
 *				   back into the pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
				 u8 mac_id)
{
	qdf_nbuf_free(nbuf);
}

/**
 * dp_rx_buffer_pool_nbuf_alloc() - Allocate nbuf for buffer replenish,
 *				    give nbuf from the pool if allocation fails
 * @soc: SoC handle
 * @mac_id: MAC ID
 * @rx_desc_pool: RX descriptor pool
 * @num_available_buffers: number of available buffers in the ring.
 *
 * Return: nbuf
 */
static inline qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

/**
 * dp_rx_buffer_pool_nbuf_map() - Map nbuf for buffer replenish
 * @soc: SoC handle
 * @rx_desc_pool: RX descriptor pool
 * @nbuf_frag_info_t: nbuf frag info
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS status;

	status = qdf_nbuf_map_nbytes_single(soc->osdev,
					    (nbuf_frag_info_t->virt_addr).nbuf,
					    QDF_DMA_FROM_DEVICE,
					    rx_desc_pool->buf_size);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	dp_audio_smmu_map(soc->osdev,
			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
				QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
			  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
			  rx_desc_pool->buf_size);

	return status;
}

static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }

#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
#endif /* _DP_RX_BUFFER_POOL_H_ */
2204
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c
Normal file
2204
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c
Normal file
File diff suppressed because it is too large
Load Diff
252
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h
Normal file
252
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h
Normal file
@ -0,0 +1,252 @@
/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_DEFRAG_H
#define _DP_RX_DEFRAG_H

#include "hal_rx.h"

#define DEFRAG_IEEE80211_KEY_LEN	8
#define DEFRAG_IEEE80211_FCS_LEN	4

#define DP_RX_DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, QDF_MAC_ADDR_SIZE)

#define DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh) & \
	  (IEEE80211_FC0_TYPE_MASK | QDF_IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | QDF_IEEE80211_FC0_SUBTYPE_QOS))

#define UNI_DESC_OWNER_SW 0x1
#define UNI_DESC_BUF_TYPE_RX_MSDU_LINK 0x6

/**
 * struct dp_rx_defrag_cipher: structure to indicate cipher header
 * @ic_name: Name
 * @ic_header: header length
 * @ic_trailer: trailer length
 * @ic_miclen: MIC length
 */
struct dp_rx_defrag_cipher {
	const char *ic_name;
	uint16_t ic_header;
	uint8_t ic_trailer;
	uint8_t ic_miclen;
};

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @rx_desc: RX descriptor of the fragment
 * @mac_id: MAC ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to bypass the fragmentation check
 * but still use duplicate detection / re-ordering) and route these
 * frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   struct dp_rx_desc *rx_desc,
			   uint8_t *mac_id,
			   uint32_t quota);
#endif /* WLAN_SOFTUMAC_SUPPORT */

/**
 * dp_rx_frag_get_mac_hdr() - Return pointer to the mac hdr
 * @soc: DP SOC
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 *		  nbuf (pkt_tlvs->mac_hdr->data)
 *
 * It is inefficient to peek into the packet for received
 * frames but these APIs are required to get to some of
 * the 802.11 fields that hardware does not populate in the
 * rx meta data.
 *
 * Return: pointer to ieee80211_frame
 */
static inline struct ieee80211_frame *
dp_rx_frag_get_mac_hdr(struct dp_soc *soc, uint8_t *rx_desc_info)
{
	int rx_desc_len = soc->rx_pkt_tlv_size;

	return (struct ieee80211_frame *)(rx_desc_info + rx_desc_len);
}

/**
 * dp_rx_frag_get_mpdu_seq_number() - Get mpdu sequence number
 * @soc: DP SOC
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 *		  nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Return: uint16_t, rx sequence number
 */
static inline uint16_t
dp_rx_frag_get_mpdu_seq_number(struct dp_soc *soc, uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;

	mac_hdr = dp_rx_frag_get_mac_hdr(soc, rx_desc_info);

	return qdf_le16_to_cpu(*(uint16_t *)mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
}

/**
 * dp_rx_frag_get_mpdu_frag_number() - Get mpdu fragment number
 * @soc: DP SOC
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 *		  nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Return: uint8_t, receive fragment number
 */
static inline uint8_t
dp_rx_frag_get_mpdu_frag_number(struct dp_soc *soc, uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;

	mac_hdr = dp_rx_frag_get_mac_hdr(soc, rx_desc_info);

	return qdf_le16_to_cpu(*(uint16_t *)mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
}
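
/*
 * Illustrative note (not part of the original patch): the two getters above
 * split the 16-bit IEEE 802.11 Sequence Control field, where the low 4 bits
 * carry the fragment number and the upper 12 bits carry the sequence number.
 * example_seq_ctrl_split() is a hypothetical helper.
 */
#if 0	/* example only, never compiled */
static inline void example_seq_ctrl_split(uint16_t seq_ctrl_le)
{
	uint16_t seq_ctrl = qdf_le16_to_cpu(seq_ctrl_le);
	uint16_t seq_num = seq_ctrl >> IEEE80211_SEQ_SEQ_SHIFT; /* bits 4..15 */
	uint8_t frag_num = seq_ctrl & IEEE80211_SEQ_FRAG_MASK;  /* bits 0..3  */

	/* e.g. seq_ctrl = 0x1232 -> seq_num = 0x123, frag_num = 2 */
	(void)seq_num;
	(void)frag_num;
}
#endif
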
/**
 * dp_rx_frag_get_more_frag_bit() - Get more fragment bit
 * @soc: DP SOC
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 *		  nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Return: uint8_t, more fragment bit
 */
static inline
uint8_t dp_rx_frag_get_more_frag_bit(struct dp_soc *soc, uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;

	mac_hdr = dp_rx_frag_get_mac_hdr(soc, rx_desc_info);

	return (mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG) >> 2;
}

static inline
uint8_t dp_rx_get_pkt_dir(struct dp_soc *soc, uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;

	mac_hdr = dp_rx_frag_get_mac_hdr(soc, rx_desc_info);

	return mac_hdr->i_fc[1] & IEEE80211_FC1_DIR_MASK;
}

/**
 * dp_rx_defrag_fraglist_insert() - Create a per-sequence fragment list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 * @head_addr: Pointer to head list
 * @tail_addr: Pointer to tail list
 * @frag: Incoming fragment
 * @all_frag_present: Flag to indicate whether all fragments are received
 *
 * Build a per-tid, per-sequence fragment list.
 *
 * Return: Success, if inserted
 */
QDF_STATUS
dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
			     qdf_nbuf_t frag, uint8_t *all_frag_present);

/**
 * dp_rx_defrag_waitlist_add() - Update per-PDEV defrag wait list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 *
 * Appends per-tid fragments to the global fragment wait list
 *
 * Return: None
 */
void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
			       unsigned int tid);

/**
 * dp_rx_defrag() - Defragment the fragment chain
 * @txrx_peer: Pointer to the peer
 * @tid: Transmit Identifier
 * @frag_list_head: Pointer to head list
 * @frag_list_tail: Pointer to tail list
 *
 * Defragment the fragment chain
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
			qdf_nbuf_t frag_list_head,
			qdf_nbuf_t frag_list_tail);
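
/*
 * Illustrative flow sketch (not part of the original patch): how the defrag
 * APIs above are typically chained by the RX error path. The example_*
 * function is hypothetical and error handling is omitted.
 */
#if 0	/* example only, never compiled */
static void example_defrag_flow(struct dp_txrx_peer *txrx_peer,
				unsigned int tid, qdf_nbuf_t frag)
{
	qdf_nbuf_t head = NULL, tail = NULL;
	uint8_t all_frag_present = 0;

	/* Queue the fragment in per-tid, per-sequence order */
	if (QDF_IS_STATUS_ERROR(dp_rx_defrag_fraglist_insert(txrx_peer, tid,
							     &head, &tail,
							     frag,
							     &all_frag_present)))
		return;

	if (all_frag_present)
		/* All fragments arrived: stitch them into a single MSDU */
		dp_rx_defrag(txrx_peer, tid, head, tail);
	else
		/* Otherwise park the partial chain on the wait list */
		dp_rx_defrag_waitlist_add(txrx_peer, tid);
}
#endif
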
/**
 * dp_rx_defrag_waitlist_flush() - Flush SOC defrag wait list
 * @soc: DP SOC
 *
 * Flush fragments of all waitlisted TIDs
 *
 * Return: None
 */
void dp_rx_defrag_waitlist_flush(struct dp_soc *soc);

/**
 * dp_rx_reorder_flush_frag() - Flush the frag list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 *
 * Flush the per-TID frag list
 *
 * Return: None
 */
void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
			      unsigned int tid);

/**
 * dp_rx_defrag_waitlist_remove() - Remove fragments from waitlist
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 *
 * Remove fragments from waitlist
 *
 * Return: None
 */
void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
				  unsigned int tid);

/**
 * dp_rx_defrag_cleanup() - Clean up activities
 * @txrx_peer: Pointer to the peer
 * @tid: Transmit Identifier
 *
 * Return: None
 */
void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid);

QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
				      struct dp_txrx_peer *peer, uint16_t tid,
				      uint16_t rxseq, qdf_nbuf_t nbuf);
#endif /* _DP_RX_DEFRAG_H */
538
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c
Normal file
538
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c
Normal file
@ -0,0 +1,538 @@
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail,size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}
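
/*
 * Illustrative decode of the sw_buffer_cookie layout documented above
 * (not part of the original patch). DP_RX_DESC_POOL_ID_SHIFT and
 * DP_RX_DESC_PAGE_ID_SHIFT are the same macros used by
 * dp_rx_desc_pool_init_generic(); the masks below are derived from them
 * for this example only, and example_rx_cookie_decode() is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline void example_rx_cookie_decode(uint32_t cookie)
{
	uint32_t pool_id = cookie >> DP_RX_DESC_POOL_ID_SHIFT;
	uint32_t page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) &
			   ((1 << (DP_RX_DESC_POOL_ID_SHIFT -
				   DP_RX_DESC_PAGE_ID_SHIFT)) - 1);
	uint32_t offset = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);

	/*
	 * page_id/offset can then be used to locate the descriptor, exactly
	 * as dp_rx_desc_find(page_id, offset, pool) does below.
	 */
	(void)pool_id;
	(void)page_id;
	(void)offset;
}
#endif
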
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(nbuf),
					    buf_size);

		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false, __func__,
						      __LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refil");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_common_alloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	int i;

	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

#ifdef WLAN_SUPPORT_PPEDS
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	if (rx_desc_pool->array[i].rx_desc.has_reuse_nbuf)
		return rx_desc_pool->array[i].rx_desc.reuse_nbuf;
	else
		return rx_desc_pool->array[i].rx_desc.nbuf;
}
#else
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	return rx_desc_pool->array[i].rx_desc.nbuf;
}
#endif

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_common_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_common_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	if (rx_desc_pool->pool_size) {
		qdf_spin_lock_bh(&rx_desc_pool->lock);

		rx_desc_pool->freelist = NULL;
		rx_desc_pool->pool_size = 0;

		/* Deinitialize rx mon dest frag flag */
		rx_desc_pool->rx_mon_dest_frag_enable = false;
		qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

		soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool,
						     pool_id);

		qdf_spin_unlock_bh(&rx_desc_pool->lock);
		qdf_spinlock_destroy(&rx_desc_pool->lock);
	}
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
}

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
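
/*
 * Illustrative usage sketch (not part of the original patch): a replenish
 * path reserves descriptors from the pool freelist and returns the unused
 * tail, which is the intended pairing of the two exported helpers above.
 * example_reserve_and_return() is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_reserve_and_return(struct dp_soc *soc, uint32_t pool_id,
				       struct rx_desc_pool *pool)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t got;

	/* Carve up to 32 descriptors off the pool freelist */
	got = dp_rx_get_free_desc_list(soc, pool_id, pool, 32,
				       &desc_list, &tail);
	if (!got)
		return;

	/* ... attach nbufs to the first 'got' descriptors here ... */

	/* Return the remaining (unused) part of the list, if any */
	if (desc_list && tail)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 pool_id, pool);
}
#endif
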
3320
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c
Normal file
3320
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c
Normal file
File diff suppressed because it is too large
Load Diff
2235
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.c
Normal file
2235
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.c
Normal file
File diff suppressed because it is too large
Load Diff
387
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.h
Normal file
387
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.h
Normal file
@ -0,0 +1,387 @@
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_TID_H_
#define _DP_RX_TID_H_

#include "dp_types.h"

/*
 * dp_rxtid_stats_cmd_cb - function pointer for peer
 *			   rx tid stats cmd call_back
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status);

#ifndef WLAN_SOFTUMAC_SUPPORT
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status);

/**
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);

/**
 * dp_rx_tid_setup_wifi3() - Set up receive TID state
 * @peer: Datapath peer handle
 * @tid_bitmap: TIDs to be set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, uint32_t tid_bitmap,
				 uint32_t ba_window_size, uint32_t start_seq);

/**
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @bar_update: BAR update triggered
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
				  uint32_t ba_window_size, uint32_t start_seq,
				  bool bar_update);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *			   after deleting the entries (i.e., setting valid=0)
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef IPA_OFFLOAD
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status);
int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
void dp_reset_tid_q_setup(struct dp_soc *soc);
#endif

/**
 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac,
				      uint16_t vdev_id,
				      uint8_t tid, int status);

/**
 * dp_addba_responsesetup_wifi3() - Return ADDBA response parameters for a peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialog token
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			     uint16_t vdev_id, uint8_t tid,
			     uint8_t *dialogtoken, uint16_t *statuscode,
			     uint16_t *buffersize, uint16_t *batimeout);

/**
 * dp_rx_tid_update_ba_win_size() - Update the DP tid BA window size
 * @cdp_soc: soc handle
 * @peer_mac: mac address of peer handle
 * @vdev_id: id of vdev handle
 * @tid: tid
 * @buffersize: BA window size
 *
 * Return: success/failure of tid update
 */
QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
					uint8_t *peer_mac, uint16_t vdev_id,
					uint8_t tid, uint16_t buffersize);

/**
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac,
				  uint16_t vdev_id,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum);
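
/*
 * Illustrative call-order sketch (not part of the original patch) of how a
 * control-path client might drive the ADDBA helpers declared above when a
 * peer negotiates a BlockAck session. The exact caller lives outside this
 * component; example_addba_negotiation() and all parameter values are
 * placeholders.
 */
#if 0	/* example only, never compiled */
static void example_addba_negotiation(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac, uint16_t vdev_id,
				      uint8_t tid)
{
	uint8_t dialogtoken;
	uint16_t statuscode, buffersize, batimeout;

	/* 1. Peer's ADDBA request arrives: record token/window/timeout */
	dp_addba_requestprocess_wifi3(cdp_soc, peer_mac, vdev_id,
				      /* dialogtoken */ 1, tid,
				      /* batimeout */ 0,
				      /* buffersize */ 64,
				      /* startseqnum */ 0);

	/* 2. Fetch the negotiated values to build the ADDBA response frame */
	dp_addba_responsesetup_wifi3(cdp_soc, peer_mac, vdev_id, tid,
				     &dialogtoken, &statuscode,
				     &buffersize, &batimeout);

	/* 3. Report the response frame's tx completion back to DP */
	dp_addba_resp_tx_completion_wifi3(cdp_soc, peer_mac, vdev_id, tid,
					  /* status */ 0);
}
#endif
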
/**
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @statuscode: response status code to be set
 */
QDF_STATUS
dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode);

/**
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			   uint16_t vdev_id, int tid, uint16_t reasoncode);

/**
 * dp_delba_tx_completion_wifi3() - Handle delba tx completion
 * @cdp_soc: soc handle
 * @peer_mac: peer mac address
 * @vdev_id: id of the vdev handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Indicate status of delba Tx to DP for stats update and retry
 * delba if tx failed.
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
				 uint16_t vdev_id,
				 uint8_t tid, int status);

/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 */
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn);
QDF_STATUS
dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
			uint8_t tid, uint16_t win_sz);

/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: count of tid stats cmd send succeeded
 */
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt);
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer);
void dp_peer_rx_tids_destroy(struct dp_peer *peer);

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send the cache flush cmd to REO and
 * to register the callback to handle the dumping of the REO
 * queue stats from DDR.
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(struct dp_soc *soc, void *cb_ctxt,
			       union hal_reo_status *reo_status);

#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
void dp_peer_rx_tid_setup(struct dp_peer *peer);
#else
static inline void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status) {}
static inline void dp_peer_rx_cleanup(struct dp_vdev *vdev,
				      struct dp_peer *peer) {}
static inline int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
						    uint8_t *peer_mac,
						    uint16_t vdev_id,
						    uint8_t tid, int status)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			     uint16_t vdev_id, uint8_t tid,
			     uint8_t *dialogtoken, uint16_t *statuscode,
			     uint16_t *buffersize, uint16_t *batimeout)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
			     uint8_t *peer_mac, uint16_t vdev_id,
			     uint8_t tid, uint16_t buffersize)
{
	return QDF_STATUS_SUCCESS;
}

static inline int
dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			      uint16_t vdev_id, uint8_t dialogtoken,
			      uint16_t tid, uint16_t batimeout,
			      uint16_t buffersize, uint16_t startseqnum)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
{
	return QDF_STATUS_SUCCESS;
}

static inline int
dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		       uint16_t vdev_id, int tid, uint16_t reasoncode)
{
	return QDF_STATUS_SUCCESS;
}

static inline int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
					       uint8_t *peer_mac,
					       uint16_t vdev_id,
					       uint8_t tid, int status)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
			uint8_t tid, uint16_t win_sz)
{
	return QDF_STATUS_SUCCESS;
}

static inline int
dp_peer_rxtid_stats(struct dp_peer *peer,
		    dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
		    void *cb_ctxt)
{
	return 0;
}

static inline QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_rx_tids_destroy(struct dp_peer *peer) {}

static inline void dp_get_rx_reo_queue_info(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id) {}
static inline void dp_peer_rx_tid_setup(struct dp_peer *peer) {}

static inline QDF_STATUS
dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
		      uint32_t ba_window_size, uint32_t start_seq)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* _DP_RX_TID_H_ */
10509
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c
Normal file
10509
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c
Normal file
File diff suppressed because it is too large
Load Diff
8074
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c
Normal file
8074
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c
Normal file
File diff suppressed because it is too large
Load Diff
2213
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h
Normal file
2213
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h
Normal file
File diff suppressed because it is too large
Load Diff
871
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c
Normal file
871
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c
Normal file
@ -0,0 +1,871 @@
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
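
/*
 * Illustrative note (not part of the original patch): with the macro above,
 * page_divider ends up as log2(num_desc_per_page) and offset_filter as the
 * matching bit mask, so a flat descriptor index splits into a page and an
 * offset with a shift and an AND instead of a divide and a modulo.
 * example_desc_id_split() is a hypothetical helper.
 */
#if 0	/* example only, never compiled */
static inline void example_desc_id_split(uint32_t desc_id,
					 uint8_t page_divider,
					 uint32_t offset_filter)
{
	uint32_t page_id = desc_id >> page_divider;   /* desc_id / per_page */
	uint32_t offset  = desc_id & offset_filter;   /* desc_id % per_page */

	/* e.g. 256 descriptors/page: page_divider = 8, offset_filter = 0xff */
	(void)page_id;
	(void)offset;
}
#endif
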
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
			     bool cleanup)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	if (!cleanup)
		return;

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = dp_get_tx_desc_pool(soc, i);

		TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);

		TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);

		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
		TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);

		TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
	}
}
#endif

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();

	if (spcl_tx_desc) {
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	} else {
		dp_global->tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	}
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	if (spcl_tx_desc) {
		if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
	} else {
		if (!dp_global->tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
		dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
	}
}
#else
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
}
#endif

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem, bool spcl_tx_desc)
{
	uint32_t desc_size, num_elem_t;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	QDF_STATUS status;
	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);
	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
		num_elem_t = num_elem;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_DESC_TYPE;
		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
						    num_elem);
	}

	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, desc_type,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem_t,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	/* Arch specific TX descriptor allocation */
	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate arch specific descriptors");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
			  bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_DESC_TYPE;
	}

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);

	/* Free arch specific TX descriptor */
	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
	dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem, bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t desc_size, num_elem_t;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		num_elem_t = num_elem;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
						    num_elem);
	}
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem_t, true)) {
		dp_err("invalid tx desc allocation -overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem_t,
						pool_id, spcl_tx_desc)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
			    bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	if (spcl_tx_desc)
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
	else
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
					     pool_id, spcl_tx_desc);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
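
/*
 * Illustrative lifecycle sketch (not part of the original patch): the
 * expected pairing of the pool routines above for one regular (non-special)
 * TX descriptor pool. example_tx_pool_lifecycle() is hypothetical; callers
 * in the driver normally loop over all pools.
 */
#if 0	/* example only, never compiled */
static QDF_STATUS example_tx_pool_lifecycle(struct dp_soc *soc,
					    uint8_t pool_id, uint32_t num_elem)
{
	/* 1. Reserve multi-page memory for the descriptors */
	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_alloc(soc, pool_id, num_elem,
						      false)))
		return QDF_STATUS_E_NOMEM;

	/* 2. Link the pages into a freelist and assign descriptor IDs */
	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_init(soc, pool_id, num_elem,
						     false))) {
		dp_tx_desc_pool_free(soc, pool_id, false);
		return QDF_STATUS_E_FAULT;
	}

	/* ... pool is usable here ... */

	/* 3. Tear down in the reverse order */
	dp_tx_desc_pool_deinit(soc, pool_id, false);
	dp_tx_desc_pool_free(soc, pool_id, false);

	return QDF_STATUS_SUCCESS;
}
#endif
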
QDF_STATUS
|
||||
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
qdf_dma_context_t memctx = 0;
|
||||
uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
|
||||
struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
|
||||
uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);
|
||||
|
||||
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
|
||||
memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
|
||||
|
||||
/* Coherent tx extension descriptor alloc */
|
||||
dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
|
||||
&dp_tx_ext_desc_pool->desc_pages,
|
||||
elem_size, num_elem, memctx, false);
|
||||
|
||||
if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"ext desc page alloc fail");
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Cacheable ext descriptor link alloc
|
||||
* This structure also large size already
|
||||
* single element is 24bytes, 2K elements are 48Kbytes
|
||||
* Have to alloc multi page cacheable memory
|
||||
*/
|
||||
dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
|
||||
&dp_tx_ext_desc_pool->desc_link_pages,
|
||||
link_elem_size, num_elem, 0, true);
|
||||
|
||||
if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"ext link desc page alloc fail");
|
||||
status = QDF_STATUS_E_NOMEM;
|
||||
goto free_ext_desc;
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
free_ext_desc:
|
||||
dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
|
||||
&dp_tx_ext_desc_pool->desc_pages,
|
||||
memctx, false);
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
uint8_t pool_id, count;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to allocate tx ext desc pool %d", pool_id);
|
||||
goto free_ext_desc_pool;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
free_ext_desc_pool:
|
||||
for (count = 0; count < pool_id; count++)
|
||||
dp_tx_ext_desc_pool_free_by_id(soc, count);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
uint32_t i;
|
||||
struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
|
||||
struct qdf_mem_dma_page_t *page_info;
|
||||
struct qdf_mem_multi_page_t *pages;
|
||||
struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
|
||||
QDF_STATUS status;
|
||||
|
||||
/* link tx descriptors into a freelist */
|
||||
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
|
||||
soc->tx_ext_desc[pool_id].elem_size =
|
||||
HAL_TX_EXT_DESC_WITH_META_DATA;
|
||||
soc->tx_ext_desc[pool_id].link_elem_size =
|
||||
sizeof(struct dp_tx_ext_desc_elem_s);
|
||||
soc->tx_ext_desc[pool_id].elem_count = num_elem;
|
||||
|
||||
dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
|
||||
*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
|
||||
|
||||
if (qdf_mem_multi_page_link(soc->osdev,
|
||||
&dp_tx_ext_desc_pool->desc_link_pages,
|
||||
dp_tx_ext_desc_pool->link_elem_size,
|
||||
dp_tx_ext_desc_pool->elem_count,
|
||||
true)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"ext link desc page linking fail");
|
||||
status = QDF_STATUS_E_FAULT;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Assign coherent memory pointer into linked free list */
|
||||
pages = &dp_tx_ext_desc_pool->desc_pages;
|
||||
page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
|
||||
c_elem = dp_tx_ext_desc_pool->freelist;
|
||||
p_elem = c_elem;
|
||||
for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
|
||||
if (!(i % pages->num_element_per_page)) {
|
||||
/*
 * The first element placed on a new page takes its
 * address from the start of the next DMA page.
 */
|
||||
if (!pages->dma_pages->page_v_addr_start) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_ERROR,
"link overflow");
|
||||
status = QDF_STATUS_E_FAULT;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
c_elem->vaddr =
|
||||
(void *)page_info->page_v_addr_start;
|
||||
c_elem->paddr = page_info->page_p_addr;
|
||||
page_info++;
|
||||
} else {
|
||||
c_elem->vaddr = (void *)(p_elem->vaddr +
|
||||
dp_tx_ext_desc_pool->elem_size);
|
||||
c_elem->paddr = (p_elem->paddr +
|
||||
dp_tx_ext_desc_pool->elem_size);
|
||||
}
|
||||
p_elem = c_elem;
|
||||
c_elem = c_elem->next;
|
||||
if (!c_elem)
|
||||
break;
|
||||
}
|
||||
dp_tx_ext_desc_pool->num_free = num_elem;
|
||||
qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
fail:
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
uint8_t pool_id;
|
||||
QDF_STATUS status;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to init ext desc pool %d", pool_id);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
fail:
|
||||
return status;
|
||||
}
|
||||
|
||||
void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
|
||||
qdf_dma_context_t memctx = 0;
|
||||
|
||||
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
|
||||
memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
|
||||
|
||||
dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
|
||||
&dp_tx_ext_desc_pool->desc_link_pages,
|
||||
0, true);
|
||||
|
||||
dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
|
||||
&dp_tx_ext_desc_pool->desc_pages,
|
||||
memctx, false);
|
||||
}
|
||||
|
||||
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint8_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
|
||||
}
|
||||
|
||||
void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
|
||||
|
||||
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
|
||||
qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
|
||||
}
|
||||
|
||||
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint8_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
|
||||
}
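/*
 * Illustrative lifecycle sketch, not part of the driver: the extension
 * descriptor pool APIs above are assumed to be driven in
 * alloc -> init -> ... -> deinit -> free order during soc attach/detach.
 * "soc", "num_pool" and "num_elem" stand for values the caller already owns.
 *
 *   if (QDF_IS_STATUS_ERROR(dp_tx_ext_desc_pool_alloc(soc, num_pool, num_elem)))
 *           return QDF_STATUS_E_NOMEM;
 *   if (QDF_IS_STATUS_ERROR(dp_tx_ext_desc_pool_init(soc, num_pool, num_elem))) {
 *           dp_tx_ext_desc_pool_free(soc, num_pool);
 *           return QDF_STATUS_E_FAULT;
 *   }
 *   ...
 *   dp_tx_ext_desc_pool_deinit(soc, num_pool);
 *   dp_tx_ext_desc_pool_free(soc, num_pool);
 */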
|
||||
|
||||
#if defined(FEATURE_TSO)
|
||||
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
|
||||
uint32_t desc_size;
|
||||
|
||||
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
|
||||
|
||||
tso_desc_pool = &soc->tx_tso_desc[pool_id];
|
||||
tso_desc_pool->num_free = 0;
|
||||
dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
|
||||
&tso_desc_pool->desc_pages,
|
||||
desc_size, num_elem, 0, true);
|
||||
if (!tso_desc_pool->desc_pages.num_pages) {
|
||||
dp_err("Multi page alloc fail, tso desc");
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
uint32_t pool_id, i;
|
||||
QDF_STATUS status;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
|
||||
pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to allocate TSO desc pool %d", pool_id);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
fail:
|
||||
for (i = 0; i < pool_id; i++)
|
||||
dp_tx_tso_desc_pool_free_by_id(soc, i);
|
||||
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
|
||||
|
||||
tso_desc_pool = &soc->tx_tso_desc[pool_id];
|
||||
dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
|
||||
&tso_desc_pool->desc_pages,
|
||||
0, true);
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint32_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
|
||||
uint32_t desc_size;
|
||||
|
||||
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
|
||||
|
||||
tso_desc_pool = &soc->tx_tso_desc[pool_id];
|
||||
|
||||
if (qdf_mem_multi_page_link(soc->osdev,
|
||||
&tso_desc_pool->desc_pages,
|
||||
desc_size,
|
||||
num_elem, true)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"invalid tso desc allocation - overflow num link");
|
||||
return QDF_STATUS_E_FAULT;
|
||||
}
|
||||
|
||||
tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
|
||||
*tso_desc_pool->desc_pages.cacheable_pages;
|
||||
tso_desc_pool->num_free = num_elem;
|
||||
|
||||
TSO_DEBUG("Number of free descriptors: %u\n",
|
||||
tso_desc_pool->num_free);
|
||||
tso_desc_pool->pool_size = num_elem;
|
||||
qdf_spinlock_create(&tso_desc_pool->lock);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
uint32_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
|
||||
pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to initialise TSO desc pool %d", pool_id);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
|
||||
|
||||
tso_desc_pool = &soc->tx_tso_desc[pool_id];
|
||||
|
||||
if (tso_desc_pool->pool_size) {
|
||||
qdf_spin_lock_bh(&tso_desc_pool->lock);
|
||||
tso_desc_pool->freelist = NULL;
|
||||
tso_desc_pool->num_free = 0;
|
||||
tso_desc_pool->pool_size = 0;
|
||||
qdf_spin_unlock_bh(&tso_desc_pool->lock);
|
||||
qdf_spinlock_destroy(&tso_desc_pool->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint32_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
|
||||
}
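/*
 * Per-pool sketch, for illustration only: the _by_id variants above allow a
 * single TSO pool to be set up and torn down independently. "soc",
 * "num_elem" and "pool_id" are placeholders.
 *
 *   if (QDF_IS_STATUS_ERROR(dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
 *                                                           pool_id)))
 *           return QDF_STATUS_E_NOMEM;
 *   if (QDF_IS_STATUS_ERROR(dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
 *                                                          pool_id))) {
 *           dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
 *           return QDF_STATUS_E_FAULT;
 *   }
 *   ...
 *   dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
 *   dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
 */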
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
|
||||
uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
|
||||
uint32_t desc_size;
|
||||
|
||||
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
|
||||
|
||||
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
|
||||
tso_num_seg_pool->num_free = 0;
|
||||
dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
|
||||
&tso_num_seg_pool->desc_pages,
|
||||
desc_size,
|
||||
num_elem, 0, true);
|
||||
|
||||
if (!tso_num_seg_pool->desc_pages.num_pages) {
|
||||
dp_err("Multi page alloc fail, tso_num_seg_pool");
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
uint32_t pool_id, i;
|
||||
QDF_STATUS status;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
|
||||
pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to allocate TSO num seg pool %d", pool_id);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
fail:
|
||||
for (i = 0; i < pool_id; i++)
|
||||
dp_tx_tso_num_seg_pool_free_by_id(soc, i);
|
||||
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
|
||||
|
||||
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
|
||||
dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
|
||||
&tso_num_seg_pool->desc_pages,
|
||||
0, true);
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint32_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
|
||||
}
|
||||
|
||||
QDF_STATUS
|
||||
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
|
||||
uint32_t desc_size;
|
||||
|
||||
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
|
||||
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
|
||||
|
||||
if (qdf_mem_multi_page_link(soc->osdev,
|
||||
&tso_num_seg_pool->desc_pages,
|
||||
desc_size,
|
||||
num_elem, true)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"invalid tso desc allocation - overflow num link");
|
||||
return QDF_STATUS_E_FAULT;
|
||||
}
|
||||
|
||||
tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
|
||||
*tso_num_seg_pool->desc_pages.cacheable_pages;
|
||||
tso_num_seg_pool->num_free = num_elem;
|
||||
tso_num_seg_pool->num_seg_pool_size = num_elem;
|
||||
|
||||
qdf_spinlock_create(&tso_num_seg_pool->lock);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
uint32_t pool_id;
|
||||
QDF_STATUS status;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++) {
|
||||
status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
|
||||
pool_id);
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
dp_err("failed to initialise TSO num seg pool %d", pool_id);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
|
||||
|
||||
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
|
||||
|
||||
if (tso_num_seg_pool->num_seg_pool_size) {
|
||||
qdf_spin_lock_bh(&tso_num_seg_pool->lock);
|
||||
tso_num_seg_pool->freelist = NULL;
|
||||
tso_num_seg_pool->num_free = 0;
|
||||
tso_num_seg_pool->num_seg_pool_size = 0;
|
||||
qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
|
||||
qdf_spinlock_destroy(&tso_num_seg_pool->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
uint32_t pool_id;
|
||||
|
||||
for (pool_id = 0; pool_id < num_pool; pool_id++)
|
||||
dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
|
||||
}
|
||||
#else
|
||||
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
|
||||
uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
}
|
||||
|
||||
QDF_STATUS
|
||||
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
|
||||
uint8_t pool_id)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
|
||||
uint32_t num_elem)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
|
||||
{
|
||||
}
|
||||
#endif
|
1552
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h
Normal file
1552
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h
Normal file
File diff suppressed because it is too large
@ -0,0 +1,757 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <cds_api.h>
|
||||
|
||||
/* OS abstraction libraries */
|
||||
#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
|
||||
#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
|
||||
#include <qdf_util.h> /* qdf_unlikely */
|
||||
#include "dp_types.h"
|
||||
#include "dp_tx_desc.h"
|
||||
#include "dp_peer.h"
|
||||
|
||||
#include <cdp_txrx_handle.h>
|
||||
#include "dp_internal.h"
|
||||
#define INVALID_FLOW_ID 0xFF
|
||||
#define MAX_INVALID_BIN 3
|
||||
#define GLOBAL_FLOW_POOL_STATS_LEN 25
|
||||
#define FLOW_POOL_LOG_LEN 50
|
||||
|
||||
#ifdef QCA_AC_BASED_FLOW_CONTROL
|
||||
/**
|
||||
* dp_tx_initialize_threshold() - Threshold of flow Pool initialization
|
||||
* @pool: flow_pool
|
||||
* @stop_threshold: stop threshold of certain AC
|
||||
* @start_threshold: start threshold of certain AC
|
||||
* @flow_pool_size: flow pool size
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
|
||||
uint32_t start_threshold,
|
||||
uint32_t stop_threshold,
|
||||
uint16_t flow_pool_size)
|
||||
{
|
||||
/* BE_BK threshold is the same as the previous threshold */
|
||||
pool->start_th[DP_TH_BE_BK] = (start_threshold
|
||||
* flow_pool_size) / 100;
|
||||
pool->stop_th[DP_TH_BE_BK] = (stop_threshold
|
||||
* flow_pool_size) / 100;
|
||||
|
||||
/* Update VI threshold based on BE_BK threshold */
|
||||
pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
|
||||
* FL_TH_VI_PERCENTAGE) / 100;
|
||||
pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
|
||||
* FL_TH_VI_PERCENTAGE) / 100;
|
||||
|
||||
/* Update VO threshold based on BE_BK threshold */
|
||||
pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
|
||||
* FL_TH_VO_PERCENTAGE) / 100;
|
||||
pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
|
||||
* FL_TH_VO_PERCENTAGE) / 100;
|
||||
|
||||
/* Update High Priority threshold based on BE_BK threshold */
|
||||
pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
|
||||
* FL_TH_HI_PERCENTAGE) / 100;
|
||||
pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
|
||||
* FL_TH_HI_PERCENTAGE) / 100;
|
||||
|
||||
dp_debug("tx flow control threshold is set, pool size is %d",
|
||||
flow_pool_size);
|
||||
}
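/*
 * Worked example with hypothetical numbers, for illustration only: with
 * start_threshold = 15, stop_threshold = 10 and flow_pool_size = 1024 the
 * code above gives
 *
 *   start_th[DP_TH_BE_BK] = (15 * 1024) / 100 = 153 descriptors
 *   stop_th[DP_TH_BE_BK]  = (10 * 1024) / 100 = 102 descriptors
 *
 * and the VI, VO and high-priority levels are then derived from the BE_BK
 * values using the FL_TH_VI_PERCENTAGE, FL_TH_VO_PERCENTAGE and
 * FL_TH_HI_PERCENTAGE scaling factors.
 */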
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_reattach() - Reattach flow_pool
|
||||
* @pool: flow_pool
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
|
||||
{
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: flow pool already allocated, attached %d times",
|
||||
__func__, pool->pool_create_cnt);
|
||||
|
||||
pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
|
||||
pool->pool_create_cnt++;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
|
||||
* @pool: flow_pool
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < FL_TH_MAX; i++) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Level %d :: Start threshold %d :: Stop threshold %d",
|
||||
i, pool->start_th[i], pool->stop_th[i]);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Level %d :: Maximum pause time %lu ms",
|
||||
i, pool->max_pause_time[i]);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Level %d :: Latest pause timestamp %lu",
|
||||
i, pool->latest_pause_time[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
|
||||
* @soc: dp soc
|
||||
* @pool: flow pool
|
||||
* @pool_status: flow pool status
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void
|
||||
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
|
||||
struct dp_tx_desc_pool_s *pool,
|
||||
enum flow_pool_status pool_status)
|
||||
{
|
||||
switch (pool_status) {
|
||||
case FLOW_POOL_ACTIVE_PAUSED:
|
||||
soc->pause_cb(pool->flow_pool_id,
|
||||
WLAN_NETIF_PRIORITY_QUEUE_ON,
|
||||
WLAN_DATA_FLOW_CTRL_PRI);
|
||||
fallthrough;
|
||||
|
||||
case FLOW_POOL_VO_PAUSED:
|
||||
soc->pause_cb(pool->flow_pool_id,
|
||||
WLAN_NETIF_VO_QUEUE_ON,
|
||||
WLAN_DATA_FLOW_CTRL_VO);
|
||||
fallthrough;
|
||||
|
||||
case FLOW_POOL_VI_PAUSED:
|
||||
soc->pause_cb(pool->flow_pool_id,
|
||||
WLAN_NETIF_VI_QUEUE_ON,
|
||||
WLAN_DATA_FLOW_CTRL_VI);
|
||||
fallthrough;
|
||||
|
||||
case FLOW_POOL_BE_BK_PAUSED:
|
||||
soc->pause_cb(pool->flow_pool_id,
|
||||
WLAN_NETIF_BE_BK_QUEUE_ON,
|
||||
WLAN_DATA_FLOW_CTRL_BE_BK);
|
||||
fallthrough;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
static inline void
|
||||
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
|
||||
uint32_t start_threshold,
|
||||
uint32_t stop_threshold,
|
||||
uint16_t flow_pool_size)
|
||||
|
||||
{
|
||||
/* INI is in percentage so divide by 100 */
|
||||
pool->start_th = (start_threshold * flow_pool_size) / 100;
|
||||
pool->stop_th = (stop_threshold * flow_pool_size) / 100;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
|
||||
{
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: flow pool already allocated, attached %d times",
|
||||
__func__, pool->pool_create_cnt);
|
||||
if (pool->avail_desc > pool->start_th)
|
||||
pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
|
||||
else
|
||||
pool->status = FLOW_POOL_ACTIVE_PAUSED;
|
||||
|
||||
pool->pool_create_cnt++;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
|
||||
{
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Start threshold %d :: Stop threshold %d",
|
||||
pool->start_th, pool->stop_th);
|
||||
}
|
||||
|
||||
static inline void
|
||||
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
|
||||
struct dp_tx_desc_pool_s *pool,
|
||||
enum flow_pool_status pool_status)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
|
||||
{
|
||||
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
|
||||
struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
|
||||
struct dp_tx_desc_pool_s *pool = NULL;
|
||||
struct dp_tx_desc_pool_s tmp_pool;
|
||||
int i;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"No of pool map received %d", pool_stats->pool_map_count);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"No of pool unmap received %d", pool_stats->pool_unmap_count);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Pkt dropped due to unavailability of pool %d",
|
||||
pool_stats->pkt_drop_no_pool);
|
||||
|
||||
/*
|
||||
* Nested spin lock.
|
||||
* Always take in below order.
|
||||
* flow_pool_array_lock -> flow_pool_lock
|
||||
*/
|
||||
qdf_spin_lock_bh(&soc->flow_pool_array_lock);
|
||||
for (i = 0; i < MAX_TXDESC_POOLS; i++) {
|
||||
pool = &soc->tx_desc[i];
|
||||
if (pool->status > FLOW_POOL_INVALID)
|
||||
continue;
|
||||
qdf_spin_lock_bh(&pool->flow_pool_lock);
|
||||
qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Flow_pool_id %d :: status %d",
|
||||
tmp_pool.flow_pool_id, tmp_pool.status);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Total %d :: Available %d",
|
||||
tmp_pool.pool_size, tmp_pool.avail_desc);
|
||||
dp_tx_flow_pool_dump_threshold(&tmp_pool);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Member flow_id %d :: flow_type %d",
|
||||
tmp_pool.flow_pool_id, tmp_pool.flow_type);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Pkt dropped due to unavailability of descriptors %d",
|
||||
tmp_pool.pkt_drop_no_desc);
|
||||
qdf_spin_lock_bh(&soc->flow_pool_array_lock);
|
||||
}
|
||||
qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
|
||||
}
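/*
 * Lock-ordering sketch, illustration only: any path that needs both locks is
 * expected to follow the order documented in dp_tx_dump_flow_pool_info()
 * above, i.e. take the array lock first and the per-pool lock second:
 *
 *   qdf_spin_lock_bh(&soc->flow_pool_array_lock);
 *   qdf_spin_lock_bh(&pool->flow_pool_lock);
 *   ...
 *   qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *   qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
 */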
|
||||
|
||||
void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
|
||||
{
|
||||
struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
|
||||
struct dp_tx_desc_pool_s *pool = NULL;
|
||||
char *comb_log_str;
|
||||
uint32_t comb_log_str_size;
|
||||
int bytes_written = 0;
|
||||
int i;
|
||||
|
||||
comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
|
||||
(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
|
||||
comb_log_str = qdf_mem_malloc(comb_log_str_size);
|
||||
if (!comb_log_str)
|
||||
return;
|
||||
|
||||
bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
|
||||
comb_log_str_size, "G:(%d,%d,%d) ",
|
||||
pool_stats->pool_map_count,
|
||||
pool_stats->pool_unmap_count,
|
||||
pool_stats->pkt_drop_no_pool);
|
||||
|
||||
for (i = 0; i < MAX_TXDESC_POOLS; i++) {
|
||||
pool = &soc->tx_desc[i];
|
||||
if (pool->status > FLOW_POOL_INVALID)
|
||||
continue;
|
||||
bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
|
||||
(bytes_written >= comb_log_str_size) ? 0 :
|
||||
comb_log_str_size - bytes_written,
|
||||
"| %d %d: (%d,%d,%d)",
|
||||
pool->flow_pool_id, pool->status,
|
||||
pool->pool_size, pool->avail_desc,
|
||||
pool->pkt_drop_no_desc);
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"FLOW_POOL_STATS %s", comb_log_str);
|
||||
|
||||
qdf_mem_free(comb_log_str);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_clear_flow_pool_stats() - clear flow pool statistics
|
||||
*
|
||||
* @soc: Handle to struct dp_soc.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
|
||||
{
|
||||
|
||||
if (!soc) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: soc is null", __func__);
|
||||
return;
|
||||
}
|
||||
qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_create_flow_pool() - create flow pool
|
||||
* @soc: Handle to struct dp_soc
|
||||
* @flow_pool_id: flow pool id
|
||||
* @flow_pool_size: flow pool size
|
||||
*
|
||||
* Return: flow_pool pointer / NULL for error
|
||||
*/
|
||||
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
|
||||
uint8_t flow_pool_id, uint32_t flow_pool_size)
|
||||
{
|
||||
struct dp_tx_desc_pool_s *pool;
|
||||
uint32_t stop_threshold;
|
||||
uint32_t start_threshold;
|
||||
|
||||
if (flow_pool_id >= MAX_TXDESC_POOLS) {
|
||||
dp_err("invalid flow_pool_id %d", flow_pool_id);
|
||||
return NULL;
|
||||
}
|
||||
pool = &soc->tx_desc[flow_pool_id];
|
||||
qdf_spin_lock_bh(&pool->flow_pool_lock);
|
||||
if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
|
||||
dp_tx_flow_pool_reattach(pool);
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_err("cannot alloc desc, status=%d, create_cnt=%d",
|
||||
pool->status, pool->pool_create_cnt);
|
||||
return pool;
|
||||
}
|
||||
|
||||
if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size, false)) {
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
|
||||
flow_pool_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size, false)) {
|
||||
dp_tx_desc_pool_free(soc, flow_pool_id, false);
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
|
||||
flow_pool_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
|
||||
start_threshold = stop_threshold +
|
||||
wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);
|
||||
|
||||
pool->flow_pool_id = flow_pool_id;
|
||||
pool->pool_size = flow_pool_size;
|
||||
pool->avail_desc = flow_pool_size;
|
||||
pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
|
||||
dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
|
||||
flow_pool_size);
|
||||
pool->pool_create_cnt++;
|
||||
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
|
||||
* @soc: Handle to struct dp_soc
|
||||
* @vdev_id: vdev_id corresponding to flow pool
|
||||
*
|
||||
* Check if it is OK to go ahead and delete the flow pool. One such case is
* MLO, where it is not OK to delete the flow pool while a link switch is in
* progress.
*
* Return: true if deletion is allowed, false otherwise
|
||||
*/
|
||||
static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
|
||||
uint8_t vdev_id)
|
||||
{
|
||||
struct dp_peer *peer;
|
||||
struct dp_peer *tmp_peer;
|
||||
struct dp_vdev *vdev = NULL;
|
||||
bool is_allow = true;
|
||||
|
||||
vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);
|
||||
|
||||
/* only check for sta mode */
|
||||
if (!vdev || vdev->opmode != wlan_op_mode_sta)
|
||||
goto comp_ret;
|
||||
|
||||
/*
 * Deleting the current pool is only disallowed when this vdev belongs
 * to an MLO connection and is connected; for a legacy connection it is
 * always allowed.
 */
|
||||
qdf_spin_lock_bh(&vdev->peer_list_lock);
|
||||
TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
|
||||
peer_list_elem,
|
||||
tmp_peer) {
|
||||
if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
|
||||
QDF_STATUS_SUCCESS) {
|
||||
if (peer->valid && !peer->sta_self_peer)
|
||||
is_allow = false;
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
|
||||
}
|
||||
}
|
||||
qdf_spin_unlock_bh(&vdev->peer_list_lock);
|
||||
|
||||
comp_ret:
|
||||
if (vdev)
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
|
||||
|
||||
return is_allow;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_delete_flow_pool() - delete flow pool
|
||||
* @soc: Handle to struct dp_soc
|
||||
* @pool: flow pool pointer
|
||||
* @force: free pool forcefully
|
||||
*
|
||||
* Delete flow_pool if all tx descriptors are available.
|
||||
* Otherwise put it in FLOW_POOL_INVALID state.
|
||||
* If force is set then pull all available descriptors to
|
||||
* global pool.
|
||||
*
|
||||
* Return: 0 for success or error
|
||||
*/
|
||||
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
|
||||
bool force)
|
||||
{
|
||||
struct dp_vdev *vdev;
|
||||
enum flow_pool_status pool_status;
|
||||
|
||||
if (!soc || !pool) {
|
||||
dp_err("pool or soc is NULL");
|
||||
QDF_ASSERT(0);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
|
||||
pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
|
||||
pool->pool_size, pool->status);
|
||||
|
||||
if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
|
||||
dp_info("skip pool id %d delete as it's not allowed",
|
||||
pool->flow_pool_id);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
qdf_spin_lock_bh(&pool->flow_pool_lock);
|
||||
if (!pool->pool_create_cnt) {
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_err("flow pool either not created or already deleted");
|
||||
return -ENOENT;
|
||||
}
|
||||
pool->pool_create_cnt--;
|
||||
if (pool->pool_create_cnt) {
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_err("pool is still attached, pending detach %d",
|
||||
pool->pool_create_cnt);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (pool->avail_desc < pool->pool_size) {
|
||||
pool_status = pool->status;
|
||||
pool->status = FLOW_POOL_INVALID;
|
||||
dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);
|
||||
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
/* Reset TX desc associated to this Vdev as NULL */
|
||||
vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
|
||||
DP_MOD_ID_MISC);
|
||||
if (vdev) {
|
||||
dp_tx_desc_flush(vdev->pdev, vdev, false);
|
||||
dp_vdev_unref_delete(soc, vdev,
|
||||
DP_MOD_ID_MISC);
|
||||
}
|
||||
dp_err("avail desc less than pool size");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* We have all the descriptors for the pool, we can delete the pool */
|
||||
dp_tx_desc_pool_deinit(soc, pool->flow_pool_id, false);
|
||||
dp_tx_desc_pool_free(soc, pool->flow_pool_id, false);
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
|
||||
* @pdev: Handle to struct dp_pdev
|
||||
* @pool: flow_pool
|
||||
* @vdev_id: flow_id /vdev_id
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
|
||||
struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
|
||||
{
|
||||
struct dp_vdev *vdev;
|
||||
struct dp_soc *soc = pdev->soc;
|
||||
|
||||
vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
|
||||
if (!vdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: invalid vdev_id %d",
|
||||
__func__, vdev_id);
|
||||
return;
|
||||
}
|
||||
|
||||
vdev->pool = pool;
|
||||
qdf_spin_lock_bh(&pool->flow_pool_lock);
|
||||
pool->pool_owner_ctx = soc;
|
||||
pool->flow_pool_id = vdev_id;
|
||||
qdf_spin_unlock_bh(&pool->flow_pool_lock);
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
|
||||
* @pdev: Handle to struct dp_pdev
|
||||
* @pool: flow_pool
|
||||
* @vdev_id: flow_id /vdev_id
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
|
||||
struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
|
||||
{
|
||||
struct dp_vdev *vdev;
|
||||
struct dp_soc *soc = pdev->soc;
|
||||
|
||||
vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
|
||||
if (!vdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: invalid vdev_id %d",
|
||||
__func__, vdev_id);
|
||||
return;
|
||||
}
|
||||
|
||||
vdev->pool = NULL;
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
|
||||
* @pdev: Handle to struct dp_pdev
|
||||
* @flow_id: flow id
|
||||
* @flow_type: flow type
|
||||
* @flow_pool_id: pool id
|
||||
* @flow_pool_size: pool size
|
||||
*
|
||||
* Process below target to host message
|
||||
* HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
|
||||
uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
|
||||
{
|
||||
struct dp_soc *soc = pdev->soc;
|
||||
struct dp_tx_desc_pool_s *pool;
|
||||
enum htt_flow_type type = flow_type;
|
||||
|
||||
|
||||
dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
|
||||
flow_id, flow_type, flow_pool_id, flow_pool_size);
|
||||
|
||||
if (qdf_unlikely(!soc)) {
|
||||
dp_err("soc is NULL");
|
||||
return QDF_STATUS_E_FAULT;
|
||||
}
|
||||
soc->pool_stats.pool_map_count++;
|
||||
|
||||
pool = dp_tx_create_flow_pool(soc, flow_pool_id,
|
||||
flow_pool_size);
|
||||
if (!pool) {
|
||||
dp_err("creation of flow_pool %d size %d failed",
|
||||
flow_pool_id, flow_pool_size);
|
||||
return QDF_STATUS_E_RESOURCES;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
|
||||
case FLOW_TYPE_VDEV:
|
||||
dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
|
||||
break;
|
||||
default:
|
||||
dp_err("flow type %d not supported", type);
|
||||
break;
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
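/*
 * Call sketch, assumption for illustration: the HTT T2H message handler is
 * expected to turn HTT_T2H_MSG_TYPE_FLOW_POOL_MAP into a call of the form
 *
 *   dp_tx_flow_pool_map_handler(pdev, flow_id, FLOW_TYPE_VDEV,
 *                               flow_pool_id, flow_pool_size);
 *
 * which mirrors what dp_tx_flow_pool_map() below does for the cdp interface.
 */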
|
||||
|
||||
/**
|
||||
* dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
|
||||
* @pdev: Handle to struct dp_pdev
|
||||
* @flow_id: flow id
|
||||
* @flow_type: flow type
|
||||
* @flow_pool_id: pool id
|
||||
*
|
||||
* Process below target to host message
|
||||
* HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
|
||||
uint8_t flow_type, uint8_t flow_pool_id)
|
||||
{
|
||||
struct dp_soc *soc = pdev->soc;
|
||||
struct dp_tx_desc_pool_s *pool;
|
||||
enum htt_flow_type type = flow_type;
|
||||
|
||||
dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
|
||||
flow_pool_id);
|
||||
|
||||
if (qdf_unlikely(!pdev)) {
|
||||
dp_err("pdev is NULL");
|
||||
return;
|
||||
}
|
||||
soc->pool_stats.pool_unmap_count++;
|
||||
|
||||
pool = &soc->tx_desc[flow_pool_id];
|
||||
dp_info("pool status: %d", pool->status);
|
||||
|
||||
if (pool->status == FLOW_POOL_INACTIVE) {
|
||||
dp_err("flow pool id: %d is inactive, ignore unmap",
|
||||
flow_pool_id);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
|
||||
case FLOW_TYPE_VDEV:
|
||||
dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
|
||||
break;
|
||||
default:
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: flow type %d not supported !!!",
|
||||
__func__, type);
|
||||
return;
|
||||
}
|
||||
|
||||
/* only delete if all descriptors are available */
|
||||
dp_tx_delete_flow_pool(soc, pool, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_control_init() - Initialize tx flow control
|
||||
* @soc: Handle to struct dp_soc
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_flow_control_init(struct dp_soc *soc)
|
||||
{
|
||||
qdf_spinlock_create(&soc->flow_pool_array_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
|
||||
* @soc: Handle to struct dp_soc
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
|
||||
{
|
||||
struct dp_tx_desc_pool_s *tx_desc_pool;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_TXDESC_POOLS; i++) {
|
||||
tx_desc_pool = &((soc)->tx_desc[i]);
|
||||
if (!tx_desc_pool->desc_pages.num_pages)
|
||||
continue;
|
||||
|
||||
dp_tx_desc_pool_deinit(soc, i, false);
|
||||
dp_tx_desc_pool_free(soc, i, false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_tx_flow_control_deinit() - Deregister fw based tx flow control
|
||||
* @soc: Handle to struct dp_soc
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_flow_control_deinit(struct dp_soc *soc)
|
||||
{
|
||||
dp_tx_desc_pool_dealloc(soc);
|
||||
|
||||
qdf_spinlock_destroy(&soc->flow_pool_array_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_txrx_register_pause_cb() - Register pause callback
|
||||
* @handle: Handle to struct dp_soc
|
||||
* @pause_cb: Tx pause_cb
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
|
||||
tx_pause_callback pause_cb)
|
||||
{
|
||||
struct dp_soc *soc = (struct dp_soc *)handle;
|
||||
|
||||
if (!soc || !pause_cb) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("soc or pause_cb is NULL"));
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
soc->pause_cb = pause_cb;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
|
||||
uint8_t vdev_id)
|
||||
{
|
||||
struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
|
||||
struct dp_pdev *pdev =
|
||||
dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
|
||||
int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
|
||||
|
||||
if (!pdev) {
|
||||
dp_err("pdev is NULL");
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
|
||||
vdev_id, tx_ring_size);
|
||||
}
|
||||
|
||||
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
|
||||
uint8_t vdev_id)
|
||||
{
|
||||
struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
|
||||
struct dp_pdev *pdev =
|
||||
dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
|
||||
|
||||
if (!pdev) {
|
||||
dp_err("pdev is NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
|
||||
FLOW_TYPE_VDEV, vdev_id);
|
||||
}
|
925
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_txrx_wds.c
Normal file
925
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_txrx_wds.c
Normal file
@ -0,0 +1,925 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#include "htt.h"
|
||||
#include "dp_peer.h"
|
||||
#include "hal_rx.h"
|
||||
#include "hal_api.h"
|
||||
#include "qdf_nbuf.h"
|
||||
#include "dp_types.h"
|
||||
#include "dp_internal.h"
|
||||
#include "dp_tx.h"
|
||||
#include "enet.h"
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include "dp_mon.h"
|
||||
#endif
|
||||
#include "dp_txrx_wds.h"
|
||||
|
||||
/* Generic AST entry aging timer value */
|
||||
#define DP_AST_AGING_TIMER_DEFAULT_MS 5000
|
||||
#define DP_INVALID_AST_IDX 0xffff
|
||||
#define DP_INVALID_FLOW_PRIORITY 0xff
|
||||
#define DP_PEER_AST0_FLOW_MASK 0x4
|
||||
#define DP_PEER_AST1_FLOW_MASK 0x8
|
||||
#define DP_PEER_AST2_FLOW_MASK 0x1
|
||||
#define DP_PEER_AST3_FLOW_MASK 0x2
|
||||
#define DP_MAX_AST_INDEX_PER_PEER 4
|
||||
|
||||
#ifdef WLAN_FEATURE_MULTI_AST_DEL
|
||||
|
||||
void dp_peer_free_peer_ase_list(struct dp_soc *soc,
|
||||
struct peer_del_multi_wds_entries *wds_list)
|
||||
{
|
||||
struct peer_wds_entry_list *wds_entry, *tmp_entry;
|
||||
|
||||
TAILQ_FOREACH_SAFE(wds_entry, &wds_list->ase_list,
|
||||
ase_list_elem, tmp_entry) {
|
||||
dp_peer_debug("type: %d mac_addr: " QDF_MAC_ADDR_FMT,
|
||||
wds_entry->type,
|
||||
QDF_MAC_ADDR_REF(wds_entry->dest_addr));
|
||||
TAILQ_REMOVE(&wds_list->ase_list, wds_entry, ase_list_elem);
|
||||
wds_list->num_entries--;
|
||||
qdf_mem_free(wds_entry);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
dp_pdev_build_peer_ase_list(struct dp_soc *soc, struct dp_peer *peer,
|
||||
void *arg)
|
||||
{
|
||||
struct dp_ast_entry *ase, *temp_ase;
|
||||
struct peer_del_multi_wds_entries *list = arg;
|
||||
struct peer_wds_entry_list *wds_entry;
|
||||
|
||||
if (!soc || !peer || !arg) {
|
||||
dp_peer_err("Invalid input");
|
||||
return;
|
||||
}
|
||||
|
||||
list->vdev_id = peer->vdev->vdev_id;
|
||||
DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
|
||||
if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
|
||||
ase->type != CDP_TXRX_AST_TYPE_DA)
|
||||
continue;
|
||||
|
||||
if (ase->is_active) {
|
||||
ase->is_active = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ase->delete_in_progress) {
|
||||
dp_info_rl("Del set addr:" QDF_MAC_ADDR_FMT " type:%d",
|
||||
QDF_MAC_ADDR_REF(ase->mac_addr.raw),
|
||||
ase->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ase->is_mapped)
|
||||
soc->ast_table[ase->ast_idx] = NULL;
|
||||
|
||||
if (!ase->next_hop) {
|
||||
dp_peer_unlink_ast_entry(soc, ase, peer);
|
||||
continue;
|
||||
}
|
||||
|
||||
wds_entry = (struct peer_wds_entry_list *)
|
||||
qdf_mem_malloc(sizeof(*wds_entry));
|
||||
if (!wds_entry) {
|
||||
dp_peer_err("%pK: fail to allocate wds_entry", soc);
|
||||
dp_peer_free_peer_ase_list(soc, list);
|
||||
return;
|
||||
}
|
||||
|
||||
DP_STATS_INC(soc, ast.aged_out, 1);
|
||||
ase->delete_in_progress = true;
|
||||
wds_entry->dest_addr = ase->mac_addr.raw;
|
||||
wds_entry->type = ase->type;
|
||||
|
||||
if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE))
|
||||
wds_entry->delete_in_fw = false;
|
||||
else
|
||||
wds_entry->delete_in_fw = true;
|
||||
|
||||
dp_peer_debug("ase->type: %d pdev: %u vdev: %u mac_addr: " QDF_MAC_ADDR_FMT " next_hop: %u peer: %u",
|
||||
ase->type, ase->pdev_id, ase->vdev_id,
|
||||
QDF_MAC_ADDR_REF(ase->mac_addr.raw),
|
||||
ase->next_hop, ase->peer_id);
|
||||
TAILQ_INSERT_TAIL(&list->ase_list, wds_entry, ase_list_elem);
|
||||
list->num_entries++;
|
||||
}
|
||||
dp_peer_info("Total num of entries :%d", list->num_entries);
|
||||
}
|
||||
|
||||
static void
|
||||
dp_peer_age_multi_ast_entries(struct dp_soc *soc, void *arg,
|
||||
enum dp_mod_id mod_id)
|
||||
{
|
||||
uint8_t i;
|
||||
struct dp_pdev *pdev = NULL;
|
||||
struct peer_del_multi_wds_entries wds_list = {0};
|
||||
|
||||
TAILQ_INIT(&wds_list.ase_list);
|
||||
for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
|
||||
pdev = soc->pdev_list[i];
|
||||
dp_pdev_iterate_peer(pdev, dp_pdev_build_peer_ase_list,
|
||||
&wds_list, mod_id);
|
||||
if (wds_list.num_entries > 0) {
|
||||
dp_peer_ast_send_multi_wds_del(soc, wds_list.vdev_id,
|
||||
&wds_list);
|
||||
dp_peer_free_peer_ase_list(soc, &wds_list);
|
||||
} else {
|
||||
dp_peer_debug("No AST entries for pdev:%u",
|
||||
pdev->pdev_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* WLAN_FEATURE_MULTI_AST_DEL */
|
||||
|
||||
static void
|
||||
dp_peer_age_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
|
||||
{
|
||||
struct dp_ast_entry *ase, *temp_ase;
|
||||
struct ast_del_ctxt *del_ctxt = (struct ast_del_ctxt *)arg;
|
||||
|
||||
if ((del_ctxt->del_count >= soc->max_ast_ageout_count) &&
|
||||
!del_ctxt->age) {
|
||||
return;
|
||||
}
|
||||
|
||||
DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
|
||||
/*
|
||||
* Do not expire static ast entries and HM WDS entries
|
||||
*/
|
||||
if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
|
||||
ase->type != CDP_TXRX_AST_TYPE_DA)
|
||||
continue;
|
||||
|
||||
if (ase->is_active) {
|
||||
if (del_ctxt->age)
|
||||
ase->is_active = FALSE;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (del_ctxt->del_count < soc->max_ast_ageout_count) {
|
||||
DP_STATS_INC(soc, ast.aged_out, 1);
|
||||
dp_peer_del_ast(soc, ase);
|
||||
del_ctxt->del_count++;
|
||||
} else {
|
||||
soc->pending_ageout = true;
|
||||
if (!del_ctxt->age)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
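/*
 * Note on the aging budget above: at most soc->max_ast_ageout_count WDS/DA
 * entries are deleted in one pass; once the budget is exhausted,
 * soc->pending_ageout is set so that the next run of the aging timer
 * callback below resumes the age-out where this pass stopped.
 */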
|
||||
|
||||
static void
|
||||
dp_peer_age_mec_entries(struct dp_soc *soc)
|
||||
{
|
||||
uint32_t index;
|
||||
struct dp_mec_entry *mecentry, *mecentry_next;
|
||||
|
||||
TAILQ_HEAD(, dp_mec_entry) free_list;
|
||||
TAILQ_INIT(&free_list);
|
||||
|
||||
for (index = 0; index <= soc->mec_hash.mask; index++) {
|
||||
qdf_spin_lock_bh(&soc->mec_lock);
|
||||
/*
|
||||
* Expire MEC entry every n sec.
|
||||
*/
|
||||
if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
|
||||
TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
|
||||
hash_list_elem, mecentry_next) {
|
||||
if (mecentry->is_active) {
|
||||
mecentry->is_active = FALSE;
|
||||
continue;
|
||||
}
|
||||
dp_peer_mec_detach_entry(soc, mecentry,
|
||||
&free_list);
|
||||
}
|
||||
}
|
||||
qdf_spin_unlock_bh(&soc->mec_lock);
|
||||
}
|
||||
|
||||
dp_peer_mec_free_list(soc, &free_list);
|
||||
}
|
||||
|
||||
#ifdef WLAN_FEATURE_MULTI_AST_DEL
|
||||
static void dp_ast_aging_timer_fn(void *soc_hdl)
|
||||
{
|
||||
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
|
||||
struct ast_del_ctxt del_ctxt = {0};
|
||||
|
||||
if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
|
||||
del_ctxt.age = true;
|
||||
soc->wds_ast_aging_timer_cnt = 0;
|
||||
}
|
||||
|
||||
if (soc->pending_ageout || del_ctxt.age) {
|
||||
soc->pending_ageout = false;
|
||||
|
||||
/* AST list access lock */
|
||||
qdf_spin_lock_bh(&soc->ast_lock);
|
||||
|
||||
if (soc->multi_peer_grp_cmd_supported)
|
||||
dp_peer_age_multi_ast_entries(soc, NULL, DP_MOD_ID_AST);
|
||||
else
|
||||
dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
|
||||
&del_ctxt, DP_MOD_ID_AST);
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* If NSS offload is enabled, the MEC timeout
|
||||
* will be managed by NSS.
|
||||
*/
|
||||
if (qdf_atomic_read(&soc->mec_cnt) &&
|
||||
!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
|
||||
dp_peer_age_mec_entries(soc);
|
||||
|
||||
if (qdf_atomic_read(&soc->cmn_init_done))
|
||||
qdf_timer_mod(&soc->ast_aging_timer,
|
||||
DP_AST_AGING_TIMER_DEFAULT_MS);
|
||||
}
|
||||
#else
|
||||
static void dp_ast_aging_timer_fn(void *soc_hdl)
|
||||
{
|
||||
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
|
||||
struct ast_del_ctxt del_ctxt = {0};
|
||||
|
||||
if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
|
||||
del_ctxt.age = true;
|
||||
soc->wds_ast_aging_timer_cnt = 0;
|
||||
}
|
||||
|
||||
if (soc->pending_ageout || del_ctxt.age) {
|
||||
soc->pending_ageout = false;
|
||||
|
||||
/* AST list access lock */
|
||||
qdf_spin_lock_bh(&soc->ast_lock);
|
||||
dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
|
||||
&del_ctxt, DP_MOD_ID_AST);
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* If NSS offload is enabled, the MEC timeout
|
||||
* will be managed by NSS.
|
||||
*/
|
||||
if (qdf_atomic_read(&soc->mec_cnt) &&
|
||||
!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
|
||||
dp_peer_age_mec_entries(soc);
|
||||
|
||||
if (qdf_atomic_read(&soc->cmn_init_done))
|
||||
qdf_timer_mod(&soc->ast_aging_timer,
|
||||
DP_AST_AGING_TIMER_DEFAULT_MS);
|
||||
}
|
||||
#endif /* WLAN_FEATURE_MULTI_AST_DEL */
|
||||
|
||||
#ifndef IPA_WDS_EASYMESH_FEATURE
|
||||
void dp_soc_wds_attach(struct dp_soc *soc)
|
||||
{
|
||||
if (soc->ast_offload_support)
|
||||
return;
|
||||
|
||||
soc->wds_ast_aging_timer_cnt = 0;
|
||||
soc->pending_ageout = false;
|
||||
qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
|
||||
dp_ast_aging_timer_fn, (void *)soc,
|
||||
QDF_TIMER_TYPE_WAKE_APPS);
|
||||
|
||||
qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
|
||||
}
|
||||
|
||||
void dp_soc_wds_detach(struct dp_soc *soc)
|
||||
{
|
||||
qdf_timer_stop(&soc->ast_aging_timer);
|
||||
qdf_timer_free(&soc->ast_aging_timer);
|
||||
}
|
||||
#else
|
||||
void dp_soc_wds_attach(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
void dp_soc_wds_detach(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
|
||||
{
|
||||
struct dp_soc *soc;
|
||||
QDF_STATUS add_mec_status;
|
||||
uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;
|
||||
|
||||
if (!vdev->mec_enabled)
|
||||
return;
|
||||
|
||||
/* MEC required only in STA mode */
|
||||
if (vdev->opmode != wlan_op_mode_sta)
|
||||
return;
|
||||
|
||||
soc = vdev->pdev->soc;
|
||||
|
||||
for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
|
||||
mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
|
||||
status[(QDF_MAC_ADDR_SIZE - 2) + i];
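/*
 * Example, for illustration only: if status[4..9] holds
 * {0x66, 0x55, 0x44, 0x33, 0x22, 0x11}, the loop above reverses the byte
 * order so that mac_addr[] becomes {0x11, 0x22, 0x33, 0x44, 0x55, 0x66}.
 */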
|
||||
|
||||
dp_peer_debug("%pK: MEC add for mac_addr "QDF_MAC_ADDR_FMT,
|
||||
soc, QDF_MAC_ADDR_REF(mac_addr));
|
||||
|
||||
if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) {
|
||||
add_mec_status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
|
||||
dp_peer_debug("%pK: MEC add status %d", vdev, add_mec_status);
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef QCA_HOST_MODE_WIFI_DISABLED
|
||||
|
||||
void
|
||||
dp_rx_da_learn(struct dp_soc *soc,
|
||||
uint8_t *rx_tlv_hdr,
|
||||
struct dp_txrx_peer *ta_txrx_peer,
|
||||
qdf_nbuf_t nbuf)
|
||||
{
|
||||
struct dp_peer *base_peer;
|
||||
/* For HKv2, DA port learning is not needed */
|
||||
if (qdf_likely(soc->ast_override_support))
|
||||
return;
|
||||
|
||||
if (qdf_unlikely(!ta_txrx_peer))
|
||||
return;
|
||||
|
||||
if (qdf_unlikely(ta_txrx_peer->vdev->opmode != wlan_op_mode_ap))
|
||||
return;
|
||||
|
||||
if (!soc->da_war_enabled)
|
||||
return;
|
||||
|
||||
if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
|
||||
!qdf_nbuf_is_da_mcbc(nbuf))) {
|
||||
base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
|
||||
DP_MOD_ID_AST);
|
||||
|
||||
if (base_peer) {
|
||||
dp_peer_add_ast(soc,
|
||||
base_peer,
|
||||
qdf_nbuf_data(nbuf),
|
||||
CDP_TXRX_AST_TYPE_DA,
|
||||
DP_AST_FLAGS_HM);
|
||||
|
||||
dp_peer_unref_delete(base_peer, DP_MOD_ID_AST);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef WDS_VENDOR_EXTENSION
|
||||
QDF_STATUS
|
||||
dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
u_int32_t val)
|
||||
{
|
||||
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
|
||||
struct dp_peer *peer;
|
||||
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
|
||||
DP_MOD_ID_MISC);
|
||||
if (!vdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("vdev is NULL for vdev_id %d"), vdev_id);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
|
||||
|
||||
if (peer) {
|
||||
peer->txrx_peer->wds_ecm.wds_rx_filter = 1;
|
||||
peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr =
|
||||
(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
|
||||
peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr =
|
||||
(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
}
|
||||
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
QDF_STATUS
|
||||
dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac, int wds_tx_ucast,
|
||||
int wds_tx_mcast)
|
||||
{
|
||||
struct dp_peer *peer =
|
||||
dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
|
||||
peer_mac, 0,
|
||||
vdev_id,
|
||||
DP_MOD_ID_AST);
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("peer is NULL for mac" QDF_MAC_ADDR_FMT
|
||||
" vdev_id %d"), QDF_MAC_ADDR_REF(peer_mac),
|
||||
vdev_id);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (!peer->txrx_peer) {
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
if (wds_tx_ucast || wds_tx_mcast) {
|
||||
peer->txrx_peer->wds_enabled = 1;
|
||||
peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
|
||||
peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
|
||||
} else {
|
||||
peer->txrx_peer->wds_enabled = 0;
|
||||
peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = 0;
|
||||
peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = 0;
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"Policy Update set to :\n");
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"peer->wds_enabled %d\n", peer->wds_enabled);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"peer->wds_ecm.wds_tx_ucast_4addr %d\n",
|
||||
peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"peer->wds_ecm.wds_tx_mcast_4addr %d\n",
|
||||
peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr);
|
||||
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
|
||||
struct dp_vdev *vdev,
|
||||
struct dp_txrx_peer *txrx_peer)
|
||||
{
|
||||
struct dp_peer *bss_peer;
|
||||
int fr_ds, to_ds, rx_3addr, rx_4addr;
|
||||
int rx_policy_ucast, rx_policy_mcast;
|
||||
hal_soc_handle_t hal_soc = vdev->pdev->soc->hal_soc;
|
||||
int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);
|
||||
|
||||
if (vdev->opmode == wlan_op_mode_ap) {
|
||||
bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
|
||||
/* if wds policy check is not enabled on this vdev, accept all frames */
|
||||
if (bss_peer && !bss_peer->txrx_peer->wds_ecm.wds_rx_filter) {
|
||||
dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
|
||||
return 1;
|
||||
}
|
||||
rx_policy_ucast = bss_peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr;
|
||||
rx_policy_mcast = bss_peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr;
|
||||
dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
|
||||
} else { /* sta mode */
|
||||
if (!txrx_peer->wds_ecm.wds_rx_filter)
|
||||
return 1;
|
||||
|
||||
rx_policy_ucast = txrx_peer->wds_ecm.wds_rx_ucast_4addr;
|
||||
rx_policy_mcast = txrx_peer->wds_ecm.wds_rx_mcast_4addr;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------
|
||||
* self
|
||||
* peer- rx rx-
|
||||
* wds ucast mcast dir policy accept note
|
||||
* ------------------------------------------------
|
||||
* 1 1 0 11 x1 1 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
|
||||
* 1 1 0 01 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
|
||||
* 1 1 0 10 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
|
||||
* 1 1 0 00 x1 0 bad frame, won't see it
|
||||
* 1 0 1 11 1x 1 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
|
||||
* 1 0 1 01 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
|
||||
* 1 0 1 10 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
|
||||
* 1 0 1 00 1x 0 bad frame, won't see it
|
||||
* 1 1 0 11 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
|
||||
* 1 1 0 01 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
|
||||
* 1 1 0 10 x0 1 AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
|
||||
* 1 1 0 00 x0 0 bad frame, won't see it
|
||||
* 1 0 1 11 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
|
||||
* 1 0 1 01 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
|
||||
* 1 0 1 10 0x 1 AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
|
||||
* 1 0 1 00 0x 0 bad frame, won't see it
|
||||
*
|
||||
* 0 x x 11 xx 0 we only accept to-ds Rx frames from non-wds peers in AP mode.
|
||||
* 0 x x 01 xx 1
|
||||
* 0 x x 10 xx 0
|
||||
* 0 x x 00 xx 0 bad frame, won't see it
|
||||
* ------------------------------------------------
|
||||
*/
|
||||
|
||||
fr_ds = hal_rx_mpdu_get_fr_ds(hal_soc, rx_tlv_hdr);
|
||||
to_ds = hal_rx_mpdu_get_to_ds(hal_soc, rx_tlv_hdr);
|
||||
rx_3addr = fr_ds ^ to_ds;
|
||||
rx_4addr = fr_ds & to_ds;
|
||||
|
||||
if (vdev->opmode == wlan_op_mode_ap) {
|
||||
if ((!txrx_peer->wds_enabled && rx_3addr && to_ds) ||
|
||||
(txrx_peer->wds_enabled && !rx_mcast &&
|
||||
(rx_4addr == rx_policy_ucast)) ||
|
||||
(txrx_peer->wds_enabled && rx_mcast &&
|
||||
(rx_4addr == rx_policy_mcast))) {
|
||||
return 1;
|
||||
}
|
||||
} else { /* sta mode */
|
||||
if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
|
||||
(rx_mcast && (rx_4addr == rx_policy_mcast))) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
|
||||
|
||||
#ifdef QCA_PEER_MULTIQ_SUPPORT
|
||||
|
||||
void dp_peer_reset_flowq_map(struct dp_peer *peer)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
if (!peer)
|
||||
return;
|
||||
|
||||
for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
|
||||
peer->peer_ast_flowq_idx[i].is_valid = false;
|
||||
peer->peer_ast_flowq_idx[i].valid_tid_mask = false;
|
||||
peer->peer_ast_flowq_idx[i].ast_idx = DP_INVALID_AST_IDX;
|
||||
peer->peer_ast_flowq_idx[i].flowQ = DP_INVALID_FLOW_PRIORITY;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_peer_get_flowid_from_flowmask() - get flow id from flow mask
|
||||
* @peer: dp peer handle
|
||||
* @mask: flow mask
|
||||
*
|
||||
* Return: flow id
|
||||
*/
|
||||
static int dp_peer_get_flowid_from_flowmask(struct dp_peer *peer,
|
||||
uint8_t mask)
|
||||
{
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Invalid peer\n", __func__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (mask & DP_PEER_AST0_FLOW_MASK)
|
||||
return DP_PEER_AST_FLOWQ_UDP;
|
||||
else if (mask & DP_PEER_AST1_FLOW_MASK)
|
||||
return DP_PEER_AST_FLOWQ_NON_UDP;
|
||||
else if (mask & DP_PEER_AST2_FLOW_MASK)
|
||||
return DP_PEER_AST_FLOWQ_HI_PRIO;
|
||||
else if (mask & DP_PEER_AST3_FLOW_MASK)
|
||||
return DP_PEER_AST_FLOWQ_LOW_PRIO;
|
||||
|
||||
return DP_PEER_AST_FLOWQ_MAX;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_peer_get_ast_valid() - get ast index valid from mask
|
||||
* @mask: mask for ast valid bits
|
||||
* @index: index for an ast
|
||||
*
|
||||
* Return: 1 if ast index is valid from mask else 0
|
||||
*/
|
||||
static inline bool dp_peer_get_ast_valid(uint8_t mask, uint16_t index)
|
||||
{
|
||||
if (index == 0)
|
||||
return 1;
|
||||
return ((mask) & (1 << ((index) - 1)));
|
||||
}
|
||||
|
||||
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
|
||||
bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
|
||||
struct dp_ast_flow_override_info *ast_info)
|
||||
{
|
||||
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
|
||||
struct dp_peer *peer = NULL;
|
||||
uint8_t i;
|
||||
|
||||
/*
|
||||
* Ast flow override feature is supported
|
||||
* only for connected client
|
||||
*/
|
||||
if (is_wds)
|
||||
return;
|
||||
|
||||
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_AST);
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Invalid peer\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Valid only in AP mode */
|
||||
if (peer->vdev->opmode != wlan_op_mode_ap) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Peer ast flow map not in STA mode\n", __func__);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Making sure the peer is for this mac address */
|
||||
if (!qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
|
||||
(struct qdf_mac_addr *)peer->mac_addr.raw)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Peer mac address mismatch\n", __func__);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Ast entry flow mapping not valid for self peer map */
|
||||
if (qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
|
||||
(struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Ast flow mapping not valid for self peer \n", __func__);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Fill up ast index <---> flow id mapping table for this peer */
|
||||
for (i = 0; i < DP_MAX_AST_INDEX_PER_PEER; i++) {
|
||||
|
||||
/* Check if this ast index is valid */
|
||||
peer->peer_ast_flowq_idx[i].is_valid =
|
||||
dp_peer_get_ast_valid(ast_info->ast_valid_mask, i);
|
||||
if (!peer->peer_ast_flowq_idx[i].is_valid)
|
||||
continue;
|
||||
|
||||
/* Get the flow queue id which is mapped to this ast index */
|
||||
peer->peer_ast_flowq_idx[i].flowQ =
|
||||
dp_peer_get_flowid_from_flowmask(peer,
|
||||
ast_info->ast_flow_mask[i]);
|
||||
/*
|
||||
* Update tid valid mask only if flow id HIGH or
|
||||
* Low priority
|
||||
*/
|
||||
if (peer->peer_ast_flowq_idx[i].flowQ ==
|
||||
DP_PEER_AST_FLOWQ_HI_PRIO) {
|
||||
peer->peer_ast_flowq_idx[i].valid_tid_mask =
|
||||
ast_info->tid_valid_hi_pri_mask;
|
||||
} else if (peer->peer_ast_flowq_idx[i].flowQ ==
|
||||
DP_PEER_AST_FLOWQ_LOW_PRIO) {
|
||||
peer->peer_ast_flowq_idx[i].valid_tid_mask =
|
||||
ast_info->tid_valid_low_pri_mask;
|
||||
}
|
||||
|
||||
/* Save the ast index for this entry */
|
||||
peer->peer_ast_flowq_idx[i].ast_idx = ast_info->ast_idx[i];
|
||||
}
|
||||
|
||||
if (soc->cdp_soc.ol_ops->peer_ast_flowid_map) {
|
||||
soc->cdp_soc.ol_ops->peer_ast_flowid_map(
|
||||
soc->ctrl_psoc, peer->peer_id,
|
||||
peer->vdev->vdev_id, peer_mac_addr);
|
||||
}
|
||||
|
||||
end:
|
||||
/* Release peer reference */
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
}
|
||||
|
||||
int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
|
||||
uint16_t vdev_id, uint8_t *peer_mac_addr,
|
||||
uint8_t flow_id, uint8_t tid)
|
||||
{
|
||||
struct dp_peer *peer = NULL;
|
||||
uint8_t i;
|
||||
uint16_t ast_index;
|
||||
|
||||
if (flow_id >= DP_PEER_AST_FLOWQ_MAX) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid Flow ID %d\n", flow_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
peer = dp_peer_find_hash_find((struct dp_soc *)soc,
|
||||
peer_mac_addr, 0, vdev_id,
|
||||
DP_MOD_ID_AST);
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Invalid peer\n", __func__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Loop over the ast entry <----> flow-id mapping to find
|
||||
* which ast index entry has this flow queue id enabled.
|
||||
*/
|
||||
for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
|
||||
if (peer->peer_ast_flowq_idx[i].flowQ == flow_id)
|
||||
/*
|
||||
* Found the matching index for this flow id
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* No match found for this flow id
|
||||
*/
|
||||
if (i == DP_PEER_AST_FLOWQ_MAX) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: ast index not found for flow %d\n", __func__, flow_id);
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Check whether this ast entry is valid */
|
||||
if (!peer->peer_ast_flowq_idx[i].is_valid) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: ast index is invalid for flow %d\n", __func__, flow_id);
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (flow_id == DP_PEER_AST_FLOWQ_HI_PRIO ||
|
||||
flow_id == DP_PEER_AST_FLOWQ_LOW_PRIO) {
|
||||
/*
|
||||
* check if this tid is valid for Hi
|
||||
* and Low priority flow id
|
||||
*/
|
||||
if ((peer->peer_ast_flowq_idx[i].valid_tid_mask
|
||||
& (1 << tid))) {
|
||||
/* Release peer reference */
|
||||
ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return ast_index;
|
||||
} else {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: TID %d is not valid for flow %d\n",
|
||||
__func__, tid, flow_id);
|
||||
/*
|
||||
* TID is not valid for this flow
|
||||
* Return -1
|
||||
*/
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TID valid check not required for
|
||||
* UDP/NON UDP flow id
|
||||
*/
|
||||
ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_AST);
|
||||
return ast_index;
|
||||
}
|
||||
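/*
 * Illustrative sketch, not part of the original change: how a tx-side caller
 * might consult the per-peer flow <-> AST mapping built above. The wrapper
 * name and the UDP/non-UDP selection are hypothetical; the API and the
 * DP_PEER_AST_FLOWQ_* ids are the ones used in this file.
 */
#if 0	/* example only, never compiled */
static int example_select_ast_index(struct cdp_soc_t *soc, uint16_t vdev_id,
				    uint8_t *peer_mac, bool is_udp,
				    uint8_t tid)
{
	uint8_t flow_id = is_udp ? DP_PEER_AST_FLOWQ_UDP :
				   DP_PEER_AST_FLOWQ_NON_UDP;

	/* Returns -1 when no valid AST entry is mapped to this flow/tid */
	return dp_peer_find_ast_index_by_flowq_id(soc, vdev_id, peer_mac,
						  flow_id, tid);
}
#endif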
#endif
|
||||
|
||||
void dp_hmwds_ast_add_notify(struct dp_peer *peer,
|
||||
uint8_t *mac_addr,
|
||||
enum cdp_txrx_ast_entry_type type,
|
||||
QDF_STATUS err,
|
||||
bool is_peer_map)
|
||||
{
|
||||
struct dp_vdev *dp_vdev = peer->vdev;
|
||||
struct dp_pdev *dp_pdev = dp_vdev->pdev;
|
||||
struct cdp_peer_hmwds_ast_add_status add_status;
|
||||
|
||||
/* Ignore ast types other than HM */
|
||||
if ((type != CDP_TXRX_AST_TYPE_WDS_HM) &&
|
||||
(type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
|
||||
return;
|
||||
|
||||
/* existing ast delete in progress, will be attempted
|
||||
* to add again after delete is complete. Send status then.
|
||||
*/
|
||||
if (err == QDF_STATUS_E_AGAIN)
|
||||
return;
|
||||
|
||||
/* peer map pending, notify actual status
|
||||
* when peer map is received.
|
||||
*/
|
||||
if (!is_peer_map && (err == QDF_STATUS_SUCCESS))
|
||||
return;
|
||||
|
||||
qdf_mem_zero(&add_status, sizeof(add_status));
|
||||
add_status.vdev_id = dp_vdev->vdev_id;
|
||||
/* For type CDP_TXRX_AST_TYPE_WDS_HM_SEC dp_peer_add_ast()
|
||||
* returns QDF_STATUS_E_FAILURE as it is a host-only entry;
|
||||
* set err to success in that case. An err code of
|
||||
* QDF_STATUS_E_ALREADY indicates the entry already exists;
|
||||
* treat that as success too. Any other error code is a
|
||||
* real error.
|
||||
*/
|
||||
if (((type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
|
||||
(err == QDF_STATUS_E_FAILURE)) ||
|
||||
(err == QDF_STATUS_E_ALREADY)) {
|
||||
err = QDF_STATUS_SUCCESS;
|
||||
}
|
||||
add_status.status = err;
|
||||
qdf_mem_copy(add_status.peer_mac, peer->mac_addr.raw,
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
qdf_mem_copy(add_status.ast_mac, mac_addr,
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
#ifdef WDI_EVENT_ENABLE
|
||||
dp_wdi_event_handler(WDI_EVENT_HMWDS_AST_ADD_STATUS, dp_pdev->soc,
|
||||
(void *)&add_status, 0,
|
||||
WDI_NO_VAL, dp_pdev->pdev_id);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
|
||||
defined(QCA_TX_CAPTURE_SUPPORT) || \
|
||||
defined(QCA_MCOPY_SUPPORT)
|
||||
#ifdef FEATURE_PERPKT_INFO
|
||||
QDF_STATUS
|
||||
dp_get_completion_indication_for_stack(struct dp_soc *soc,
|
||||
struct dp_pdev *pdev,
|
||||
struct dp_txrx_peer *txrx_peer,
|
||||
struct hal_tx_completion_status *ts,
|
||||
qdf_nbuf_t netbuf,
|
||||
uint64_t time_latency)
|
||||
{
|
||||
struct tx_capture_hdr *ppdu_hdr;
|
||||
uint16_t peer_id = ts->peer_id;
|
||||
uint32_t ppdu_id = ts->ppdu_id;
|
||||
uint8_t first_msdu = ts->first_msdu;
|
||||
uint8_t last_msdu = ts->last_msdu;
|
||||
uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
|
||||
struct dp_peer *peer;
|
||||
|
||||
if (qdf_unlikely(!dp_monitor_is_enable_tx_sniffer(pdev) &&
|
||||
!dp_monitor_is_enable_mcopy_mode(pdev) &&
|
||||
!pdev->latency_capture_enable))
|
||||
return QDF_STATUS_E_NOSUPPORT;
|
||||
|
||||
if (!txrx_peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("txrx_peer is NULL"));
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
|
||||
* per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
|
||||
* for each MPDU
|
||||
*/
|
||||
if (dp_monitor_mcopy_check_deliver(pdev,
|
||||
peer_id,
|
||||
ppdu_id,
|
||||
first_msdu) != QDF_STATUS_SUCCESS)
|
||||
return QDF_STATUS_E_INVAL;
|
||||
|
||||
if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
|
||||
netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
|
||||
if (!netbuf) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("No headroom"));
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("No headroom"));
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
|
||||
qdf_mem_copy(ppdu_hdr->ta, txrx_peer->vdev->mac_addr.raw,
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
|
||||
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_COMP);
|
||||
if (peer) {
|
||||
qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
|
||||
}
|
||||
ppdu_hdr->ppdu_id = ppdu_id;
|
||||
ppdu_hdr->peer_id = peer_id;
|
||||
ppdu_hdr->first_msdu = first_msdu;
|
||||
ppdu_hdr->last_msdu = last_msdu;
|
||||
if (qdf_unlikely(pdev->latency_capture_enable)) {
|
||||
ppdu_hdr->tsf = ts->tsf;
|
||||
ppdu_hdr->time_latency = (uint32_t)time_latency;
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
|
||||
uint16_t peer_id, uint32_t ppdu_id,
|
||||
qdf_nbuf_t netbuf)
|
||||
{
|
||||
dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
|
||||
netbuf, peer_id,
|
||||
WDI_NO_VAL, pdev->pdev_id);
|
||||
}
|
||||
#endif
|
||||
#endif
|
538
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_txrx_wds.h
Normal file
@ -0,0 +1,538 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_TXRX_WDS_H_
|
||||
#define _DP_TXRX_WDS_H_
|
||||
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include "dp_htt.h"
|
||||
#include "dp_mon.h"
|
||||
#endif
|
||||
|
||||
/* host managed flag */
|
||||
#define DP_AST_FLAGS_HM 0x0020
|
||||
|
||||
/* WDS AST entry aging timer value */
|
||||
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
|
||||
#define DP_WDS_AST_AGING_TIMER_CNT \
|
||||
((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
|
||||
|
||||
/**
|
||||
* dp_soc_wds_attach() - Setup WDS timer and AST table
|
||||
* @soc: Datapath SOC handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_soc_wds_attach(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_soc_wds_detach() - Detach WDS data structures and timers
|
||||
* @soc: DP SOC handle
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_soc_wds_detach(struct dp_soc *soc);
|
||||
#ifdef QCA_PEER_MULTIQ_SUPPORT
|
||||
/**
|
||||
* dp_peer_find_ast_index_by_flowq_id() - API to get ast idx for a given flowid
|
||||
* @soc: soc handle
|
||||
* @vdev_id: vdev ID
|
||||
* @peer_mac_addr: mac address of the peer
|
||||
* @flow_id: flow id to find ast index
|
||||
* @tid: TID
|
||||
*
|
||||
* Return: ast index for a given flow id, -1 for fail cases
|
||||
*/
|
||||
int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
|
||||
uint16_t vdev_id, uint8_t *peer_mac_addr,
|
||||
uint8_t flow_id, uint8_t tid);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_rx_da_learn() - Add AST entry based on DA lookup
|
||||
* This is a WAR for HK 1.0 and will
|
||||
* be removed in HK 2.0
|
||||
*
|
||||
* @soc: core txrx main context
|
||||
* @rx_tlv_hdr: start address of rx tlvs
|
||||
* @ta_peer: Transmitter peer entry
|
||||
* @nbuf: nbuf to retrieve destination mac for which AST will be added
|
||||
*
|
||||
*/
|
||||
void
|
||||
dp_rx_da_learn(struct dp_soc *soc,
|
||||
uint8_t *rx_tlv_hdr,
|
||||
struct dp_txrx_peer *ta_peer,
|
||||
qdf_nbuf_t nbuf);
|
||||
|
||||
/**
|
||||
* dp_tx_mec_handler() - Tx MEC Notify Handler
|
||||
* @vdev: pointer to dp dev handler
|
||||
* @status : Tx completion status from HTT descriptor
|
||||
*
|
||||
* Handles MEC notify event sent from fw to Host
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status);
|
||||
#ifdef FEATURE_WDS
|
||||
#ifdef FEATURE_MCL_REPEATER
|
||||
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
|
||||
{
|
||||
if (vdev->mec_enabled)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
|
||||
{
|
||||
struct dp_soc *soc = vdev->pdev->soc;
|
||||
|
||||
/*
|
||||
* If AST index override support is available (HKv2 etc),
|
||||
* DA search flag should be enabled always
|
||||
*
|
||||
* If AST index override support is not available (HKv1),
|
||||
* DA search flag should be used for all modes except QWRAP
|
||||
*/
|
||||
if (soc->ast_override_support || !vdev->proxysta_vdev)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif /* FEATURE_MCL_REPEATER */
|
||||
#endif /* FEATURE_WDS */
|
||||
#ifdef WDS_VENDOR_EXTENSION
|
||||
|
||||
/**
|
||||
* dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
|
||||
*
|
||||
* @soc: DP soc handle
|
||||
* @vdev_id: id of vdev handle
|
||||
* @peer_mac: peer mac address
|
||||
* @wds_tx_ucast: policy for unicast transmission
|
||||
* @wds_tx_mcast: policy for multicast transmission
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
|
||||
uint8_t *peer_mac, int wds_tx_ucast,
|
||||
int wds_tx_mcast);
|
||||
|
||||
/**
|
||||
* dp_txrx_set_wds_rx_policy() - API to store datapath
|
||||
* config parameters
|
||||
* @soc_hdl: datapath soc handle
|
||||
* @vdev_id: id of datapath vdev handle
|
||||
* @val: WDS rx policy value
|
||||
*
|
||||
* Return: status
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
u_int32_t val);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_hmwds_ast_add_notify() - schedules hmwds ast add status work
|
||||
* @peer: DP peer
|
||||
* @mac_addr: ast mac addr
|
||||
* @type: ast type
|
||||
* @err: QDF_STATUS error code
|
||||
* @is_peer_map: notify is from peer map
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void dp_hmwds_ast_add_notify(struct dp_peer *peer,
|
||||
uint8_t *mac_addr,
|
||||
enum cdp_txrx_ast_entry_type type,
|
||||
QDF_STATUS err,
|
||||
bool is_peer_map);
|
||||
|
||||
#ifdef QCA_SUPPORT_WDS_EXTENDED
|
||||
/**
|
||||
* dp_wds_ext_peer_learn() - function to send event to control
|
||||
* path on receiving 1st 4-address frame from backhaul.
|
||||
* @soc: DP soc
|
||||
* @ta_peer: WDS repeater peer
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void dp_wds_ext_peer_learn(struct dp_soc *soc,
|
||||
struct dp_peer *ta_peer)
|
||||
{
|
||||
uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
|
||||
|
||||
if (ta_peer->vdev->wds_ext_enabled &&
|
||||
!qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
|
||||
&ta_peer->txrx_peer->wds_ext.init)) {
|
||||
qdf_mem_copy(wds_ext_src_mac, &ta_peer->mac_addr.raw[0],
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
|
||||
soc->ctrl_psoc,
|
||||
ta_peer->peer_id,
|
||||
ta_peer->vdev->vdev_id,
|
||||
wds_ext_src_mac);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline void dp_wds_ext_peer_learn(struct dp_soc *soc,
|
||||
struct dp_peer *ta_peer)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_rx_wds_add_or_update_ast() - Add or update the ast entry.
|
||||
*
|
||||
* @soc: core txrx main context
|
||||
* @ta_peer: WDS repeater txrx peer
|
||||
* @nbuf: network buffer
|
||||
* @is_ad4_valid: 4-address valid flag
|
||||
* @is_sa_valid: source address valid flag
|
||||
* @is_chfrag_start: frag start flag
|
||||
* @sa_idx: source-address index for peer
|
||||
* @sa_sw_peer_id: software source-address peer-id
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
dp_rx_wds_add_or_update_ast(struct dp_soc *soc,
|
||||
struct dp_txrx_peer *ta_peer,
|
||||
qdf_nbuf_t nbuf, uint8_t is_ad4_valid,
|
||||
uint8_t is_sa_valid, uint8_t is_chfrag_start,
|
||||
uint16_t sa_idx, uint16_t sa_sw_peer_id)
|
||||
{
|
||||
struct dp_peer *sa_peer;
|
||||
struct dp_ast_entry *ast;
|
||||
uint32_t flags = DP_AST_FLAGS_HM;
|
||||
struct dp_pdev *pdev = ta_peer->vdev->pdev;
|
||||
uint8_t wds_src_mac[QDF_MAC_ADDR_SIZE];
|
||||
struct dp_peer *ta_base_peer;
|
||||
|
||||
|
||||
if (!(is_chfrag_start && is_ad4_valid))
|
||||
return;
|
||||
|
||||
if (qdf_unlikely(!is_sa_valid)) {
|
||||
qdf_mem_copy(wds_src_mac,
|
||||
(qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
|
||||
ta_base_peer = dp_peer_get_ref_by_id(soc, ta_peer->peer_id,
|
||||
DP_MOD_ID_RX);
|
||||
if (ta_base_peer) {
|
||||
if (ta_peer->vdev->opmode == wlan_op_mode_ap)
|
||||
dp_wds_ext_peer_learn(soc, ta_base_peer);
|
||||
|
||||
dp_peer_add_ast(soc, ta_base_peer, wds_src_mac,
|
||||
CDP_TXRX_AST_TYPE_WDS, flags);
|
||||
|
||||
dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
qdf_spin_lock_bh(&soc->ast_lock);
|
||||
ast = soc->ast_table[sa_idx];
|
||||
|
||||
if (!ast) {
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
/*
|
||||
* In HKv1, it is possible that HW retains the AST entry in
|
||||
* GSE cache on 1 radio , even after the AST entry is deleted
|
||||
* (on another radio).
|
||||
*
|
||||
* Due to this, host might still get sa_is_valid indications
|
||||
* for frames with SA not really present in AST table.
|
||||
*
|
||||
* So we go ahead and send an add_ast command to FW in such
|
||||
* cases where sa is reported still as valid, so that FW will
|
||||
* invalidate this GSE cache entry and new AST entry gets
|
||||
* cached.
|
||||
*/
|
||||
if (!soc->ast_override_support) {
|
||||
qdf_mem_copy(wds_src_mac,
|
||||
(qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
|
||||
ta_base_peer = dp_peer_get_ref_by_id(soc,
|
||||
ta_peer->peer_id,
|
||||
DP_MOD_ID_RX);
|
||||
|
||||
if (ta_base_peer) {
|
||||
dp_peer_add_ast(soc, ta_base_peer,
|
||||
wds_src_mac,
|
||||
CDP_TXRX_AST_TYPE_WDS,
|
||||
flags);
|
||||
|
||||
dp_peer_unref_delete(ta_base_peer,
|
||||
DP_MOD_ID_RX);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
/* In HKv2 smart monitor case, when NAC client is
|
||||
* added first and this client roams within BSS to
|
||||
* connect to RE, since we have an AST entry for
|
||||
* NAC we get sa_is_valid bit set. So we check if
|
||||
* smart monitor is enabled and send add_ast command
|
||||
* to FW.
|
||||
*/
|
||||
ta_base_peer = dp_peer_get_ref_by_id(soc,
|
||||
ta_peer->peer_id,
|
||||
DP_MOD_ID_RX);
|
||||
if (ta_base_peer) {
|
||||
dp_monitor_neighbour_peer_add_ast(pdev,
|
||||
ta_base_peer,
|
||||
wds_src_mac,
|
||||
nbuf,
|
||||
flags);
|
||||
dp_peer_unref_delete(ta_base_peer,
|
||||
DP_MOD_ID_RX);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure we are updating the right AST entry by
|
||||
* validating ast_idx.
|
||||
* There is a possibility we might arrive here without
|
||||
* AST MAP event , so this check is mandatory
|
||||
*/
|
||||
if (ast->is_mapped && (ast->ast_idx == sa_idx))
|
||||
ast->is_active = TRUE;
|
||||
|
||||
if (ast->peer_id != ta_peer->peer_id) {
|
||||
if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
|
||||
(ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
|
||||
(ast->type != CDP_TXRX_AST_TYPE_SELF) &&
|
||||
(ast->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
|
||||
if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
|
||||
/* This case is when a STA roams from one
|
||||
* repeater to another repeater, but these
|
||||
* repeaters are connected to root AP on
|
||||
* different radios.
|
||||
* Ex: rptr1 connected to ROOT AP over 5G
|
||||
* and rptr2 connected to ROOT AP over 2G
|
||||
* radio
|
||||
*/
|
||||
dp_peer_del_ast(soc, ast);
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return;
|
||||
} else {
|
||||
/* this case is when a STA roams from one
|
||||
* repeater to another repeater, but inside
|
||||
* same radio.
|
||||
*/
|
||||
/* For HKv2 do not update the AST entry if
|
||||
* new ta_peer is on STA vap as SRC port
|
||||
* learning is disable on STA vap
|
||||
*/
|
||||
if (soc->ast_override_support &&
|
||||
(ta_peer->vdev->opmode == wlan_op_mode_sta)) {
|
||||
dp_peer_del_ast(soc, ast);
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return;
|
||||
} else {
|
||||
ta_base_peer =
|
||||
dp_peer_get_ref_by_id(soc,
|
||||
ta_peer->peer_id,
|
||||
DP_MOD_ID_RX);
|
||||
if (ta_base_peer) {
|
||||
dp_wds_ext_peer_learn(soc,
|
||||
ta_base_peer);
|
||||
dp_peer_update_ast(soc,
|
||||
ta_base_peer,
|
||||
ast, flags);
|
||||
|
||||
dp_peer_unref_delete(ta_base_peer,
|
||||
DP_MOD_ID_RX);
|
||||
}
|
||||
}
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Do not kickout STA if it belongs to a different radio.
|
||||
* For DBDC repeater, it is possible to arrive here
|
||||
* for multicast loopback frames originated from connected
|
||||
* clients and looped back (intrabss) by Root AP
|
||||
*/
|
||||
if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
/*
|
||||
* Kickout, when direct associated peer(SA) roams
|
||||
* to another AP and reachable via TA peer
|
||||
*/
|
||||
sa_peer = dp_peer_get_ref_by_id(soc, ast->peer_id,
|
||||
DP_MOD_ID_RX);
|
||||
if (!sa_peer)
|
||||
return;
|
||||
|
||||
if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
|
||||
!sa_peer->delete_in_progress) {
|
||||
qdf_mem_copy(wds_src_mac,
|
||||
(qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
|
||||
QDF_MAC_ADDR_SIZE);
|
||||
sa_peer->delete_in_progress = true;
|
||||
if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
|
||||
soc->cdp_soc.ol_ops->peer_sta_kickout(
|
||||
soc->ctrl_psoc,
|
||||
sa_peer->vdev->pdev->pdev_id,
|
||||
wds_src_mac);
|
||||
}
|
||||
}
|
||||
dp_peer_unref_delete(sa_peer, DP_MOD_ID_RX);
|
||||
return;
|
||||
}
|
||||
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_rx_wds_srcport_learn() - Add or update the STA PEER which
|
||||
* is behind the WDS repeater.
|
||||
* @soc: core txrx main context
|
||||
* @rx_tlv_hdr: base address of RX TLV header
|
||||
* @ta_peer: WDS repeater peer
|
||||
* @nbuf: rx pkt
|
||||
* @msdu_end_info: msdu end info
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
dp_rx_wds_srcport_learn(struct dp_soc *soc,
|
||||
uint8_t *rx_tlv_hdr,
|
||||
struct dp_txrx_peer *ta_peer,
|
||||
qdf_nbuf_t nbuf,
|
||||
struct hal_rx_msdu_metadata msdu_end_info)
|
||||
{
|
||||
uint8_t sa_is_valid = qdf_nbuf_is_sa_valid(nbuf);
|
||||
uint8_t is_chfrag_start = 0;
|
||||
uint8_t is_ad4_valid = 0;
|
||||
|
||||
if (qdf_unlikely(!ta_peer))
|
||||
return;
|
||||
|
||||
is_chfrag_start = qdf_nbuf_is_rx_chfrag_start(nbuf);
|
||||
if (is_chfrag_start)
|
||||
is_ad4_valid = hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, rx_tlv_hdr);
|
||||
|
||||
|
||||
/*
|
||||
* Get the AST entry from HW SA index and mark it as active
|
||||
*/
|
||||
dp_rx_wds_add_or_update_ast(soc, ta_peer, nbuf, is_ad4_valid,
|
||||
sa_is_valid, is_chfrag_start,
|
||||
msdu_end_info.sa_idx, msdu_end_info.sa_sw_peer_id);
|
||||
}
|
||||
|
||||
#ifdef IPA_WDS_EASYMESH_FEATURE
|
||||
/**
|
||||
* dp_rx_ipa_wds_srcport_learn() - Add or update the STA PEER which
|
||||
* is behind the WDS repeater.
|
||||
*
|
||||
* @soc: core txrx main context
|
||||
* @ta_peer: WDS repeater peer
|
||||
* @nbuf: rx pkt
|
||||
* @msdu_end_info: msdu end info
|
||||
* @ad4_valid: address4 valid bit
|
||||
* @chfrag_start: Msdu start bit
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline void
|
||||
dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
|
||||
struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
|
||||
struct hal_rx_msdu_metadata msdu_end_info,
|
||||
bool ad4_valid, bool chfrag_start)
|
||||
{
|
||||
uint8_t sa_is_valid = qdf_nbuf_is_sa_valid(nbuf);
|
||||
uint8_t is_chfrag_start = (uint8_t)chfrag_start;
|
||||
uint8_t is_ad4_valid = (uint8_t)ad4_valid;
|
||||
struct dp_txrx_peer *peer = (struct dp_txrx_peer *)ta_peer;
|
||||
|
||||
if (qdf_unlikely(!ta_peer))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Get the AST entry from HW SA index and mark it as active
|
||||
*/
|
||||
dp_rx_wds_add_or_update_ast(soc, peer, nbuf, is_ad4_valid,
|
||||
sa_is_valid, is_chfrag_start,
|
||||
msdu_end_info.sa_idx,
|
||||
msdu_end_info.sa_sw_peer_id);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* dp_rx_ast_set_active() - set the active flag of the astentry
|
||||
* corresponding to a hw index.
|
||||
* @soc: core txrx main context
|
||||
* @sa_idx: hw idx
|
||||
* @is_active: active flag
|
||||
*
|
||||
*/
|
||||
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
|
||||
uint16_t sa_idx, bool is_active)
|
||||
{
|
||||
struct dp_ast_entry *ast;
|
||||
|
||||
qdf_spin_lock_bh(&soc->ast_lock);
|
||||
ast = soc->ast_table[sa_idx];
|
||||
|
||||
if (!ast) {
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return QDF_STATUS_E_NULL_VALUE;
|
||||
}
|
||||
|
||||
if (!ast->is_mapped) {
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return QDF_STATUS_E_INVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure we are updating the right AST entry by
|
||||
* validating ast_idx.
|
||||
* There is a possibility we might arrive here without
|
||||
* AST MAP event , so this check is mandatory
|
||||
*/
|
||||
if (ast->ast_idx == sa_idx) {
|
||||
ast->is_active = is_active;
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
#endif /* _DP_TXRX_WDS_H_ */
|
5449
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h
Normal file
File diff suppressed because it is too large
1039
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_umac_reset.c
Normal file
File diff suppressed because it is too large
@ -0,0 +1,374 @@
|
||||
/*
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_UMAC_RESET_H_
|
||||
#define _DP_UMAC_RESET_H_
|
||||
|
||||
#include <qdf_types.h>
|
||||
struct dp_soc;
|
||||
|
||||
#define DP_UMAC_RESET_NOTIFY_DONE 20
|
||||
/**
|
||||
* enum umac_reset_action - Actions supported by the UMAC reset
|
||||
* @UMAC_RESET_ACTION_NONE: No action
|
||||
* @UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY: Trigger umac recovery
|
||||
* @UMAC_RESET_ACTION_DO_PRE_RESET: DO_PRE_RESET
|
||||
* @UMAC_RESET_ACTION_DO_POST_RESET_START: DO_POST_RESET_START
|
||||
* @UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE: DO_POST_RESET_COMPLETE
|
||||
* @UMAC_RESET_ACTION_ABORT: Abort the current Umac reset session
|
||||
* @UMAC_RESET_ACTION_MAX: Maximum actions
|
||||
*/
|
||||
enum umac_reset_action {
|
||||
UMAC_RESET_ACTION_NONE,
|
||||
UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY,
|
||||
UMAC_RESET_ACTION_DO_PRE_RESET,
|
||||
UMAC_RESET_ACTION_DO_POST_RESET_START,
|
||||
UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE,
|
||||
UMAC_RESET_ACTION_ABORT,
|
||||
UMAC_RESET_ACTION_MAX
|
||||
};
|
||||
|
||||
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||
|
||||
#define dp_umac_reset_alert(params...) \
|
||||
QDF_TRACE_FATAL(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
#define dp_umac_reset_err(params...) \
|
||||
QDF_TRACE_ERROR(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
#define dp_umac_reset_warn(params...) \
|
||||
QDF_TRACE_WARN(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
#define dp_umac_reset_notice(params...) \
|
||||
QDF_TRACE_INFO(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
#define dp_umac_reset_info(params...) \
|
||||
QDF_TRACE_INFO(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
#define dp_umac_reset_debug(params...) \
|
||||
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_UMAC_RESET, params)
|
||||
|
||||
#define DP_UMAC_RESET_PRINT_STATS(fmt, args ...)\
|
||||
QDF_TRACE(QDF_MODULE_ID_DP_UMAC_RESET, QDF_TRACE_LEVEL_FATAL,\
|
||||
fmt, ## args)
|
||||
|
||||
#define DP_UMAC_RESET_SHMEM_ALIGN 8
|
||||
#define DP_UMAC_RESET_SHMEM_MAGIC_NUM (0xDEADBEEF)
|
||||
|
||||
/**
|
||||
* enum umac_reset_state - States required by the UMAC reset state machine
|
||||
* @UMAC_RESET_STATE_WAIT_FOR_TRIGGER: Waiting for trigger event
|
||||
* @UMAC_RESET_STATE_DO_TRIGGER_RECEIVED: Received the DO_TRIGGER event
|
||||
* @UMAC_RESET_STATE_HOST_TRIGGER_DONE: Host completed handling Trigger event
|
||||
* @UMAC_RESET_STATE_WAIT_FOR_DO_PRE_RESET: Waiting for the DO_PRE_RESET event
|
||||
* @UMAC_RESET_STATE_DO_PRE_RESET_RECEIVED: Received the DO_PRE_RESET event
|
||||
* @UMAC_RESET_STATE_HOST_PRE_RESET_DONE: Host has completed handling the
|
||||
* PRE_RESET event
|
||||
* @UMAC_RESET_STATE_WAIT_FOR_DO_POST_RESET_START: Waiting for the
|
||||
* DO_POST_RESET_START event
|
||||
* @UMAC_RESET_STATE_DO_POST_RESET_START_RECEIVED: Received the
|
||||
* DO_POST_RESET_START event
|
||||
* @UMAC_RESET_STATE_HOST_POST_RESET_START_DONE: Host has completed handling the
|
||||
* POST_RESET_START event
|
||||
* @UMAC_RESET_STATE_WAIT_FOR_DO_POST_RESET_COMPLETE: Waiting for the
|
||||
* DO_POST_RESET_COMPLETE event
|
||||
* @UMAC_RESET_STATE_DO_POST_RESET_COMPLETE_RECEIVED: Received the
|
||||
* DO_POST_RESET_COMPLETE event
|
||||
* @UMAC_RESET_STATE_HOST_POST_RESET_COMPLETE_DONE: Host has completed handling
|
||||
* the DO_POST_RESET_COMPLETE event
|
||||
*/
|
||||
enum umac_reset_state {
|
||||
UMAC_RESET_STATE_WAIT_FOR_TRIGGER = 0,
|
||||
UMAC_RESET_STATE_DO_TRIGGER_RECEIVED,
|
||||
UMAC_RESET_STATE_HOST_TRIGGER_DONE,
|
||||
|
||||
UMAC_RESET_STATE_WAIT_FOR_DO_PRE_RESET,
|
||||
UMAC_RESET_STATE_DO_PRE_RESET_RECEIVED,
|
||||
UMAC_RESET_STATE_HOST_PRE_RESET_DONE,
|
||||
|
||||
UMAC_RESET_STATE_WAIT_FOR_DO_POST_RESET_START,
|
||||
UMAC_RESET_STATE_DO_POST_RESET_START_RECEIVED,
|
||||
UMAC_RESET_STATE_HOST_POST_RESET_START_DONE,
|
||||
|
||||
UMAC_RESET_STATE_WAIT_FOR_DO_POST_RESET_COMPLETE,
|
||||
UMAC_RESET_STATE_DO_POST_RESET_COMPLETE_RECEIVED,
|
||||
UMAC_RESET_STATE_HOST_POST_RESET_COMPLETE_DONE,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum umac_reset_rx_event - Rx events deduced by the UMAC reset
|
||||
* @UMAC_RESET_RX_EVENT_NONE: No event
|
||||
* @UMAC_RESET_RX_EVENT_DO_TRIGGER_RECOVERY: ACTION_DO_TRIGGER_RECOVERY event
|
||||
* @UMAC_RESET_RX_EVENT_DO_TRIGGER_TR_SYNC: ACTION_DO_TRIGGER_RECOVERY event
|
||||
* @UMAC_RESET_RX_EVENT_DO_PRE_RESET: DO_PRE_RESET event
|
||||
* @UMAC_RESET_RX_EVENT_DO_POST_RESET_START: DO_POST_RESET_START event
|
||||
* @UMAC_RESET_RX_EVENT_DO_POST_RESET_COMPELTE: DO_POST_RESET_COMPELTE event
|
||||
* @UMAC_RESET_RX_EVENT_ERROR: Error while processing the Rx event
|
||||
*/
|
||||
enum umac_reset_rx_event {
|
||||
UMAC_RESET_RX_EVENT_NONE = 0x0,
|
||||
UMAC_RESET_RX_EVENT_DO_TRIGGER_RECOVERY,
|
||||
UMAC_RESET_RX_EVENT_DO_TRIGGER_TR_SYNC,
|
||||
UMAC_RESET_RX_EVENT_DO_PRE_RESET,
|
||||
UMAC_RESET_RX_EVENT_DO_POST_RESET_START,
|
||||
UMAC_RESET_RX_EVENT_DO_POST_RESET_COMPELTE,
|
||||
|
||||
UMAC_RESET_RX_EVENT_ERROR = 0xFFFFFFFF,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum umac_reset_tx_cmd: UMAC reset Tx command
|
||||
* @UMAC_RESET_TX_CMD_TRIGGER_DONE: TRIGGER_DONE
|
||||
* @UMAC_RESET_TX_CMD_PRE_RESET_DONE: PRE_RESET_DONE
|
||||
* @UMAC_RESET_TX_CMD_POST_RESET_START_DONE: POST_RESET_START_DONE
|
||||
* @UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE: POST_RESET_COMPLETE_DONE
|
||||
*/
|
||||
enum umac_reset_tx_cmd {
|
||||
UMAC_RESET_TX_CMD_TRIGGER_DONE,
|
||||
UMAC_RESET_TX_CMD_PRE_RESET_DONE,
|
||||
UMAC_RESET_TX_CMD_POST_RESET_START_DONE,
|
||||
UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct umac_reset_rx_actions - callbacks for handling UMAC reset actions
|
||||
* @cb: Array of pointers where each pointer contains callback for each UMAC
|
||||
* reset action for that index
|
||||
*/
|
||||
struct umac_reset_rx_actions {
|
||||
QDF_STATUS (*cb[UMAC_RESET_ACTION_MAX])(struct dp_soc *soc);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct reset_ts - timestamps of umac reset events, for debug
|
||||
* @trigger_start: Umac reset trigger event timestamp
|
||||
* @trigger_done: Umac reset trigger done timestamp
|
||||
* @pre_reset_start: Umac prereset start event timestamp
|
||||
* @pre_reset_done: Umac prereset done timestamp
|
||||
* @post_reset_start: Umac postreset start event timestamp
|
||||
* @post_reset_done: Umac postreset done timestamp
|
||||
* @post_reset_complete_start: Umac postreset complete event timestamp
|
||||
* @post_reset_complete_done: Umac postreset complete done timestamp
|
||||
*/
|
||||
struct reset_ts {
|
||||
uint64_t trigger_start;
|
||||
uint64_t trigger_done;
|
||||
uint64_t pre_reset_start;
|
||||
uint64_t pre_reset_done;
|
||||
uint64_t post_reset_start;
|
||||
uint64_t post_reset_done;
|
||||
uint64_t post_reset_complete_start;
|
||||
uint64_t post_reset_complete_done;
|
||||
};
|
||||
|
||||
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
|
||||
/**
|
||||
* struct dp_soc_mlo_umac_reset_ctx - UMAC reset context at mlo group level
|
||||
* @partner_map: Partner soc map
|
||||
* @request_map: Partner soc request map
|
||||
* @response_map: Partner soc response map
|
||||
* @grp_ctx_lock: lock for accessing group level umac reset context
|
||||
* @umac_reset_in_progress: Flag to indicate if umac reset is in progress
|
||||
* @is_target_recovery: Flag to indicate if this is for target recovery
|
||||
* @tx_desc_pool_cleaned: Global tx_desc pool clean up has been done
|
||||
* @initiator_chip_id: chip id of the Umac reset initiator
|
||||
* @umac_reset_count: Number of times Umac reset happened on this MLO group
|
||||
*/
|
||||
struct dp_soc_mlo_umac_reset_ctx {
|
||||
unsigned long partner_map;
|
||||
unsigned long request_map;
|
||||
unsigned long response_map;
|
||||
qdf_spinlock_t grp_ctx_lock;
|
||||
uint8_t umac_reset_in_progress:1,
|
||||
is_target_recovery:1,
|
||||
tx_desc_pool_cleaned:1;
|
||||
uint8_t initiator_chip_id;
|
||||
uint32_t umac_reset_count;
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* struct dp_soc_umac_reset_ctx - UMAC reset context at soc level
|
||||
* @shmem_paddr_unaligned: Physical address of the shared memory (unaligned)
|
||||
* @shmem_vaddr_unaligned: Virtual address of the shared memory (unaligned)
|
||||
* @shmem_paddr_aligned: Physical address of the shared memory (aligned)
|
||||
* @shmem_vaddr_aligned: Virtual address of the shared memory (aligned)
|
||||
* @shmem_size: Size of the shared memory
|
||||
* @intr_offset: Offset of the UMAC reset interrupt w.r.t DP base interrupt
|
||||
* @current_state: current state of the UMAC reset state machine
|
||||
* @shmem_exp_magic_num: Expected magic number in the shared memory
|
||||
* @rx_actions: callbacks for handling UMAC reset actions
|
||||
* @pending_action: Action pending to be executed.
|
||||
* @intr_ctx_bkp: DP Interrupts ring masks backup
|
||||
* @nbuf_list: skb list for delayed free
|
||||
* @skel_enable: Enable skeleton code for umac reset
|
||||
* @ts: timestamps debug
|
||||
*/
|
||||
struct dp_soc_umac_reset_ctx {
|
||||
qdf_dma_addr_t shmem_paddr_unaligned;
|
||||
void *shmem_vaddr_unaligned;
|
||||
qdf_dma_addr_t shmem_paddr_aligned;
|
||||
htt_umac_hang_recovery_msg_shmem_t *shmem_vaddr_aligned;
|
||||
size_t shmem_size;
|
||||
int intr_offset;
|
||||
enum umac_reset_state current_state;
|
||||
uint32_t shmem_exp_magic_num;
|
||||
struct umac_reset_rx_actions rx_actions;
|
||||
enum umac_reset_action pending_action;
|
||||
struct dp_intr_bkp *intr_ctx_bkp;
|
||||
qdf_nbuf_t nbuf_list;
|
||||
bool skel_enable;
|
||||
struct reset_ts ts;
|
||||
};
|
||||
|
||||
/**
|
||||
* dp_soc_umac_reset_init() - Initialize UMAC reset context
|
||||
* @txrx_soc: DP soc object
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_soc_umac_reset_init(struct cdp_soc_t *txrx_soc);
|
||||
|
||||
/**
|
||||
* dp_soc_umac_reset_deinit() - De-initialize UMAC reset context
|
||||
* @txrx_soc: DP soc object
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_soc_umac_reset_deinit(struct cdp_soc_t *txrx_soc);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_interrupt_attach() - Register handlers for UMAC reset interrupt
|
||||
* @soc: DP soc object
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_umac_reset_interrupt_attach(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_interrupt_detach() - Unregister UMAC reset interrupt handlers
|
||||
* @soc: DP soc object
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_umac_reset_interrupt_detach(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_register_rx_action_callback() - Register a callback for a given
|
||||
* UMAC reset action
|
||||
* @soc: DP soc object
|
||||
* @handler: callback handler to be registered
|
||||
* @action: UMAC reset action for which @handler needs to be registered
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_umac_reset_register_rx_action_callback(
|
||||
struct dp_soc *soc,
|
||||
QDF_STATUS (*handler)(struct dp_soc *soc),
|
||||
enum umac_reset_action action);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_notify_action_completion() - Notify that a given action has
|
||||
* been completed
|
||||
* @soc: DP soc object
|
||||
* @action: UMAC reset action that got completed
|
||||
*
|
||||
* Return: QDF status of operation
|
||||
*/
|
||||
QDF_STATUS dp_umac_reset_notify_action_completion(
|
||||
struct dp_soc *soc,
|
||||
enum umac_reset_action action);
|
||||
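/*
 * Illustrative sketch, not part of the original header: registering a handler
 * for the DO_PRE_RESET action and acknowledging it when done. The handler
 * body and names are hypothetical; the two APIs and the action id are the
 * ones declared in this file.
 */
#if 0	/* example only, never compiled */
static QDF_STATUS example_pre_reset_handler(struct dp_soc *soc)
{
	/* ...quiesce DP rings / stop tx here... */
	return dp_umac_reset_notify_action_completion(
			soc, UMAC_RESET_ACTION_DO_PRE_RESET);
}

static void example_register_umac_reset_cb(struct dp_soc *soc)
{
	dp_umac_reset_register_rx_action_callback(
			soc, example_pre_reset_handler,
			UMAC_RESET_ACTION_DO_PRE_RESET);
}
#endif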
|
||||
/**
|
||||
* dp_umac_reset_post_tx_cmd_via_shmem() - Post Tx command using shared memory
|
||||
* @soc: DP soc object
|
||||
* @ctxt: Tx command to be posted
|
||||
* @chip_id: Chip id of the mlo soc
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_umac_reset_post_tx_cmd_via_shmem(struct dp_soc *soc, void *ctxt,
|
||||
int chip_id);
|
||||
|
||||
/**
|
||||
* dp_check_umac_reset_in_progress() - Check if Umac reset is in progress
|
||||
* @soc: dp soc handle
|
||||
*
|
||||
* Return: true if Umac reset is in progress or false otherwise
|
||||
*/
|
||||
bool dp_check_umac_reset_in_progress(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_stats_print - API to print UMAC reset stats
|
||||
* @soc: dp soc handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
QDF_STATUS dp_umac_reset_stats_print(struct dp_soc *soc);
|
||||
|
||||
/**
|
||||
* dp_umac_reset_validate_n_update_state_machine_on_rx() - Validate the state
|
||||
* machine for a given rx event and update the state machine
|
||||
* @umac_reset_ctx: UMAC reset context
|
||||
* @rx_event: Rx event
|
||||
* @current_exp_state: Expected state
|
||||
* @next_state: The state to which the state machine needs to be updated
|
||||
*
|
||||
* Return: QDF_STATUS of operation
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_umac_reset_validate_n_update_state_machine_on_rx(
|
||||
struct dp_soc_umac_reset_ctx *umac_reset_ctx,
|
||||
enum umac_reset_rx_event rx_event,
|
||||
enum umac_reset_state current_exp_state,
|
||||
enum umac_reset_state next_state);
|
||||
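/*
 * Illustrative sketch, not part of the original header: a hypothetical rx
 * handler fragment that accepts DO_PRE_RESET only while the state machine is
 * waiting for it, then advances the state. The enum values and the API are
 * the ones declared above.
 */
#if 0	/* example only, never compiled */
static QDF_STATUS
example_handle_do_pre_reset(struct dp_soc_umac_reset_ctx *umac_reset_ctx)
{
	return dp_umac_reset_validate_n_update_state_machine_on_rx(
			umac_reset_ctx,
			UMAC_RESET_RX_EVENT_DO_PRE_RESET,
			UMAC_RESET_STATE_WAIT_FOR_DO_PRE_RESET,
			UMAC_RESET_STATE_DO_PRE_RESET_RECEIVED);
}
#endif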
#else
|
||||
static inline bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_soc_umac_reset_init(struct cdp_soc_t *txrx_soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_soc_umac_reset_deinit(struct cdp_soc_t *txrx_soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_umac_reset_register_rx_action_callback(
|
||||
struct dp_soc *soc,
|
||||
QDF_STATUS (*handler)(struct dp_soc *soc),
|
||||
enum umac_reset_action action)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_umac_reset_notify_action_completion(
|
||||
struct dp_soc *soc,
|
||||
enum umac_reset_action action)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_umac_reset_stats_print(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif /* DP_UMAC_HW_RESET_SUPPORT */
|
||||
#endif /* _DP_UMAC_RESET_H_ */
|
284
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c
Normal file
@ -0,0 +1,284 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#include "dp_internal.h"
|
||||
#include "qdf_mem.h" /* qdf_mem_malloc,free */
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include "dp_htt.h"
|
||||
#include <dp_mon.h>
|
||||
#endif
|
||||
#include <qdf_module.h>
|
||||
|
||||
#ifdef WDI_EVENT_ENABLE
|
||||
/**
|
||||
* dp_wdi_event_next_sub() - Return handle for Next WDI event
|
||||
* @wdi_sub: WDI Event handle
|
||||
*
|
||||
* Return handle for next WDI event in list
|
||||
*
|
||||
* Return: Next WDI event to be subscribe
|
||||
*/
|
||||
static inline wdi_event_subscribe *
|
||||
dp_wdi_event_next_sub(wdi_event_subscribe *wdi_sub)
|
||||
{
|
||||
if (!wdi_sub) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid subscriber in %s", __func__);
|
||||
return NULL;
|
||||
}
|
||||
return wdi_sub->priv.next;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* dp_wdi_event_del_subs() - Delete Event subscription
|
||||
* @wdi_sub: WDI Event handle
|
||||
* @event_index: Event index from list
|
||||
*
|
||||
* This API will delete subscribed event from list
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
dp_wdi_event_del_subs(wdi_event_subscribe *wdi_sub, int event_index)
|
||||
{
|
||||
/* Subscribers should take care of deletion */
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* dp_wdi_event_iter_sub() - Iterate through all WDI event in the list
|
||||
* and pass WDI event to callback function
|
||||
* @pdev: DP pdev handle
|
||||
* @event_index: Event index in list
|
||||
* @wdi_sub: WDI event subscriber
|
||||
* @data: pointer to data
|
||||
* @peer_id: peer id number
|
||||
* @status: HTT rx status
|
||||
*
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline void
|
||||
dp_wdi_event_iter_sub(
|
||||
struct dp_pdev *pdev,
|
||||
uint32_t event_index,
|
||||
wdi_event_subscribe *wdi_sub,
|
||||
void *data,
|
||||
uint16_t peer_id,
|
||||
int status)
|
||||
{
|
||||
enum WDI_EVENT event = event_index + WDI_EVENT_BASE;
|
||||
|
||||
if (wdi_sub) {
|
||||
do {
|
||||
wdi_sub->callback(wdi_sub->context, event, data,
|
||||
peer_id, status);
|
||||
} while ((wdi_sub = dp_wdi_event_next_sub(wdi_sub)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
dp_wdi_event_handler(
|
||||
enum WDI_EVENT event,
|
||||
struct dp_soc *soc,
|
||||
void *data,
|
||||
uint16_t peer_id,
|
||||
int status, uint8_t pdev_id)
|
||||
{
|
||||
uint32_t event_index;
|
||||
wdi_event_subscribe *wdi_sub;
|
||||
struct dp_pdev *txrx_pdev;
|
||||
struct dp_soc *soc_t = (struct dp_soc *)soc;
|
||||
txrx_pdev = dp_get_pdev_for_mac_id(soc_t, pdev_id);
|
||||
|
||||
if (!event) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid WDI event in %s", __func__);
|
||||
return;
|
||||
}
|
||||
if (!txrx_pdev || txrx_pdev->pdev_deinit) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid pdev in WDI event handler");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* There can be NULL data, so no validation for the data
|
||||
* Subscribers must do sanity checks based on their requirements
|
||||
*/
|
||||
event_index = event - WDI_EVENT_BASE;
|
||||
|
||||
DP_STATS_INC(txrx_pdev, wdi_event[event_index], 1);
|
||||
wdi_sub = txrx_pdev->wdi_event_list[event_index];
|
||||
|
||||
/* Find the subscriber */
|
||||
dp_wdi_event_iter_sub(txrx_pdev, event_index, wdi_sub, data,
|
||||
peer_id, status);
|
||||
}
|
||||
|
||||
qdf_export_symbol(dp_wdi_event_handler);
|
||||
|
||||
int
|
||||
dp_wdi_event_sub(
|
||||
struct cdp_soc_t *soc, uint8_t pdev_id,
|
||||
wdi_event_subscribe *event_cb_sub_handle,
|
||||
uint32_t event)
|
||||
{
|
||||
uint32_t event_index;
|
||||
wdi_event_subscribe *wdi_sub;
|
||||
wdi_event_subscribe *wdi_sub_itr;
|
||||
struct dp_pdev *txrx_pdev =
|
||||
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
|
||||
pdev_id);
|
||||
wdi_event_subscribe *event_cb_sub =
|
||||
(wdi_event_subscribe *) event_cb_sub_handle;
|
||||
|
||||
if (!txrx_pdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid txrx_pdev in %s", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!event_cb_sub) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid callback in %s", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid event in %s", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dp_monitor_set_pktlog_wifi3(txrx_pdev, event, true);
|
||||
event_index = event - WDI_EVENT_BASE;
|
||||
wdi_sub = txrx_pdev->wdi_event_list[event_index];
|
||||
|
||||
/*
|
||||
* Check if it is the first subscriber of the event
|
||||
*/
|
||||
if (!wdi_sub) {
|
||||
wdi_sub = event_cb_sub;
|
||||
wdi_sub->priv.next = NULL;
|
||||
wdi_sub->priv.prev = NULL;
|
||||
txrx_pdev->wdi_event_list[event_index] = wdi_sub;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Check if event is already subscribed */
|
||||
wdi_sub_itr = wdi_sub;
|
||||
do {
|
||||
if (wdi_sub_itr == event_cb_sub) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"Duplicate wdi subscribe event detected %s", __func__);
|
||||
return 0;
|
||||
}
|
||||
} while ((wdi_sub_itr = dp_wdi_event_next_sub(wdi_sub_itr)));
|
||||
|
||||
event_cb_sub->priv.next = wdi_sub;
|
||||
event_cb_sub->priv.prev = NULL;
|
||||
wdi_sub->priv.prev = event_cb_sub;
|
||||
txrx_pdev->wdi_event_list[event_index] = event_cb_sub;
|
||||
return 0;
|
||||
|
||||
}
|
||||
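/*
 * Illustrative sketch, not part of the original file: a client module
 * attaching a subscriber to the per-pdev WDI_EVENT_TX_DATA list. The
 * subscriber object and wrapper name are hypothetical; dp_wdi_event_sub()
 * and WDI_EVENT_TX_DATA are symbols used elsewhere in this change.
 */
#if 0	/* example only, never compiled */
/* 'example_tx_data_sub' is assumed to have its callback and context fields
 * filled in by the subscribing module before this call.
 */
extern wdi_event_subscribe example_tx_data_sub;

static int example_subscribe_tx_data(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	/* Returns 0 on success (duplicate subscribe included),
	 * -EINVAL for an invalid pdev, callback or event.
	 */
	return dp_wdi_event_sub(soc, pdev_id, &example_tx_data_sub,
				WDI_EVENT_TX_DATA);
}
#endif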
|
||||
int
|
||||
dp_wdi_event_unsub(
|
||||
struct cdp_soc_t *soc, uint8_t pdev_id,
|
||||
wdi_event_subscribe *event_cb_sub_handle,
|
||||
uint32_t event)
|
||||
{
|
||||
uint32_t event_index = event - WDI_EVENT_BASE;
|
||||
struct dp_pdev *txrx_pdev =
|
||||
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
|
||||
pdev_id);
|
||||
wdi_event_subscribe *event_cb_sub =
|
||||
(wdi_event_subscribe *) event_cb_sub_handle;
|
||||
|
||||
if (!txrx_pdev || !event_cb_sub) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid callback or pdev in %s", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dp_monitor_set_pktlog_wifi3(txrx_pdev, event, false);
|
||||
|
||||
if (!event_cb_sub->priv.prev) {
|
||||
txrx_pdev->wdi_event_list[event_index] = event_cb_sub->priv.next;
|
||||
} else {
|
||||
event_cb_sub->priv.prev->priv.next = event_cb_sub->priv.next;
|
||||
}
|
||||
if (event_cb_sub->priv.next) {
|
||||
event_cb_sub->priv.next->priv.prev = event_cb_sub->priv.prev;
|
||||
}
|
||||
|
||||
/* Reset subscribe event list elems */
|
||||
event_cb_sub->priv.next = NULL;
|
||||
event_cb_sub->priv.prev = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
|
||||
{
|
||||
if (!txrx_pdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid device in %s\nWDI event attach failed",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Separate subscriber list for each event */
|
||||
txrx_pdev->wdi_event_list = (wdi_event_subscribe **)
|
||||
qdf_mem_malloc(
|
||||
sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS);
|
||||
if (!txrx_pdev->wdi_event_list) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Insufficient memory for the WDI event lists");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
|
||||
{
|
||||
int i;
|
||||
wdi_event_subscribe *wdi_sub;
|
||||
if (!txrx_pdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid device in %s\nWDI attach failed", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!txrx_pdev->wdi_event_list) {
|
||||
return -EINVAL;
|
||||
}
|
||||
for (i = 0; i < WDI_NUM_EVENTS; i++) {
|
||||
wdi_sub = txrx_pdev->wdi_event_list[i];
|
||||
/* Delete all the subscribers */
|
||||
dp_wdi_event_del_subs(wdi_sub, i);
|
||||
}
|
||||
qdf_mem_free(txrx_pdev->wdi_event_list);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
751
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li.c
Normal file
@ -0,0 +1,751 @@
|
||||
/*
|
||||
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "dp_types.h"
|
||||
#include "dp_rings.h"
|
||||
#include <dp_internal.h>
|
||||
#include <dp_htt.h>
|
||||
#include "dp_li.h"
|
||||
#include "dp_li_tx.h"
|
||||
#include "dp_tx_desc.h"
|
||||
#include "dp_li_rx.h"
|
||||
#include "dp_peer.h"
|
||||
#include <wlan_utility.h>
|
||||
#include "dp_ipa.h"
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include <dp_mon_1.0.h>
|
||||
#endif
|
||||
|
||||
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
|
||||
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
|
||||
{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
|
||||
/*
|
||||
* INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
|
||||
* as indicated by rbm id.
|
||||
*/
|
||||
{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
|
||||
{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
|
||||
};
|
||||
#else
|
||||
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
|
||||
{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
|
||||
{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
|
||||
{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
|
||||
/*
|
||||
* Although WBM ring 4 is actually used, WBM ring 3 is listed here to match
* the tx_mask in dp_service_srngs. Be careful when using this table
* anywhere else.
|
||||
*/
|
||||
{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
|
||||
};
|
||||
#endif
|
||||
|
||||
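/*
 * With IPA WDI3 two-pipe TX enabled, the IPA alternate TX ring's completions
 * are steered to WBM2SW4 (HAL_LI_WBM_SW4_BM_ID), overriding the static map
 * above for that ring.
 */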
#ifdef IPA_WDI3_TX_TWO_PIPES
|
||||
static inline void
|
||||
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
|
||||
{
|
||||
if (!cfg_ctx->ipa_enabled)
|
||||
return;
|
||||
|
||||
cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
|
||||
cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
|
||||
HAL_LI_WBM_SW4_BM_ID;
|
||||
}
|
||||
#else
|
||||
static inline void
|
||||
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void dp_soc_cfg_attach_li(struct dp_soc *soc)
|
||||
{
|
||||
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
|
||||
|
||||
dp_soc_cfg_attach(soc);
|
||||
|
||||
wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
|
||||
|
||||
soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
|
||||
dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
|
||||
}
|
||||
|
||||
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
|
||||
{
|
||||
switch (context_type) {
|
||||
case DP_CONTEXT_TYPE_SOC:
|
||||
return sizeof(struct dp_soc_li);
|
||||
case DP_CONTEXT_TYPE_PDEV:
|
||||
return sizeof(struct dp_pdev_li);
|
||||
case DP_CONTEXT_TYPE_VDEV:
|
||||
return sizeof(struct dp_vdev_li);
|
||||
case DP_CONTEXT_TYPE_PEER:
|
||||
return sizeof(struct dp_peer_li);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
|
||||
struct cdp_soc_attach_params *params)
|
||||
{
|
||||
soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_interrupt_attach_li(struct cdp_soc_t *txrx_soc)
|
||||
{
|
||||
return dp_soc_interrupt_attach(txrx_soc);
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_attach_poll_li(struct cdp_soc_t *txrx_soc)
|
||||
{
|
||||
return dp_soc_attach_poll(txrx_soc);
|
||||
}
|
||||
|
||||
static void dp_soc_interrupt_detach_li(struct cdp_soc_t *txrx_soc)
|
||||
{
|
||||
return dp_soc_interrupt_detach(txrx_soc);
|
||||
}
|
||||
|
||||
static uint32_t dp_service_srngs_li(void *dp_ctx, uint32_t dp_budget, int cpu)
|
||||
{
|
||||
return dp_service_srngs(dp_ctx, dp_budget, cpu);
|
||||
}
|
||||
|
||||
static void *dp_soc_init_li(struct dp_soc *soc, HTC_HANDLE htc_handle,
|
||||
struct hif_opaque_softc *hif_handle)
|
||||
{
|
||||
wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
|
||||
WLAN_MD_DP_SOC, "dp_soc");
|
||||
|
||||
soc->hif_handle = hif_handle;
|
||||
|
||||
soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
|
||||
if (!soc->hal_soc)
|
||||
return NULL;
|
||||
|
||||
return dp_soc_init(soc, htc_handle, hif_handle);
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
|
||||
{
|
||||
qdf_atomic_set(&soc->cmn_init_done, 0);
|
||||
|
||||
dp_soc_deinit(soc);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
|
||||
struct cdp_pdev_attach_params *params)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
#ifdef AST_OFFLOAD_ENABLE
|
||||
static void dp_peer_map_detach_li(struct dp_soc *soc)
|
||||
{
|
||||
dp_soc_wds_detach(soc);
|
||||
dp_peer_ast_table_detach(soc);
|
||||
dp_peer_ast_hash_detach(soc);
|
||||
dp_peer_mec_hash_detach(soc);
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
|
||||
{
|
||||
QDF_STATUS status;
|
||||
|
||||
soc->max_peer_id = soc->max_peers;
|
||||
|
||||
status = dp_peer_ast_table_attach(soc);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status))
|
||||
return status;
|
||||
|
||||
status = dp_peer_ast_hash_attach(soc);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status))
|
||||
goto ast_table_detach;
|
||||
|
||||
status = dp_peer_mec_hash_attach(soc);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status))
|
||||
goto hash_detach;
|
||||
|
||||
dp_soc_wds_attach(soc);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
hash_detach:
|
||||
dp_peer_ast_hash_detach(soc);
|
||||
ast_table_detach:
|
||||
dp_peer_ast_table_detach(soc);
|
||||
|
||||
return status;
|
||||
}
|
||||
#else
|
||||
static void dp_peer_map_detach_li(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
|
||||
{
|
||||
soc->max_peer_id = soc->max_peers;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
static QDF_STATUS dp_peer_setup_li(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
|
||||
uint8_t *peer_mac,
|
||||
struct cdp_peer_setup_info *setup_info)
|
||||
{
|
||||
return dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac, setup_info);
|
||||
}
|
||||
|
||||
qdf_size_t dp_get_soc_context_size_li(void)
|
||||
{
|
||||
return sizeof(struct dp_soc);
|
||||
}
|
||||
|
||||
#ifdef NO_RX_PKT_HDR_TLV
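/*
 * With NO_RX_PKT_HDR_TLV the rx_pkt_header TLV is not subscribed
 * (packet_header = 0, rx_header_offset = 0); the #else variant below
 * subscribes it and programs rx_header_offset from the HAL.
 */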
|
||||
/**
|
||||
* dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
|
||||
* @soc: Common DP soc handle
|
||||
*
|
||||
* Return: QDF_STATUS
|
||||
*/
|
||||
static QDF_STATUS
|
||||
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
|
||||
{
|
||||
int i;
|
||||
int mac_id;
|
||||
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
|
||||
struct dp_srng *rx_mac_srng;
|
||||
QDF_STATUS status = QDF_STATUS_SUCCESS;
|
||||
uint32_t target_type = hal_get_target_type(soc->hal_soc);
|
||||
uint16_t buf_size;
|
||||
|
||||
buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
|
||||
|
||||
if (target_type == TARGET_TYPE_QCN9160)
|
||||
return status;
|
||||
|
||||
htt_tlv_filter.mpdu_start = 1;
|
||||
htt_tlv_filter.msdu_start = 1;
|
||||
htt_tlv_filter.mpdu_end = 1;
|
||||
htt_tlv_filter.msdu_end = 1;
|
||||
htt_tlv_filter.attention = 1;
|
||||
htt_tlv_filter.packet = 1;
|
||||
htt_tlv_filter.packet_header = 0;
|
||||
|
||||
htt_tlv_filter.ppdu_start = 0;
|
||||
htt_tlv_filter.ppdu_end = 0;
|
||||
htt_tlv_filter.ppdu_end_user_stats = 0;
|
||||
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
|
||||
htt_tlv_filter.ppdu_end_status_done = 0;
|
||||
htt_tlv_filter.enable_fp = 1;
|
||||
htt_tlv_filter.enable_md = 0;
|
||||
htt_tlv_filter.enable_md = 0;
|
||||
htt_tlv_filter.enable_mo = 0;
|
||||
|
||||
htt_tlv_filter.fp_mgmt_filter = 0;
|
||||
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
|
||||
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
|
||||
FILTER_DATA_MCAST |
|
||||
FILTER_DATA_DATA);
|
||||
htt_tlv_filter.mo_mgmt_filter = 0;
|
||||
htt_tlv_filter.mo_ctrl_filter = 0;
|
||||
htt_tlv_filter.mo_data_filter = 0;
|
||||
htt_tlv_filter.md_data_filter = 0;
|
||||
|
||||
htt_tlv_filter.offset_valid = true;
|
||||
|
||||
htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
|
||||
/*Not subscribing rx_pkt_header*/
|
||||
htt_tlv_filter.rx_header_offset = 0;
|
||||
htt_tlv_filter.rx_mpdu_start_offset =
|
||||
hal_rx_mpdu_start_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_mpdu_end_offset =
|
||||
hal_rx_mpdu_end_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_msdu_start_offset =
|
||||
hal_rx_msdu_start_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_msdu_end_offset =
|
||||
hal_rx_msdu_end_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_attn_offset =
|
||||
hal_rx_attn_offset_get(soc->hal_soc);
|
||||
|
||||
for (i = 0; i < MAX_PDEV_CNT; i++) {
|
||||
struct dp_pdev *pdev = soc->pdev_list[i];
|
||||
|
||||
if (!pdev)
|
||||
continue;
|
||||
|
||||
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
|
||||
int mac_for_pdev =
|
||||
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
|
||||
/*
|
||||
* Obtain lmac id from pdev to access the LMAC ring
|
||||
* in soc context
|
||||
*/
|
||||
int lmac_id =
|
||||
dp_get_lmac_id_for_pdev_id(soc, mac_id,
|
||||
pdev->pdev_id);
|
||||
|
||||
rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
|
||||
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
|
||||
rx_mac_srng->hal_srng,
|
||||
RXDMA_BUF, buf_size,
|
||||
&htt_tlv_filter);
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
#else
|
||||
|
||||
static QDF_STATUS
|
||||
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
|
||||
{
|
||||
int i;
|
||||
int mac_id;
|
||||
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
|
||||
struct dp_srng *rx_mac_srng;
|
||||
QDF_STATUS status = QDF_STATUS_SUCCESS;
|
||||
uint32_t target_type = hal_get_target_type(soc->hal_soc);
|
||||
uint16_t buf_size;
|
||||
|
||||
buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
|
||||
|
||||
if (target_type == TARGET_TYPE_QCN9160)
|
||||
return status;
|
||||
|
||||
htt_tlv_filter.mpdu_start = 1;
|
||||
htt_tlv_filter.msdu_start = 1;
|
||||
htt_tlv_filter.mpdu_end = 1;
|
||||
htt_tlv_filter.msdu_end = 1;
|
||||
htt_tlv_filter.attention = 1;
|
||||
htt_tlv_filter.packet = 1;
|
||||
htt_tlv_filter.packet_header = 1;
|
||||
|
||||
htt_tlv_filter.ppdu_start = 0;
|
||||
htt_tlv_filter.ppdu_end = 0;
|
||||
htt_tlv_filter.ppdu_end_user_stats = 0;
|
||||
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
|
||||
htt_tlv_filter.ppdu_end_status_done = 0;
|
||||
htt_tlv_filter.enable_fp = 1;
|
||||
htt_tlv_filter.enable_md = 0;
|
||||
htt_tlv_filter.enable_md = 0;
|
||||
htt_tlv_filter.enable_mo = 0;
|
||||
|
||||
htt_tlv_filter.fp_mgmt_filter = 0;
|
||||
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
|
||||
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
|
||||
FILTER_DATA_MCAST |
|
||||
FILTER_DATA_DATA);
|
||||
htt_tlv_filter.mo_mgmt_filter = 0;
|
||||
htt_tlv_filter.mo_ctrl_filter = 0;
|
||||
htt_tlv_filter.mo_data_filter = 0;
|
||||
htt_tlv_filter.md_data_filter = 0;
|
||||
|
||||
htt_tlv_filter.offset_valid = true;
|
||||
|
||||
htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
|
||||
htt_tlv_filter.rx_header_offset =
|
||||
hal_rx_pkt_tlv_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_mpdu_start_offset =
|
||||
hal_rx_mpdu_start_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_mpdu_end_offset =
|
||||
hal_rx_mpdu_end_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_msdu_start_offset =
|
||||
hal_rx_msdu_start_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_msdu_end_offset =
|
||||
hal_rx_msdu_end_offset_get(soc->hal_soc);
|
||||
htt_tlv_filter.rx_attn_offset =
|
||||
hal_rx_attn_offset_get(soc->hal_soc);
|
||||
|
||||
for (i = 0; i < MAX_PDEV_CNT; i++) {
|
||||
struct dp_pdev *pdev = soc->pdev_list[i];
|
||||
|
||||
if (!pdev)
|
||||
continue;
|
||||
|
||||
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
|
||||
int mac_for_pdev =
|
||||
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
|
||||
/*
|
||||
* Obtain lmac id from pdev to access the LMAC ring
|
||||
* in soc context
|
||||
*/
|
||||
int lmac_id =
|
||||
dp_get_lmac_id_for_pdev_id(soc, mac_id,
|
||||
pdev->pdev_id);
|
||||
|
||||
rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
|
||||
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
|
||||
rx_mac_srng->hal_srng,
|
||||
RXDMA_BUF, buf_size,
|
||||
&htt_tlv_filter);
|
||||
}
|
||||
}
|
||||
return status;
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_srng_init_li(struct dp_soc *soc, struct dp_srng *srng,
|
||||
int ring_type, int ring_num, int mac_id)
|
||||
{
|
||||
return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
|
||||
}
|
||||
|
||||
#ifdef QCA_DP_ENABLE_TX_COMP_RING4
|
||||
static inline
|
||||
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
if (soc) {
|
||||
wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
|
||||
soc->tx_comp_ring[3].alloc_size,
|
||||
soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
|
||||
"Transmit_completion_ring");
|
||||
dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
if (soc) {
|
||||
if (dp_srng_init(soc, &soc->tx_comp_ring[3],
|
||||
WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
|
||||
dp_err("%pK: dp_srng_init failed for rx_rel_ring",
|
||||
soc);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
|
||||
soc->tx_comp_ring[3].alloc_size,
|
||||
soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
|
||||
"Transmit_completion_ring");
|
||||
}
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
void dp_free_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
if (soc)
|
||||
dp_srng_free(soc, &soc->tx_comp_ring[3]);
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
|
||||
uint32_t cached)
|
||||
{
|
||||
if (soc) {
|
||||
if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
|
||||
tx_comp_ring_size, cached)) {
|
||||
dp_err("dp_srng_alloc failed for tx_comp_ring");
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
}
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
void dp_free_txcomp_ring4(struct dp_soc *soc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
|
||||
uint32_t cached)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
|
||||
{
|
||||
/* Tx Complete ring */
|
||||
dp_deinit_txcomp_ring4(soc);
|
||||
}
|
||||
|
||||
static void dp_soc_srng_free_li(struct dp_soc *soc)
|
||||
{
|
||||
dp_free_txcomp_ring4(soc);
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
|
||||
{
|
||||
uint32_t tx_comp_ring_size;
|
||||
uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
|
||||
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
|
||||
|
||||
soc_cfg_ctx = soc->wlan_cfg_ctx;
|
||||
|
||||
tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
|
||||
/* Disable cached desc if NSS offload is enabled */
|
||||
if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
|
||||
cached = 0;
|
||||
|
||||
if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
|
||||
goto fail1;
|
||||
return QDF_STATUS_SUCCESS;
|
||||
fail1:
|
||||
dp_soc_srng_free_li(soc);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
|
||||
{
|
||||
/* Tx completion ring 4 (index 3) */
|
||||
if (dp_init_txcomp_ring4(soc))
|
||||
goto fail1;
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
fail1:
|
||||
/*
|
||||
* Cleanup will be done as part of soc_detach, which will
|
||||
* be called on pdev attach failure
|
||||
*/
|
||||
dp_soc_srng_deinit_li(soc);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
|
||||
uint8_t tx_ring_id,
|
||||
uint8_t bm_id)
|
||||
{
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
enum cdp_vdev_param_type param,
|
||||
cdp_config_param_type val)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
bool
|
||||
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
|
||||
qdf_nbuf_t nbuf_copy,
|
||||
struct cdp_tid_rx_stats *tid_stats,
|
||||
uint8_t link_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
|
||||
uint32_t *msg_word,
|
||||
void *rx_filter)
|
||||
{
|
||||
}
|
||||
|
||||
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
|
||||
struct cdp_lro_hash_config *lro_hash)
|
||||
{
|
||||
dp_get_rx_hash_key_bytes(lro_hash);
|
||||
}
|
||||
|
||||
static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
|
||||
struct cdp_peer_setup_info *setup_info,
|
||||
enum cdp_host_reo_dest_ring *reo_dest,
|
||||
bool *hash_based,
|
||||
uint8_t *lmac_peer_id_msb)
|
||||
{
|
||||
dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
|
||||
}
|
||||
|
||||
static bool dp_reo_remap_config_li(struct dp_soc *soc,
|
||||
uint32_t *remap0,
|
||||
uint32_t *remap1,
|
||||
uint32_t *remap2)
|
||||
{
|
||||
return dp_reo_remap_config(soc, remap0, remap1, remap2);
|
||||
}
|
||||
|
||||
static uint8_t dp_soc_get_num_soc_li(struct dp_soc *soc)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static QDF_STATUS dp_txrx_get_vdev_mcast_param_li(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
cdp_config_param_type *val)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static uint8_t dp_get_hw_link_id_li(struct dp_pdev *pdev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dp_get_vdev_stats_for_unmap_peer_li(
|
||||
struct dp_vdev *vdev,
|
||||
struct dp_peer *peer)
|
||||
{
|
||||
dp_get_vdev_stats_for_unmap_peer_legacy(vdev, peer);
|
||||
}
|
||||
|
||||
static struct
|
||||
dp_soc *dp_get_soc_by_chip_id_li(struct dp_soc *soc,
|
||||
uint8_t chip_id)
|
||||
{
|
||||
return soc;
|
||||
}
|
||||
|
||||
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
|
||||
{
|
||||
#ifndef QCA_HOST_MODE_WIFI_DISABLED
|
||||
arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
|
||||
arch_ops->dp_rx_process = dp_rx_process_li;
|
||||
arch_ops->dp_tx_send_fast = dp_tx_send;
|
||||
arch_ops->tx_comp_get_params_from_hal_desc =
|
||||
dp_tx_comp_get_params_from_hal_desc_li;
|
||||
arch_ops->dp_tx_process_htt_completion =
|
||||
dp_tx_process_htt_completion_li;
|
||||
arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
|
||||
dp_wbm_get_rx_desc_from_hal_desc_li;
|
||||
arch_ops->dp_tx_desc_pool_alloc = dp_tx_desc_pool_alloc_li;
|
||||
arch_ops->dp_tx_desc_pool_free = dp_tx_desc_pool_free_li;
|
||||
arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
|
||||
arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
|
||||
arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
|
||||
arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
|
||||
arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
|
||||
arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
|
||||
arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
|
||||
arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
|
||||
#else
|
||||
arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
|
||||
arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
|
||||
#endif
|
||||
arch_ops->txrx_get_context_size = dp_get_context_size_li;
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
|
||||
#endif
|
||||
arch_ops->txrx_soc_attach = dp_soc_attach_li;
|
||||
arch_ops->txrx_soc_detach = dp_soc_detach_li;
|
||||
arch_ops->txrx_soc_init = dp_soc_init_li;
|
||||
arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
|
||||
arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
|
||||
arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
|
||||
arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
|
||||
arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
|
||||
arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
|
||||
arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
|
||||
arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
|
||||
arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
|
||||
arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
|
||||
arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
|
||||
arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
|
||||
arch_ops->dp_set_rx_fst = NULL;
|
||||
arch_ops->dp_get_rx_fst = NULL;
|
||||
arch_ops->dp_rx_fst_ref = NULL;
|
||||
arch_ops->dp_rx_fst_deref = NULL;
|
||||
arch_ops->txrx_peer_setup = dp_peer_setup_li;
|
||||
arch_ops->dp_rx_desc_cookie_2_va =
|
||||
dp_rx_desc_cookie_2_va_li;
|
||||
arch_ops->dp_rx_intrabss_mcast_handler =
|
||||
dp_rx_intrabss_handle_nawds_li;
|
||||
arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
|
||||
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
|
||||
arch_ops->dp_rx_peer_metadata_peer_id_get =
|
||||
dp_rx_peer_metadata_peer_id_get_li;
|
||||
arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
|
||||
arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
|
||||
arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
|
||||
arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
|
||||
arch_ops->dp_peer_rx_reorder_queue_setup =
|
||||
dp_peer_rx_reorder_queue_setup_li;
|
||||
arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
|
||||
arch_ops->reo_remap_config = dp_reo_remap_config_li;
|
||||
arch_ops->dp_get_soc_by_chip_id = dp_get_soc_by_chip_id_li;
|
||||
arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_li;
|
||||
arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
|
||||
arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
|
||||
arch_ops->get_hw_link_id = dp_get_hw_link_id_li;
|
||||
arch_ops->txrx_srng_init = dp_srng_init_li;
|
||||
arch_ops->dp_get_vdev_stats_for_unmap_peer =
|
||||
dp_get_vdev_stats_for_unmap_peer_li;
|
||||
arch_ops->dp_get_interface_stats = dp_txrx_get_vdev_stats;
|
||||
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
|
||||
arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
|
||||
#endif
|
||||
arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
|
||||
arch_ops->dp_soc_interrupt_attach = dp_soc_interrupt_attach_li;
|
||||
arch_ops->dp_soc_attach_poll = dp_soc_attach_poll_li;
|
||||
arch_ops->dp_soc_interrupt_detach = dp_soc_interrupt_detach_li;
|
||||
arch_ops->dp_service_srngs = dp_service_srngs_li;
|
||||
}
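
/*
 * Dispatch sketch (assumption, for illustration only): the common DP layer
 * is expected to reach these LI handlers indirectly through the per-target
 * ops table rather than by name, e.g.
 *
 *   dp_initialize_arch_ops_li(&soc->arch_ops);
 *   ctx_size = soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_SOC);
 */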
|
||||
|
||||
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
|
||||
void dp_tx_comp_get_prefetched_params_from_hal_desc(
|
||||
struct dp_soc *soc,
|
||||
void *tx_comp_hal_desc,
|
||||
struct dp_tx_desc_s **r_tx_desc)
|
||||
{
|
||||
uint8_t pool_id;
|
||||
uint32_t tx_desc_id;
|
||||
|
||||
tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
|
||||
pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
|
||||
DP_TX_DESC_ID_POOL_OS;
|
||||
|
||||
/* Find Tx descriptor */
|
||||
*r_tx_desc = dp_tx_desc_find(soc, pool_id,
|
||||
(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
|
||||
DP_TX_DESC_ID_PAGE_OS,
|
||||
(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
|
||||
DP_TX_DESC_ID_OFFSET_OS,
|
||||
(tx_desc_id & DP_TX_DESC_ID_SPCL_MASK));
|
||||
qdf_prefetch((uint8_t *)*r_tx_desc);
|
||||
}
|
||||
#endif
|
87
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li.h
Normal file
@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#ifndef __DP_LI_H
|
||||
#define __DP_LI_H
|
||||
|
||||
#include <dp_types.h>
|
||||
#ifdef WIFI_MONITOR_SUPPORT
|
||||
#include <dp_mon.h>
|
||||
#endif
|
||||
#include <hal_li_tx.h>
|
||||
#include <hal_li_rx.h>
|
||||
|
||||
/* WBM2SW ring id for rx release */
|
||||
#define WBM2SW_REL_ERR_RING_NUM 3
|
||||
|
||||
/**
|
||||
* struct dp_soc_li - Extended DP soc for LI targets
|
||||
* @soc: dp soc structure
|
||||
*/
|
||||
struct dp_soc_li {
|
||||
struct dp_soc soc;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dp_pdev_li - Extended DP pdev for LI targets
|
||||
* @pdev: dp_pdev structure
|
||||
*/
|
||||
struct dp_pdev_li {
|
||||
struct dp_pdev pdev;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dp_vdev_li - Extended DP vdev for LI targets
|
||||
* @vdev: dp_vdev structure
|
||||
*/
|
||||
struct dp_vdev_li {
|
||||
struct dp_vdev vdev;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dp_peer_li - Extended DP peer for LI targets
|
||||
* @peer: dp_peer structure
|
||||
*/
|
||||
struct dp_peer_li {
|
||||
struct dp_peer peer;
|
||||
};
|
||||
|
||||
/**
|
||||
* dp_get_soc_context_size_li() - get context size for dp_soc_li
|
||||
*
|
||||
* Return: value in bytes for LI specific soc structure
|
||||
*/
|
||||
qdf_size_t dp_get_soc_context_size_li(void);
|
||||
|
||||
/**
|
||||
* dp_initialize_arch_ops_li() - initialize LI specific arch ops
|
||||
* @arch_ops: arch ops pointer
|
||||
*
|
||||
* Return: none
|
||||
*/
|
||||
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops);
|
||||
|
||||
/**
|
||||
* dp_get_context_size_li() - get LI specific size for peer/vdev/pdev/soc
|
||||
* @context_type: DP context type for which the size is needed
|
||||
*
|
||||
* Return: size in bytes for the context_type
|
||||
*/
|
||||
|
||||
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type);
|
||||
#endif
|
1636
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_rx.c
Normal file
File diff suppressed because it is too large
357
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_rx.h
Normal file
@ -0,0 +1,357 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DP_LI_RX_H_
|
||||
#define _DP_LI_RX_H_
|
||||
|
||||
#include <dp_types.h>
|
||||
#include <dp_rx.h>
|
||||
#include "dp_li.h"
|
||||
|
||||
/**
|
||||
* dp_rx_process_li() - Brain of the Rx processing functionality
|
||||
* Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
|
||||
* @int_ctx: per interrupt context
|
||||
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
|
||||
* @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
|
||||
* @quota: No. of units (packets) that can be serviced in one shot.
|
||||
*
|
||||
* This function implements the core of Rx functionality. This is
|
||||
* expected to handle only non-error frames.
|
||||
*
|
||||
* Return: uint32_t: No. of elements processed
|
||||
*/
|
||||
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
|
||||
hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
|
||||
uint32_t quota);
|
||||
|
||||
/**
|
||||
* dp_rx_chain_msdus_li() - Function to chain all msdus of a mpdu
|
||||
* to pdev invalid peer list
|
||||
*
|
||||
* @soc: core DP main context
|
||||
* @nbuf: Buffer pointer
|
||||
* @rx_tlv_hdr: start of rx tlv header
|
||||
* @mac_id: mac id
|
||||
*
|
||||
* Return: bool: true for last msdu of mpdu
|
||||
*/
|
||||
bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
|
||||
uint8_t *rx_tlv_hdr, uint8_t mac_id);
|
||||
|
||||
/**
|
||||
* dp_rx_desc_pool_init_li() - Initialize Rx Descriptor pool(s)
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @rx_desc_pool: Rx descriptor pool handler
|
||||
* @pool_id: Rx descriptor pool ID
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
uint32_t pool_id);
|
||||
|
||||
/**
|
||||
* dp_rx_desc_pool_deinit_li() - De-initialize Rx Descriptor pool(s)
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @rx_desc_pool: Rx descriptor pool handler
|
||||
* @pool_id: Rx descriptor pool ID
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
|
||||
struct rx_desc_pool *rx_desc_pool,
|
||||
uint32_t pool_id);
|
||||
|
||||
/**
|
||||
* dp_wbm_get_rx_desc_from_hal_desc_li() - Get corresponding Rx Desc
|
||||
* address from WBM ring Desc
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @ring_desc: ring descriptor structure pointer
|
||||
* @r_rx_desc: pointer to a pointer of Rx Desc
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
|
||||
*/
|
||||
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
|
||||
struct dp_soc *soc,
|
||||
void *ring_desc,
|
||||
struct dp_rx_desc **r_rx_desc);
|
||||
/**
|
||||
* dp_rx_get_reo_qdesc_addr_li(): API to get qdesc address of reo
|
||||
* entrance ring desc
|
||||
*
|
||||
* @hal_soc: Handle to HAL Soc structure
|
||||
* @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
|
||||
* @buf: pointer to the start of RX PKT TLV headers
|
||||
* @txrx_peer: pointer to txrx_peer
|
||||
* @tid: tid value
|
||||
*
|
||||
* Return: qdesc address in reo destination ring buffer
|
||||
*/
|
||||
static inline
|
||||
uint64_t dp_rx_get_reo_qdesc_addr_li(hal_soc_handle_t hal_soc,
|
||||
uint8_t *dst_ring_desc,
|
||||
uint8_t *buf,
|
||||
struct dp_txrx_peer *txrx_peer,
|
||||
unsigned int tid)
|
||||
{
|
||||
return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_rx_desc_cookie_2_va_li() - Convert RX Desc cookie ID to VA
|
||||
* @soc:Handle to DP Soc structure
|
||||
* @cookie: cookie used to lookup virtual address
|
||||
*
|
||||
* Return: Rx descriptor virtual address
|
||||
*/
|
||||
static inline
|
||||
struct dp_rx_desc *dp_rx_desc_cookie_2_va_li(struct dp_soc *soc,
|
||||
uint32_t cookie)
|
||||
{
|
||||
return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
|
||||
}
|
||||
|
||||
#define DP_PEER_METADATA_VDEV_ID_MASK 0x003f0000
|
||||
#define DP_PEER_METADATA_VDEV_ID_SHIFT 16
|
||||
#define DP_PEER_METADATA_OFFLOAD_MASK 0x01000000
|
||||
#define DP_PEER_METADATA_OFFLOAD_SHIFT 24
|
||||
|
||||
#define DP_PEER_METADATA_VDEV_ID_GET_LI(_peer_metadata) \
|
||||
(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK) \
|
||||
>> DP_PEER_METADATA_VDEV_ID_SHIFT)
|
||||
|
||||
#define DP_PEER_METADATA_OFFLOAD_GET_LI(_peer_metadata) \
|
||||
(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK) \
|
||||
>> DP_PEER_METADATA_OFFLOAD_SHIFT)
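
/*
 * Worked example (illustrative value only): for peer_metadata 0x012a0005,
 * DP_PEER_METADATA_VDEV_ID_GET_LI() yields 0x2a and
 * DP_PEER_METADATA_OFFLOAD_GET_LI() yields 1.
 */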
|
||||
|
||||
static inline uint16_t
|
||||
dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
|
||||
{
|
||||
struct htt_rx_peer_metadata_v0 *metadata =
|
||||
(struct htt_rx_peer_metadata_v0 *)&peer_metadata;
|
||||
|
||||
return metadata->peer_id;
|
||||
}
|
||||
|
||||
bool
|
||||
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
|
||||
qdf_nbuf_t nbuf_copy,
|
||||
struct cdp_tid_rx_stats *tid_stats,
|
||||
uint8_t link_id);
|
||||
|
||||
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
|
||||
static inline
|
||||
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
|
||||
{
|
||||
struct rx_pkt_tlvs *pkt_tlvs;
|
||||
|
||||
if (next) {
|
||||
/* prefetch skb->next and first few bytes of skb->cb */
|
||||
qdf_prefetch(next);
|
||||
/* skb->cb spread across 2 cache lines hence below prefetch */
|
||||
qdf_prefetch(&next->_skb_refdst);
|
||||
qdf_prefetch(&next->len);
|
||||
qdf_prefetch(&next->protocol);
|
||||
pkt_tlvs = (struct rx_pkt_tlvs *)next->data;
|
||||
/* sa_idx, da_idx, l3_pad in RX msdu_end TLV */
|
||||
qdf_prefetch(pkt_tlvs);
|
||||
/* msdu_done in RX attention TLV */
|
||||
qdf_prefetch(&pkt_tlvs->attn_tlv);
|
||||
/* fr_ds & to_ds in RX MPDU start TLV */
|
||||
if (qdf_nbuf_is_rx_chfrag_end(nbuf))
|
||||
qdf_prefetch(&pkt_tlvs->mpdu_start_tlv);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
|
||||
/**
|
||||
* dp_rx_cookie_2_va_rxdma_buf_prefetch() - function to prefetch the SW desc
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @cookie: cookie used to lookup virtual address
|
||||
*
|
||||
* Return: prefetched Rx descriptor virtual address
|
||||
*/
|
||||
static inline
|
||||
void *dp_rx_cookie_2_va_rxdma_buf_prefetch(struct dp_soc *soc, uint32_t cookie)
|
||||
{
|
||||
uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
|
||||
uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
|
||||
struct rx_desc_pool *rx_desc_pool;
|
||||
void *prefetch_desc;
|
||||
|
||||
if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
|
||||
return NULL;
|
||||
|
||||
rx_desc_pool = &soc->rx_desc_buf[pool_id];
|
||||
|
||||
if (qdf_unlikely(index >= rx_desc_pool->pool_size))
|
||||
return NULL;
|
||||
|
||||
prefetch_desc = &soc->rx_desc_buf[pool_id].array[index].rx_desc;
|
||||
qdf_prefetch(prefetch_desc);
|
||||
return prefetch_desc;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_rx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
|
||||
* @soc: Handle to DP Soc structure
|
||||
* @hal_soc: Handle to HAL Soc structure
|
||||
* @num_entries: valid number of HW descriptors
|
||||
* @hal_ring_hdl: Destination ring pointer
|
||||
* @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
|
||||
* @last_prefetched_sw_desc: input & output param of last prefetch SW desc
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static inline
|
||||
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
|
||||
hal_soc_handle_t hal_soc,
|
||||
uint32_t num_entries,
|
||||
hal_ring_handle_t hal_ring_hdl,
|
||||
hal_ring_desc_t *last_prefetched_hw_desc,
|
||||
struct dp_rx_desc **last_prefetched_sw_desc)
|
||||
{
|
||||
if (*last_prefetched_sw_desc) {
|
||||
qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
|
||||
qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
|
||||
}
|
||||
|
||||
if (num_entries) {
|
||||
*last_prefetched_sw_desc = dp_rx_cookie_2_va_rxdma_buf_prefetch(soc, HAL_RX_REO_BUF_COOKIE_GET(*last_prefetched_hw_desc));
|
||||
*last_prefetched_hw_desc = hal_srng_dst_prefetch_next_cached_desc(hal_soc,
|
||||
hal_ring_hdl,
|
||||
(uint8_t *)*last_prefetched_hw_desc);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
|
||||
hal_soc_handle_t hal_soc,
|
||||
uint32_t quota,
|
||||
hal_ring_handle_t hal_ring_hdl,
|
||||
hal_ring_desc_t *last_prefetched_hw_desc,
|
||||
struct dp_rx_desc **last_prefetched_sw_desc)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
QDF_STATUS dp_peer_rx_reorder_queue_setup_li(struct dp_soc *soc,
|
||||
struct dp_peer *peer,
|
||||
uint32_t tid_bitmap,
|
||||
uint32_t ba_window_size)
|
||||
{
|
||||
int tid;
|
||||
struct dp_rx_tid *rx_tid;
|
||||
|
||||
if (!soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
|
||||
dp_peer_debug("peer_rx_reorder_queue_setup NULL");
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
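/*
 * Issue a reorder queue setup through the control-path callback for
 * every TID set in tid_bitmap that has a valid hw_qdesc_paddr.
 */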
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
|
||||
if (!(BIT(tid) & tid_bitmap))
|
||||
continue;
|
||||
|
||||
rx_tid = &peer->rx_tid[tid];
|
||||
if (!rx_tid->hw_qdesc_paddr) {
|
||||
tid_bitmap &= ~BIT(tid);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
|
||||
soc->ctrl_psoc,
|
||||
peer->vdev->pdev->pdev_id,
|
||||
peer->vdev->vdev_id,
|
||||
peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
|
||||
1, ba_window_size)) {
|
||||
dp_peer_err("%pK: Fail to send reo q setup. tid %d",
|
||||
soc, tid);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
if (!tid_bitmap) {
|
||||
dp_peer_err("tid_bitmap=0. All tids setup fail");
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_rx_wbm_err_reap_desc_li() - Function to reap and replenish
|
||||
* WBM RX Error descriptors
|
||||
*
|
||||
* @int_ctx: pointer to DP interrupt context
|
||||
* @soc: core DP main context
|
||||
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
|
||||
* @quota: No. of units (packets) that can be serviced in one shot.
|
||||
* @rx_bufs_used: No. of descriptors reaped
|
||||
*
|
||||
* This function implements the core Rx error-path functionality: it reaps
* and replenishes the RX error ring descriptors and creates a nbuf list
* out of them. It also reads the wbm error information from the descriptors
* and updates the nbuf tlv area.
|
||||
*
|
||||
* Return: qdf_nbuf_t: head pointer to the nbuf list created
|
||||
*/
|
||||
qdf_nbuf_t
|
||||
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
|
||||
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
|
||||
uint32_t *rx_bufs_used);
|
||||
|
||||
/**
|
||||
* dp_rx_null_q_desc_handle_li() - Function to handle NULL Queue
|
||||
* descriptor violation on either a
|
||||
* REO or WBM ring
|
||||
*
|
||||
* @soc: core DP main context
|
||||
* @nbuf: buffer pointer
|
||||
* @rx_tlv_hdr: start of rx tlv header
|
||||
* @pool_id: mac id
|
||||
* @txrx_peer: txrx peer handle
|
||||
* @is_reo_exception: flag to check if the error is from REO or WBM
|
||||
* @link_id: link Id on which packet is received
|
||||
*
|
||||
* This function handles NULL queue descriptor violations arising out of
* a missing REO queue for a given peer or a given TID. This typically
* may happen if a packet is received on a QoS enabled TID before the
* ADDBA negotiation for that TID, which is when the TID queue is set up. Or
|
||||
* it may also happen for MC/BC frames if they are not routed to the
|
||||
* non-QOS TID queue, in the absence of any other default TID queue.
|
||||
* This error can show up both in a REO destination or WBM release ring.
|
||||
*
|
||||
* Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
|
||||
* if nbuf could not be handled or dropped.
|
||||
*/
|
||||
QDF_STATUS
|
||||
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
|
||||
uint8_t *rx_tlv_hdr, uint8_t pool_id,
|
||||
struct dp_txrx_peer *txrx_peer,
|
||||
bool is_reo_exception,
|
||||
uint8_t link_id);
|
||||
#endif
|
652
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_tx.c
Normal file
@ -0,0 +1,652 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for
|
||||
* any purpose with or without fee is hereby granted, provided that the
|
||||
* above copyright notice and this permission notice appear in all
|
||||
* copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#include "cdp_txrx_cmn_struct.h"
|
||||
#include "dp_types.h"
|
||||
#include "dp_tx.h"
|
||||
#include "dp_li_tx.h"
|
||||
#include "dp_tx_desc.h"
|
||||
#include <dp_internal.h>
|
||||
#include <dp_htt.h>
|
||||
#include <hal_li_api.h>
|
||||
#include <hal_li_tx.h>
|
||||
#include "dp_peer.h"
|
||||
#ifdef FEATURE_WDS
|
||||
#include "dp_txrx_wds.h"
|
||||
#endif
|
||||
#include "dp_li.h"
|
||||
|
||||
extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
|
||||
|
||||
QDF_STATUS
|
||||
dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
|
||||
void *tx_comp_hal_desc,
|
||||
struct dp_tx_desc_s **r_tx_desc)
|
||||
{
|
||||
uint8_t pool_id;
|
||||
uint32_t tx_desc_id;
|
||||
|
||||
tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
|
||||
pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
|
||||
DP_TX_DESC_ID_POOL_OS;
|
||||
|
||||
/* Find Tx descriptor */
|
||||
*r_tx_desc = dp_tx_desc_find(soc, pool_id,
|
||||
(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
|
||||
DP_TX_DESC_ID_PAGE_OS,
|
||||
(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
|
||||
DP_TX_DESC_ID_OFFSET_OS,
|
||||
(tx_desc_id & DP_TX_DESC_ID_SPCL_MASK));
|
||||
/* Pool id is not matching. Error */
|
||||
if ((*r_tx_desc)->pool_id != pool_id) {
|
||||
dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
|
||||
pool_id, (*r_tx_desc)->pool_id);
|
||||
|
||||
qdf_assert_always(0);
|
||||
}
|
||||
|
||||
(*r_tx_desc)->peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static inline
|
||||
void dp_tx_process_mec_notify_li(struct dp_soc *soc, uint8_t *status)
|
||||
{
|
||||
struct dp_vdev *vdev;
|
||||
uint8_t vdev_id;
|
||||
uint32_t *htt_desc = (uint32_t *)status;
|
||||
|
||||
/*
|
||||
* Get vdev id from HTT status word in case of MEC
|
||||
* notification
|
||||
*/
|
||||
vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
|
||||
if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
|
||||
return;
|
||||
|
||||
vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
|
||||
DP_MOD_ID_HTT_COMP);
|
||||
if (!vdev)
|
||||
return;
|
||||
dp_tx_mec_handler(vdev, status);
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
|
||||
}
|
||||
|
||||
void dp_tx_process_htt_completion_li(struct dp_soc *soc,
|
||||
struct dp_tx_desc_s *tx_desc,
|
||||
uint8_t *status,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
uint8_t tx_status;
|
||||
struct dp_pdev *pdev;
|
||||
struct dp_vdev *vdev = NULL;
|
||||
struct hal_tx_completion_status ts = {0};
|
||||
uint32_t *htt_desc = (uint32_t *)status;
|
||||
struct dp_txrx_peer *txrx_peer;
|
||||
dp_txrx_ref_handle txrx_ref_handle = NULL;
|
||||
struct cdp_tid_tx_stats *tid_stats = NULL;
|
||||
struct htt_soc *htt_handle;
|
||||
uint8_t vdev_id;
|
||||
|
||||
tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
|
||||
htt_handle = (struct htt_soc *)soc->htt_handle;
|
||||
htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
|
||||
|
||||
/*
|
||||
* There can be scenario where WBM consuming descriptor enqueued
|
||||
* from TQM2WBM first and TQM completion can happen before MEC
|
||||
* notification comes from FW2WBM. Avoid access any field of tx
|
||||
* descriptor in case of MEC notify.
|
||||
*/
|
||||
if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
|
||||
return dp_tx_process_mec_notify_li(soc, status);
|
||||
|
||||
/*
|
||||
* If the descriptor is already freed in vdev_detach,
|
||||
* continue to next descriptor
|
||||
*/
|
||||
if (qdf_unlikely(!tx_desc->flags)) {
|
||||
dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
|
||||
tx_desc->id);
|
||||
return;
|
||||
}
|
||||
|
||||
if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
|
||||
dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
|
||||
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
||||
goto release_tx_desc;
|
||||
}
|
||||
|
||||
pdev = tx_desc->pdev;
|
||||
if (qdf_unlikely(!pdev)) {
|
||||
dp_tx_comp_warn("The pdev in TX desc is NULL, dropped.");
|
||||
dp_tx_comp_warn("tx_status: %u", tx_status);
|
||||
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
||||
goto release_tx_desc;
|
||||
}
|
||||
|
||||
if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
|
||||
dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
|
||||
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
||||
goto release_tx_desc;
|
||||
}
|
||||
|
||||
qdf_assert(tx_desc->pdev);
|
||||
|
||||
vdev_id = tx_desc->vdev_id;
|
||||
vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
|
||||
DP_MOD_ID_HTT_COMP);
|
||||
|
||||
if (qdf_unlikely(!vdev)) {
|
||||
dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
|
||||
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
||||
goto release_tx_desc;
|
||||
}
|
||||
|
||||
switch (tx_status) {
|
||||
case HTT_TX_FW2WBM_TX_STATUS_OK:
|
||||
case HTT_TX_FW2WBM_TX_STATUS_DROP:
|
||||
case HTT_TX_FW2WBM_TX_STATUS_TTL:
|
||||
{
|
||||
uint8_t tid;
|
||||
uint8_t transmit_cnt_valid = 0;
|
||||
|
||||
if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
|
||||
ts.peer_id =
|
||||
HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
|
||||
htt_desc[2]);
|
||||
ts.tid =
|
||||
HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
|
||||
htt_desc[2]);
|
||||
} else {
|
||||
ts.peer_id = HTT_INVALID_PEER;
|
||||
ts.tid = HTT_INVALID_TID;
|
||||
}
|
||||
ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
|
||||
ts.ppdu_id =
|
||||
HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
|
||||
htt_desc[1]);
|
||||
ts.ack_frame_rssi =
|
||||
HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
|
||||
htt_desc[1]);
|
||||
transmit_cnt_valid =
|
||||
HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(
|
||||
htt_desc[2]);
|
||||
if (transmit_cnt_valid)
|
||||
ts.transmit_cnt =
|
||||
HTT_TX_WBM_COMPLETION_V2_TRANSMIT_COUNT_GET(
|
||||
htt_desc[0]);
|
||||
|
||||
ts.tsf = htt_desc[3];
|
||||
ts.first_msdu = 1;
|
||||
ts.last_msdu = 1;
|
||||
switch (tx_status) {
|
||||
case HTT_TX_FW2WBM_TX_STATUS_OK:
|
||||
ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
|
||||
break;
|
||||
case HTT_TX_FW2WBM_TX_STATUS_DROP:
|
||||
ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
|
||||
break;
|
||||
case HTT_TX_FW2WBM_TX_STATUS_TTL:
|
||||
ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
|
||||
break;
|
||||
}
|
||||
tid = ts.tid;
|
||||
if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
|
||||
tid = CDP_MAX_DATA_TIDS - 1;
|
||||
|
||||
tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
|
||||
|
||||
if (qdf_unlikely(pdev->delay_stats_flag) ||
|
||||
qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
|
||||
dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
|
||||
if (tx_status < CDP_MAX_TX_HTT_STATUS)
|
||||
tid_stats->htt_status_cnt[tx_status]++;
|
||||
|
||||
txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
|
||||
&txrx_ref_handle,
|
||||
DP_MOD_ID_HTT_COMP);
|
||||
if (qdf_likely(txrx_peer)) {
|
||||
DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
|
||||
qdf_nbuf_len(tx_desc->nbuf));
|
||||
if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
|
||||
DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
|
||||
}
|
||||
|
||||
dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
|
||||
ring_id);
|
||||
dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
|
||||
dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
|
||||
|
||||
if (qdf_likely(txrx_peer))
|
||||
dp_txrx_peer_unref_delete(txrx_ref_handle,
|
||||
DP_MOD_ID_HTT_COMP);
|
||||
|
||||
break;
|
||||
}
|
||||
case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
|
||||
{
|
||||
uint8_t reinject_reason;
|
||||
|
||||
reinject_reason =
|
||||
HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
|
||||
htt_desc[0]);
|
||||
dp_tx_reinject_handler(soc, vdev, tx_desc,
|
||||
status, reinject_reason);
|
||||
break;
|
||||
}
|
||||
case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
|
||||
{
|
||||
dp_tx_inspect_handler(soc, vdev, tx_desc, status);
|
||||
break;
|
||||
}
|
||||
case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
|
||||
{
|
||||
DP_STATS_INC(vdev,
|
||||
tx_i[DP_XMIT_LINK].dropped.fail_per_pkt_vdev_id_check,
|
||||
1);
|
||||
goto release_tx_desc;
|
||||
}
|
||||
default:
|
||||
dp_tx_comp_err("Invalid HTT tx_status %d\n",
|
||||
tx_status);
|
||||
goto release_tx_desc;
|
||||
}
|
||||
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
|
||||
return;
|
||||
|
||||
release_tx_desc:
|
||||
dp_tx_comp_free_buf(soc, tx_desc, false);
|
||||
dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
|
||||
if (vdev)
|
||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
|
||||
}
|
||||
|
||||
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
|
||||
/**
|
||||
* dp_tx_get_rbm_id_li() - Get the RBM ID for data transmission completion.
|
||||
* @soc: DP soc structure pointer
|
||||
* @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
|
||||
*
|
||||
* Return: Return buffer manager (RBM) ID for the given ring
|
||||
*/
|
||||
#ifdef IPA_OFFLOAD
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
return (ring_id + soc->wbm_sw0_bm_id);
|
||||
}
|
||||
#else
|
||||
#ifndef QCA_DP_ENABLE_TX_COMP_RING4
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
|
||||
HAL_WBM_SW2_BM_ID);
|
||||
}
|
||||
#else
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
if (ring_id == soc->num_tcl_data_rings)
|
||||
return HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
|
||||
return (ring_id + HAL_WBM_SW0_BM_ID(soc->wbm_sw0_bm_id));
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
#else
|
||||
#ifdef TX_MULTI_TCL
|
||||
#ifdef IPA_OFFLOAD
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
if (soc->wlan_cfg_ctx->ipa_enabled)
|
||||
return (ring_id + soc->wbm_sw0_bm_id);
|
||||
|
||||
return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
|
||||
}
|
||||
#else
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
|
||||
uint8_t ring_id)
|
||||
{
|
||||
return (ring_id + soc->wbm_sw0_bm_id);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
|
||||
/**
|
||||
* dp_tx_clear_consumed_hw_descs - Reset all the consumed Tx ring descs to 0
|
||||
*
|
||||
* @soc: DP soc handle
|
||||
* @hal_ring_hdl: Source ring pointer
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline
|
||||
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
|
||||
hal_ring_handle_t hal_ring_hdl)
|
||||
{
|
||||
void *desc = hal_srng_src_get_next_consumed(soc->hal_soc, hal_ring_hdl);
|
||||
|
||||
while (desc) {
|
||||
hal_tx_desc_clear(desc);
|
||||
desc = hal_srng_src_get_next_consumed(soc->hal_soc,
|
||||
hal_ring_hdl);
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
static inline
|
||||
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
|
||||
hal_ring_handle_t hal_ring_hdl)
|
||||
{
|
||||
}
|
||||
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */
|
||||
|
||||
#ifdef WLAN_CONFIG_TX_DELAY
|
||||
static inline
|
||||
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
struct hal_tx_completion_status *ts,
|
||||
uint32_t *delay_us)
|
||||
{
|
||||
return dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, delay_us);
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
|
||||
struct dp_vdev *vdev,
|
||||
struct hal_tx_completion_status *ts,
|
||||
uint32_t *delay_us)
|
||||
{
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SAWF
|
||||
/**
|
||||
* dp_sawf_config_li - Configure sawf specific fields in tcl
|
||||
*
|
||||
* @soc: DP soc handle
|
||||
* @hal_tx_desc_cached: tx descriptor
|
||||
* @fw_metadata: firmware metadata
|
||||
* @vdev_id: vdev id
|
||||
* @nbuf: skb buffer
|
||||
* @msdu_info: msdu info
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
static inline
|
||||
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
|
||||
uint16_t *fw_metadata, uint16_t vdev_id,
|
||||
qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
|
||||
{
|
||||
uint8_t q_id = 0;
|
||||
uint32_t flow_idx = 0;
|
||||
|
||||
q_id = dp_sawf_queue_id_get(nbuf);
|
||||
if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
|
||||
return;
|
||||
|
||||
msdu_info->tid = (q_id & (CDP_DATA_TID_MAX - 1));
|
||||
hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
|
||||
(q_id & (CDP_DATA_TID_MAX - 1)));
|
||||
|
||||
	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
		return;

	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
		return;

	dp_sawf_tcl_cmd(fw_metadata, nbuf);

	/* For SAWF, q_id starts from DP_SAWF_Q_MAX */
	if (!dp_sawf_get_search_index(soc, nbuf, vdev_id,
				      q_id, &flow_idx))
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, true);

	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       HAL_TX_ADDR_INDEX_SEARCH);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					flow_idx);
}
#else
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
}

#define dp_sawf_tx_enqueue_peer_stats(soc, tx_desc)
#define dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc)
#endif

QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
	uint8_t tid;

	/*
	 * Initialize the cached descriptor statically here to avoid an
	 * explicit call out to qdf_mem_set()/memset for zeroing it.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id_li(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_li(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       vdev->search_type);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		dp_sawf_config_li(soc, hal_tx_desc_cached, &fw_metadata,
				  vdev->vdev_id, tx_desc->nbuf, msdu_info);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
	     QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	tid = msdu_info->tid;
	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

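	/*
	 * Everything below touches the TCL SRNG: claim ring access, fetch the
	 * next HW descriptor, copy the cached descriptor into it and update
	 * the enqueue statistics before releasing the ring.
	 */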
	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	dp_tx_clear_consumed_hw_descs(soc, hal_ring_hdl);

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);
	DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
			 tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc, ring_id);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;

	if (spcl_tx_desc)
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
	else
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
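		/*
		 * Compose the SW descriptor id from the descriptor's location
		 * in the pool: special-pool flag, pool id, page id and offset
		 * within the page, each at its DP_TX_DESC_ID_*_OS position.
		 */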
		id = ((!!spcl_tx_desc) << DP_TX_DESC_ID_SPCL_OS |
		      (pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
		      (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		tx_desc->vdev_id = DP_INVALID_VDEV_ID;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
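	/* No Lithium-specific de-initialization is needed for the pool */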
}

QDF_STATUS dp_tx_compute_tx_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_li(soc, vdev, ts, delay_us);
}

QDF_STATUS dp_tx_desc_pool_alloc_li(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free_li(struct dp_soc *soc, uint8_t pool_id)
{
}
133
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_tx.h
Normal file
133
qcom/opensource/wlan/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_tx.h
Normal file
@ -0,0 +1,133 @@
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_LI_TX_H
#define __DP_LI_TX_H

#include <dp_types.h>

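/*
 * The declarations below are the Lithium (LI) target specific Tx routines;
 * they are assumed to be plugged into the converged DP Tx path through the
 * per-architecture ops table rather than being called directly by upper
 * layers.
 */
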
/**
 * dp_tx_hw_enqueue_li() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @tx_exc_metadata: Handle that holds exception path meta data
 * @msdu_info: MSDU information
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info);

/**
 * dp_tx_comp_get_params_from_hal_desc_li() - Get TX desc from HAL comp desc
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX Comp Descriptor
 * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
				       void *tx_comp_hal_desc,
				       struct dp_tx_desc_s **r_tx_desc);

/**
 * dp_tx_process_htt_completion_li() - Tx HTT Completion Indication Handler
 * @soc: Handle to DP soc structure
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
void dp_tx_process_htt_completion_li(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id);

/**
 * dp_tx_desc_pool_init_li() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @num_elem: pool descriptor number
 * @pool_id: pool to allocate
 * @spcl_tx_desc: if special desc
 *
 * Return: QDF_STATUS_SUCCESS - success, others - failure
 */
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_deinit_li() - De-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @tx_desc_pool: Tx descriptor pool handler
 * @pool_id: pool to deinit
 * @spcl_tx_desc: if special desc
 *
 * Return: None.
 */
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc);

/**
 * dp_tx_compute_tx_delay_li() - Compute HW Tx completion delay
 * @soc: Handle to DP Soc structure
 * @vdev: vdev
 * @ts: Tx completion status
 * @delay_us: Delay to be calculated in microseconds
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_compute_tx_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us);

/**
 * dp_tx_desc_pool_alloc_li() - Allocate TX descriptor pool
 * @soc: Handle to DP Soc structure
 * @num_elem: Number of elements to allocate
 * @pool_id: TCL descriptor pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_desc_pool_alloc_li(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id);

/**
 * dp_tx_desc_pool_free_li() - Free TX descriptor pool
 * @soc: Handle to DP Soc structure
 * @pool_id: TCL descriptor pool ID
 *
 * Return: none
 */
void dp_tx_desc_pool_free_li(struct dp_soc *soc, uint8_t pool_id);
#endif