qcacld-4.0: Removing HIF/HTC from driver project

Remove HIF/HTC from the driver project.
These components are moved to the host common project.

Change-Id: I7dd6d4117a197a474304aa4a28edacbca924b795
Komal Seelam 2016-02-04 12:26:09 +05:30 committed by Prakash Dhavali
parent 9673f3a977
commit 7922cf8a6f
51 changed files with 0 additions and 28102 deletions


@@ -1,706 +0,0 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _HIF_H_
#define _HIF_H_
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Header files */
#include "athdefs.h"
#include "a_types.h"
#include "osapi_linux.h"
#include "cdf_status.h"
#include "cdf_nbuf.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
typedef struct htc_callbacks HTC_CALLBACKS;
typedef void __iomem *A_target_id_t;
#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 8
#define HIF_TYPE_QCA6180 9
#define HIF_TYPE_ADRASTEA 10
#define TARGET_TYPE_UNKNOWN 0
#define TARGET_TYPE_AR6001 1
#define TARGET_TYPE_AR6002 2
#define TARGET_TYPE_AR6003 3
#define TARGET_TYPE_AR6004 5
#define TARGET_TYPE_AR6006 6
#define TARGET_TYPE_AR9888 7
#define TARGET_TYPE_AR6320 8
#define TARGET_TYPE_AR900B 9
/* For attaching Peregrine 2.0 board target_reg_tbl only */
#define TARGET_TYPE_AR9888V2 10
/* For attaching Rome1.0 target_reg_tbl only */
#define TARGET_TYPE_AR6320V1 11
/* For Rome2.0/2.1 target_reg_tbl ID*/
#define TARGET_TYPE_AR6320V2 12
/* For Rome3.0 target_reg_tbl ID*/
#define TARGET_TYPE_AR6320V3 13
/* For Tufello1.0 target_reg_tbl ID*/
#define TARGET_TYPE_QCA9377V1 14
/* For QCA6180 target */
#define TARGET_TYPE_QCA6180 15
/* For Adrastea target */
#define TARGET_TYPE_ADRASTEA 16
struct CE_state;
#ifdef QCA_WIFI_3_0_ADRASTEA
#define CE_COUNT_MAX 12
#else
#define CE_COUNT_MAX 8
#endif
/* These numbers are selected so that the product is close to the current
upper limit of packets HIF services in one shot (1000) */
#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE 16
/* NOTE: This is to adapt the non-NAPI solution to use
the same "budget" as NAPI. Will be removed
once a decision about NAPI is made */
#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
but this does not change the number of buckets */
#define QCA_NAPI_NUM_BUCKETS (QCA_NAPI_BUDGET / QCA_NAPI_DEF_SCALE)
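/*
 * Worked example (editorial note, not part of the original header): with the
 * defaults above, HIF_NAPI_MAX_RECEIVES = 64 * 16 = 1024, which is close to
 * the ~1000-packet limit mentioned in the comment, and QCA_NAPI_NUM_BUCKETS
 * = 64 / 16 = 4 histogram buckets for napi_budget_uses[].
 */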
struct qca_napi_stat {
uint32_t napi_schedules;
uint32_t napi_polls;
uint32_t napi_completes;
uint32_t napi_workdone;
uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
};
/**
* per NAPI instance data structure
* This data structure holds the per-instance NAPI state.
* Note that, in the current implementation, though scale is
* an instance variable, it is set to the same value for all
* instances.
*/
struct qca_napi_info {
struct napi_struct napi; /* one NAPI Instance per CE in phase I */
uint8_t scale; /* currently same on all instances */
uint8_t id;
struct qca_napi_stat stats[NR_CPUS];
};
/**
* NAPI data structure common to all NAPI instances.
*
* A variable of this type will be stored in hif module context.
*/
struct qca_napi_data {
/* NOTE: make sure the mutex is inited only at the very beginning
once for the lifetime of the driver. For now, granularity of one
is OK, but we might want to have a better granularity later */
struct mutex mutex;
uint32_t state;
uint32_t ce_map; /* bitmap of created/registered NAPI
instances, indexed by pipe_id,
not used by clients (clients use an
id returned by create) */
struct net_device netdev; /* dummy net_dev */
struct qca_napi_info napis[CE_COUNT_MAX];
};
struct ol_softc {
void __iomem *mem; /* IO mapped memory base address */
cdf_dma_addr_t mem_pa;
uint32_t soc_version;
/*
* handle for code that uses the osdep.h version of OS
* abstraction primitives
*/
struct _NIC_DEV aps_osdev;
enum ath_hal_bus_type bus_type;
uint32_t lcr_val;
bool pkt_log_init;
bool request_irq_done;
/*
* handle for code that uses cdf version of OS
* abstraction primitives
*/
cdf_device_t cdf_dev;
struct ol_version version;
/* Packet statistics */
struct ol_ath_stats pkt_stats;
/* A_TARGET_TYPE_* */
uint32_t target_type;
uint32_t target_fw_version;
uint32_t target_version;
uint32_t target_revision;
uint8_t crm_version_string[64];
uint8_t wlan_version_string[64];
ol_target_status target_status;
bool is_sim;
/* calibration data is stored in flash */
uint8_t *cal_in_flash;
/* virtual address for the calibration data on the flash */
void *cal_mem;
/* status of target init */
WLAN_INIT_STATUS wlan_init_status;
/* BMI info */
/* OS-dependent private info for BMI */
void *bmi_ol_priv;
bool bmi_done;
bool bmi_ua_done;
uint8_t *bmi_cmd_buff;
dma_addr_t bmi_cmd_da;
OS_DMA_MEM_CONTEXT(bmicmd_dmacontext)
uint8_t *bmi_rsp_buff;
dma_addr_t bmi_rsp_da;
/* length of last response */
uint32_t last_rxlen;
OS_DMA_MEM_CONTEXT(bmirsp_dmacontext)
void *msi_magic;
dma_addr_t msi_magic_da;
OS_DMA_MEM_CONTEXT(msi_dmacontext)
/* Handles for Lower Layers : filled in at init time */
hif_handle_t hif_hdl;
#ifdef HIF_PCI
struct hif_pci_softc *hif_sc;
#endif
#ifdef WLAN_FEATURE_FASTPATH
int fastpath_mode_on; /* Duplicating this for data path efficiency */
#endif /* WLAN_FEATURE_FASTPATH */
/* HTC handles */
void *htc_handle;
bool fEnableBeaconEarlyTermination;
uint8_t bcnEarlyTermWakeInterval;
/* UTF event information */
struct {
uint8_t *data;
uint32_t length;
cdf_size_t offset;
uint8_t currentSeq;
uint8_t expectedSeq;
} utf_event_info;
struct ol_wow_info *scn_wowInfo;
/* enable uart/serial prints from target */
bool enableuartprint;
/* enable fwlog */
bool enablefwlog;
HAL_REG_CAPABILITIES hal_reg_capabilities;
struct ol_regdmn *ol_regdmn_handle;
uint8_t bcn_mode;
uint8_t arp_override;
/*
* Includes host side stack level stats +
* radio level athstats
*/
struct wlan_dbg_stats ath_stats;
/* noise_floor */
int16_t chan_nf;
uint32_t min_tx_power;
uint32_t max_tx_power;
uint32_t txpowlimit2G;
uint32_t txpowlimit5G;
uint32_t txpower_scale;
uint32_t chan_tx_pwr;
uint32_t vdev_count;
uint32_t max_bcn_ie_size;
cdf_spinlock_t scn_lock;
uint8_t vow_extstats;
/* if dcs enabled or not */
uint8_t scn_dcs;
wdi_event_subscribe scn_rx_peer_invalid_subscriber;
uint8_t proxy_sta;
uint8_t bcn_enabled;
/* Dynamic Tx Chainmask Selection enabled/disabled */
uint8_t dtcs;
/* true if vht ies are set on target */
uint32_t set_ht_vht_ies:1;
/*CWM enable/disable state */
bool scn_cwmenable;
uint8_t max_no_of_peers;
#ifdef CONFIG_CNSS
struct cnss_fw_files fw_files;
#endif
#if defined(CONFIG_CNSS)
void *ramdump_base;
unsigned long ramdump_address;
unsigned long ramdump_size;
#endif
bool enable_self_recovery;
#ifdef WLAN_FEATURE_LPSS
bool enablelpasssupport;
#endif
bool enable_ramdump_collection;
struct targetdef_s *targetdef;
struct ce_reg_def *target_ce_def;
struct hostdef_s *hostdef;
struct host_shadow_regs_s *host_shadow_regs;
bool athdiag_procfs_inited;
/*
* Guard changes to Target HW state and to software
* structures that track hardware state.
*/
unsigned int ce_count; /* Number of Copy Engines supported */
struct CE_state *ce_id_to_state[CE_COUNT_MAX]; /* CE id to CE_state */
#ifdef FEATURE_NAPI
struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
int htc_endpoint;
bool recovery;
bool hif_init_done;
int linkstate_vote;
atomic_t link_suspended;
atomic_t wow_done;
atomic_t tasklet_from_intr;
atomic_t active_tasklet_cnt;
bool notice_send;
#ifdef HIF_PCI
cdf_spinlock_t irq_lock;
uint32_t ce_irq_summary;
#endif
uint32_t *vaddr_rri_on_ddr;
};
typedef enum {
HIF_DEVICE_POWER_UP, /* HIF layer should power up interface
* and/or module */
HIF_DEVICE_POWER_DOWN, /* HIF layer should initiate bus-specific
* measures to minimize power */
HIF_DEVICE_POWER_CUT /* HIF layer should initiate bus-specific
* AND/OR platform-specific measures
* to completely power-off the module and
* associated hardware (i.e. cut power
* supplies) */
} HIF_DEVICE_POWER_CHANGE_TYPE;
/**
* enum hif_enable_type: what triggered the enabling of hif
*
* @HIF_ENABLE_TYPE_PROBE: probe triggered enable
* @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
*/
enum hif_enable_type {
HIF_ENABLE_TYPE_PROBE,
HIF_ENABLE_TYPE_REINIT,
HIF_ENABLE_TYPE_MAX
};
/**
* enum hif_disable_type: what triggered the disabling of hif
*
* @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
* @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered
* disable
* @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
* @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
*/
enum hif_disable_type {
HIF_DISABLE_TYPE_PROBE_ERROR,
HIF_DISABLE_TYPE_REINIT_ERROR,
HIF_DISABLE_TYPE_REMOVE,
HIF_DISABLE_TYPE_SHUTDOWN,
HIF_DISABLE_TYPE_MAX
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
typedef struct _HID_ACCESS_LOG {
uint32_t seqnum;
bool is_write;
void *addr;
uint32_t value;
} HIF_ACCESS_LOG;
#endif
#define HIF_MAX_DEVICES 1
struct htc_callbacks {
void *context; /* context to pass to the dsrhandler
* note : rwCompletionHandler is provided
* the context passed to hif_read_write */
int (*rwCompletionHandler)(void *rwContext, int status);
int (*dsrHandler)(void *context);
};
typedef struct osdrv_callbacks {
void *context; /* context to pass for all callbacks
* except deviceRemovedHandler
* the deviceRemovedHandler is only
* called if the device is claimed */
int (*deviceInsertedHandler)(void *context, void *hif_handle);
int (*deviceRemovedHandler)(void *claimedContext,
void *hif_handle);
int (*deviceSuspendHandler)(void *context);
int (*deviceResumeHandler)(void *context);
int (*deviceWakeupHandler)(void *context);
int (*devicePowerChangeHandler)(void *context,
HIF_DEVICE_POWER_CHANGE_TYPE
config);
} OSDRV_CALLBACKS;
/*
* This API is used to perform any global initialization of the HIF layer
* and to set OS driver callbacks (i.e. insertion/removal) to the HIF layer
*
*/
int hif_init(OSDRV_CALLBACKS *callbacks);
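/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller filling in OSDRV_CALLBACKS and registering them through hif_init().
 * The handler and context names are placeholders chosen for illustration.
 */
static int example_device_inserted(void *context, void *hif_handle)
{
	/* claim the device; 'context' is the pointer registered below */
	return 0;
}

static inline int example_register_os_callbacks(void *driver_context)
{
	OSDRV_CALLBACKS cbs = {
		.context = driver_context,
		.deviceInsertedHandler = example_device_inserted,
	};

	return hif_init(&cbs);
}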
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct ol_softc *scn);
/****************************************************************/
/* BMI and Diag window abstraction */
/****************************************************************/
#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))
#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
* handled atomically by
* DiagRead/DiagWrite */
/*
* API to handle HIF-specific BMI message exchanges. This API is synchronous
* and may only be called from a context that can block (sleep). */
CDF_STATUS hif_exchange_bmi_msg(struct ol_softc *scn,
uint8_t *pSendMessage,
uint32_t Length,
uint8_t *pResponseMessage,
uint32_t *pResponseLength, uint32_t TimeoutMS);
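/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * synchronous BMI exchange issued from a sleepable context. Buffer names
 * and sizes are assumptions made purely for illustration.
 */
static inline CDF_STATUS example_bmi_exchange(struct ol_softc *scn)
{
	uint8_t cmd[64];                /* hypothetical BMI command buffer */
	uint8_t rsp[64];                /* hypothetical BMI response buffer */
	uint32_t rsp_len = sizeof(rsp);

	/* ... fill cmd[] with a BMI command before calling ... */

	/* blocks until the target responds; no timeout is applied here */
	return hif_exchange_bmi_msg(scn, cmd, sizeof(cmd), rsp, &rsp_len,
				    HIF_BMI_EXCHANGE_NO_TIMEOUT);
}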
/*
* APIs to handle HIF specific diagnostic read accesses. These APIs are
* synchronous and only allowed to be called from a context that
* can block (sleep). They are not high performance APIs.
*
* hif_diag_read_access reads a 4 Byte aligned/length value from a
* Target register or memory word.
*
* hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
*/
CDF_STATUS hif_diag_read_access(struct ol_softc *scn, uint32_t address,
uint32_t *data);
CDF_STATUS hif_diag_read_mem(struct ol_softc *scn, uint32_t address,
uint8_t *data, int nbytes);
void hif_dump_target_memory(struct ol_softc *scn, void *ramdump_base,
uint32_t address, uint32_t size);
/*
* APIs to handle HIF specific diagnostic write accesses. These APIs are
* synchronous and only allowed to be called from a context that
* can block (sleep).
* They are not high performance APIs.
*
* hif_diag_write_access writes a 4 Byte aligned/length value to a
* Target register or memory word.
*
* hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
*/
CDF_STATUS hif_diag_write_access(struct ol_softc *scn, uint32_t address,
uint32_t data);
CDF_STATUS hif_diag_write_mem(struct ol_softc *scn, uint32_t address,
uint8_t *data, int nbytes);
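/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * read-modify-write of a 4-byte aligned Target word using the synchronous
 * diagnostic accessors above. Assumes CDF_STATUS_SUCCESS from cdf_status.h;
 * not intended for any performance-critical path.
 */
static inline CDF_STATUS example_diag_set_bits(struct ol_softc *scn,
					       uint32_t address,
					       uint32_t set_bits)
{
	uint32_t val;
	CDF_STATUS status;

	status = hif_diag_read_access(scn, address, &val);
	if (status != CDF_STATUS_SUCCESS)
		return status;

	return hif_diag_write_access(scn, address, val | set_bits);
}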
/*
* Set the FASTPATH_mode_on flag in sc, for use by data path
*/
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct ol_softc *hif_dev);
#endif
#if defined(HIF_PCI) && !defined(A_SIMOS_DEVHOST)
/*
* This API allows the Host to access Target registers of a given
* A_target_id_t directly and relatively efficiently over PCIe.
* This allows the Host to avoid extra overhead associated with
* sending a message to firmware and waiting for a response message
* from firmware, as is done on other interconnects.
*
* Yet there is some complexity with direct accesses because the
* Target's power state is not known a priori. The Host must issue
* special PCIe reads/writes in order to explicitly wake the Target
* and to verify that it is awake and will remain awake.
*
* NB: Host endianness conversion is left for the caller to handle.
* These interfaces handle access; not interpretation.
*
* Usage:
* During initialization, use A_TARGET_ID to obtain a 'target ID'
* for use with these interfaces.
*
* Use A_TARGET_READ and A_TARGET_WRITE to access Target space.
* These calls must be bracketed by A_TARGET_ACCESS_BEGIN and
* A_TARGET_ACCESS_END. A single BEGIN/END pair is adequate for
* multiple READ/WRITE operations.
*
* Use A_TARGET_ACCESS_BEGIN to put the Target in a state in
* which it is legal for the Host to directly access it. This
* may involve waking the Target from a low power state, which
* may take up to 2ms!
*
* Use A_TARGET_ACCESS_END to tell the Target that as far as
* this code path is concerned, it no longer needs to remain
* directly accessible. BEGIN/END is under a reference counter;
* multiple code paths may issue BEGIN/END on a single targid.
*
* For added efficiency, the Host may use A_TARGET_ACCESS_LIKELY.
* The LIKELY interface works just like A_TARGET_ACCESS_BEGIN,
* except that it may return before the Target is actually
* available. It's a vague indication that some Target accesses
* are expected "soon". When the LIKELY API is used,
* A_TARGET_ACCESS_BEGIN must be used before any access.
*
* There are several uses for the LIKELY/UNLIKELY API:
* -If there is some potential time before Target accesses
* and we want to get a head start on waking the Target
* (e.g. to overlap Target wake with Host-side malloc)
* -High-level code knows that it will call low-level
* functions that will use BEGIN/END, and we don't want
* to allow the Target to sleep until the entire sequence
* has completed.
*
* A_TARGET_ACCESS_OK verifies that the Target can be
* accessed. In general, this should not be needed, but it
* may be useful for debugging or for special uses.
*
* Note that there must be a matching END for each BEGIN
* AND there must be a matching UNLIKELY for each LIKELY!
*
* NB: This API is designed to allow some flexibility in tradeoffs
* between Target power utilization and Host efficiency and
* system performance.
*/
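/*
 * Illustrative sketch (editorial note, not part of the original header): the
 * BEGIN/END bracketing described above, written as pseudo-C. The exact
 * signatures of A_TARGET_ID, A_TARGET_ACCESS_BEGIN/END and
 * A_TARGET_READ/WRITE live in the bus-specific headers and are assumed here
 * purely for illustration:
 *
 *	A_target_id_t targid = A_TARGET_ID(scn);
 *
 *	A_TARGET_ACCESS_BEGIN(targid);        - may wake the Target
 *	val = A_TARGET_READ(targid, offset);
 *	A_TARGET_WRITE(targid, offset, val);
 *	A_TARGET_ACCESS_END(targid);          - Target may sleep again
 *
 * A single BEGIN/END pair is adequate for any number of READ/WRITE calls
 * issued in between, and each BEGIN must be matched by exactly one END.
 */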
/*
* Enable/disable CDC max performance workaround
* For max performance, set this to 0
* To allow SoC to enter sleep set this to 1
*/
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
#endif
#ifdef IPA_OFFLOAD
void hif_ipa_get_ce_resource(struct ol_softc *scn,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
#else
/**
* hif_ipa_get_ce_resource() - get uc resource on hif
* @scn: bus context
* @ce_sr_base_paddr: copyengine source ring base physical address
* @ce_sr_ring_size: copyengine source ring size
* @ce_reg_paddr: copyengine register physical address
*
* IPA micro controller data path offload feature enabled,
* HIF should release copy engine related resource information to IPA UC
* IPA UC will access hardware resource with released information
*
* Return: None
*/
static inline void hif_ipa_get_ce_resource(struct ol_softc *scn,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
{
return;
}
#endif /* IPA_OFFLOAD */
void hif_read_phy_mem_base(struct ol_softc *scn,
cdf_dma_addr_t *bar_value);
/**
* @brief List of callbacks - filled in by HTC.
*/
struct hif_msg_callbacks {
void *Context;
/**< context meaningful to HTC */
CDF_STATUS (*txCompletionHandler)(void *Context, cdf_nbuf_t wbuf,
uint32_t transferID,
uint32_t toeplitz_hash_result);
CDF_STATUS (*rxCompletionHandler)(void *Context, cdf_nbuf_t wbuf,
uint8_t pipeID);
void (*txResourceAvailHandler)(void *context, uint8_t pipe);
void (*fwEventHandler)(void *context, CDF_STATUS status);
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
(attr |= (v & 0x01) << 30)
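/*
 * Illustrative sketch (not part of the original header): building a data_attr
 * word with the setter macros above; the result is what a caller would pass
 * as the data_attr argument of hif_send_head(), declared further below. The
 * field values used here are arbitrary examples.
 */
static inline uint32_t example_build_data_attr(void)
{
	uint32_t data_attr = 0;

	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
	HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(data_attr, 2);
	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
	HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(data_attr, 0x40);

	return data_attr;
}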
#ifdef HIF_PCI
typedef struct pci_device_id hif_bus_id;
#else
typedef struct device hif_bus_id;
#endif
void hif_post_init(struct ol_softc *scn, void *hHTC,
struct hif_msg_callbacks *callbacks);
CDF_STATUS hif_start(struct ol_softc *scn);
void hif_stop(struct ol_softc *scn);
void hif_flush_surprise_remove(struct ol_softc *scn);
void hif_dump(struct ol_softc *scn, uint8_t CmdId, bool start);
CDF_STATUS hif_send_head(struct ol_softc *scn, uint8_t PipeID,
uint32_t transferID, uint32_t nbytes,
cdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct ol_softc *scn, uint8_t PipeID,
int force);
void hif_cancel_deferred_target_sleep(struct ol_softc *scn);
void hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe,
uint8_t *DLPipe);
int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
int *dl_is_polled);
uint16_t hif_get_free_queue_number(struct ol_softc *scn, uint8_t PipeID);
void *hif_get_targetdef(struct ol_softc *scn);
void hi_fsuspendwow(struct ol_softc *scn);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct ol_softc *scn, bool sleep_ok,
bool wait_for_it);
int hif_check_fw_reg(struct ol_softc *scn);
int hif_check_soc_status(struct ol_softc *scn);
void hif_get_hw_info(void *scn, u32 *version, u32 *revision,
const char **target_name);
void hif_set_fw_info(void *scn, u32 target_fw_version);
void hif_disable_isr(void *scn);
void hif_reset_soc(void *scn);
void hif_disable_aspm(void);
void hif_save_htc_htt_config_endpoint(int htc_endpoint);
CDF_STATUS hif_open(enum ath_hal_bus_type bus_type);
void hif_close(void *hif_ctx);
CDF_STATUS hif_enable(void *hif_ctx, struct device *dev, void *bdev,
const hif_bus_id *bid, enum ath_hal_bus_type bus_type,
enum hif_enable_type type);
void hif_disable(void *hif_ctx, enum hif_disable_type type);
void hif_enable_power_gating(void *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
int hif_pm_runtime_get(void *hif_ctx);
void hif_pm_runtime_get_noresume(void *hif_ctx);
int hif_pm_runtime_put(void *hif_ctx);
struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name);
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(void *ol_sc,
struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(void *ol_sc,
struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(void *ol_sc,
struct hif_pm_runtime_lock *lock, unsigned int delay);
#else
struct hif_pm_runtime_lock {
const char *name;
};
static inline void hif_pm_runtime_get_noresume(void *hif_ctx)
{}
static inline int hif_pm_runtime_get(void *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(void *hif_ctx)
{ return 0; }
static inline struct hif_pm_runtime_lock *hif_runtime_lock_init(
const char *name)
{ return NULL; }
static inline void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
{}
static inline int hif_pm_runtime_prevent_suspend(void *ol_sc,
struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(void *ol_sc,
struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_prevent_suspend_timeout(void *ol_sc,
struct hif_pm_runtime_lock *lock, unsigned int delay)
{ return 0; }
#endif
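/*
 * Illustrative sketch (not part of the original header): the typical
 * prevent/allow pairing around work that must not race with a runtime
 * suspend. The lock name and control flow are assumptions for illustration;
 * with FEATURE_RUNTIME_PM disabled, the stubs above make this a no-op.
 */
static inline int example_runtime_pm_guard(void *hif_ctx)
{
	struct hif_pm_runtime_lock *lock;
	int ret;

	lock = hif_runtime_lock_init("example");  /* hypothetical lock name */

	ret = hif_pm_runtime_prevent_suspend(hif_ctx, lock);

	/* ... do work that requires the bus link to stay up ... */

	hif_pm_runtime_allow_suspend(hif_ctx, lock);
	hif_runtime_lock_deinit(lock);

	return ret;
}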
void hif_enable_power_management(void *hif_ctx);
void hif_disable_power_management(void *hif_ctx);
void hif_vote_link_down(void);
void hif_vote_link_up(void);
bool hif_can_suspend_link(void);
int hif_bus_resume(void);
int hif_bus_suspend(void);
#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(void);
void hif_pre_runtime_resume(void);
int hif_runtime_suspend(void);
int hif_runtime_resume(void);
void hif_process_runtime_suspend_success(void);
void hif_process_runtime_suspend_failure(void);
void hif_process_runtime_resume_success(void);
#endif
int hif_dump_registers(struct ol_softc *scn);
int ol_copy_ramdump(struct ol_softc *scn);
void hif_pktlogmod_exit(void *hif_ctx);
void hif_crash_shutdown(void *hif_ctx);
#ifdef __cplusplus
}
#endif
#endif /* _HIF_H_ */


@@ -1,153 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __HIF_NAPI_H__
#define __HIF_NAPI_H__
/**
* DOC: hif_napi.h
*
* Interface to HIF implemented functions of NAPI.
* These are used by hdd_napi.
*/
/* CLD headers */
#include <hif.h> /* struct ol_softc; */
/**
* Common declarations
* The declarations until #ifdef FEATURE_NAPI below
* are valid whether or not FEATURE_NAPI has been
* defined.
*/
/* the following triggers napi_enable/disable as required */
enum qca_napi_event {
NAPI_EVT_INVALID,
NAPI_EVT_INI_FILE,
NAPI_EVT_CMD_STATE /* ioctl enable/disable commands */
};
/**
* Macros to map ids (returned by ...create()) to pipes and vice versa
*/
#define NAPI_ID2PIPE(i) ((i)-1)
#define NAPI_PIPE2ID(p) ((p)+1)
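/*
 * Editorial note (not part of the original header): ids handed out by the
 * NAPI create path are simply pipe_id + 1, so the two macros are inverses:
 * NAPI_ID2PIPE(NAPI_PIPE2ID(p)) == p and NAPI_PIPE2ID(NAPI_ID2PIPE(i)) == i;
 * e.g. pipe 2 maps to id 3 and back.
 */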
#ifdef FEATURE_NAPI
/**
* NAPI HIF API
*
* the declarations below only apply to the case
* where FEATURE_NAPI is defined
*/
int hif_napi_create(struct ol_softc *hif,
uint8_t pipe_id,
int (*poll)(struct napi_struct *, int),
int budget,
int scale);
int hif_napi_destroy(struct ol_softc *hif,
uint8_t id,
int force);
struct qca_napi_data *hif_napi_get_all(struct ol_softc *hif);
int hif_napi_event(struct ol_softc *hif,
enum qca_napi_event event,
void *data);
/* called from the ISR within hif, so, ce is known */
int hif_napi_enabled(struct ol_softc *hif, int ce);
/* called from hdd (napi_poll), using napi id as a selector */
void hif_napi_enable_irq(struct ol_softc *hif, int id);
/* called by ce_tasklet.c::ce_irq_handler */
int hif_napi_schedule(struct ol_softc *scn, int ce_id);
/* called by hdd_napi, which is called by kernel */
int hif_napi_poll(struct napi_struct *napi, int budget);
#ifdef FEATURE_NAPI_DEBUG
#define NAPI_DEBUG(fmt, ...) \
cdf_print("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__);
#else
#define NAPI_DEBUG(fmt, ...) /* NO-OP */
#endif /* FEATURE NAPI_DEBUG */
#else /* ! defined(FEATURE_NAPI) */
/**
* Stub API
*
* The declarations in this section are valid only
* when FEATURE_NAPI has *not* been defined.
*/
#define NAPI_DEBUG(fmt, ...) /* NO-OP */
static inline int hif_napi_create(struct ol_softc *hif,
uint8_t pipe_id,
int (*poll)(struct napi_struct *, int),
int budget,
int scale)
{ return -EPERM; }
static inline int hif_napi_destroy(struct ol_softc *hif,
uint8_t id,
int force)
{ return -EPERM; }
static inline struct qca_napi_data *hif_napi_get_all(struct ol_softc *hif)
{ return NULL; }
static inline int hif_napi_event(struct ol_softc *hif,
enum qca_napi_event event,
void *data)
{ return -EPERM; }
/* called from the ISR within hif, so, ce is known */
static inline int hif_napi_enabled(struct ol_softc *hif, int ce)
{ return 0; }
/* called from hdd (napi_poll), using napi id as a selector */
static inline void hif_napi_enable_irq(struct ol_softc *hif, int id)
{ return; }
static inline int hif_napi_schedule(struct ol_softc *hif, int ce_id)
{ return 0; }
static inline int hif_napi_poll(struct napi_struct *napi, int budget)
{ return -EPERM; }
#endif /* FEATURE_NAPI */
#endif /* __HIF_NAPI_H__ */


@@ -1,37 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _PLATFORM_ICNSS_H_
#define _PLATFORM_ICNSS_H_
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#endif


@@ -1,33 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _REGTABLE_H_
#define _REGTABLE_H_
#include "regtable_pcie.h"
#include "regtable_ce.h"
#endif


@@ -1,260 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _REGTABLE_CE_H_
#define _REGTABLE_CE_H_
/*
* @d_DST_WR_INDEX_ADDRESS: Destination ring write index
*
* @d_SRC_WATERMARK_ADDRESS: Source ring watermark
*
* @d_SRC_WATERMARK_LOW_MASK: Bits indicating low watermark from Source ring
* watermark
*
* @d_SRC_WATERMARK_HIGH_MASK: Bits indicating high watermark from Source ring
* watermark
*
* @d_DST_WATERMARK_LOW_MASK: Bits indicating low watermark from Destination
* ring watermark
*
* @d_DST_WATERMARK_HIGH_MASK: Bits indicating high watermark from Destination
* ring watermark
*
* @d_CURRENT_SRRI_ADDRESS: Current source ring read index. The Start Offset
* will be reflected after a CE transfer is completed.
*
* @d_CURRENT_DRRI_ADDRESS: Current Destination ring read index. The Start
* Offset will be reflected after a CE transfer
* is completed.
*
* @d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK: Source ring high watermark
* Interrupt Status
*
* @d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK: Source ring low watermark
* Interrupt Status
*
* @d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK: Destination ring high watermark
* Interrupt Status
*
* @d_HOST_IS_DST_RING_LOW_WATERMARK_MASK: Destination ring low watermark
* Interrupt Status
*
* @d_HOST_IS_ADDRESS: Host Interrupt Status Register
*
* @d_MISC_IS_ADDRESS: Miscellaneous Interrupt Status Register
*
* @d_HOST_IS_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt
* status from the Host Interrupt Status
* register
*
* @d_CE_WRAPPER_BASE_ADDRESS: Copy Engine Wrapper Base Address
*
* @d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS: CE Wrapper summary for interrupts
* to host
*
* @d_CE_WRAPPER_INDEX_BASE_LOW: The LSB Base address to which source and
* destination read indices are written
*
* @d_CE_WRAPPER_INDEX_BASE_HIGH: The MSB Base address to which source and
* destination read indices are written
*
* @d_HOST_IE_ADDRESS: Host Line Interrupt Enable Register
*
* @d_HOST_IE_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt
* enable from the IE register
*
* @d_SR_BA_ADDRESS: LSB of Source Ring Base Address
*
* @d_SR_BA_ADDRESS_HIGH: MSB of Source Ring Base Address
*
* @d_SR_SIZE_ADDRESS: Source Ring size - number of entries and Start Offset
*
* @d_CE_CTRL1_ADDRESS: CE Control register
*
* @d_CE_CTRL1_DMAX_LENGTH_MASK: Destination buffer Max Length used for error
* check
*
* @d_DR_BA_ADDRESS: Destination Ring Base Address Low
*
* @d_DR_BA_ADDRESS_HIGH: Destination Ring Base Address High
*
* @d_DR_SIZE_ADDRESS: Destination Ring size - number of entries Start Offset
*
* @d_CE_CMD_REGISTER: Implements commands to all CE Halt Flush
*
* @d_CE_MSI_ADDRESS: CE MSI LOW Address register
*
* @d_CE_MSI_ADDRESS_HIGH: CE MSI High Address register
*
* @d_CE_MSI_DATA: CE MSI Data Register
*
* @d_CE_MSI_ENABLE_BIT: Bit in CTRL1 register indication the MSI enable
*
* @d_MISC_IE_ADDRESS: Miscellaneous Interrupt Enable Register
*
* @d_MISC_IS_AXI_ERR_MASK: Bit in Misc IS indicating AXI Timeout Interrupt
* status
*
* @d_MISC_IS_DST_ADDR_ERR_MASK: Bit in Misc IS indicating Destination Address
* Error
*
* @d_MISC_IS_SRC_LEN_ERR_MASK: Bit in Misc IS indicating Source Zero Length
* Error Interrupt status
*
* @d_MISC_IS_DST_MAX_LEN_VIO_MASK: Bit in Misc IS indicating Destination Max
* Length Violated Interrupt status
*
* @d_MISC_IS_DST_RING_OVERFLOW_MASK: Bit in Misc IS indicating Destination
* Ring Overflow Interrupt status
*
* @d_MISC_IS_SRC_RING_OVERFLOW_MASK: Bit in Misc IS indicating Source Ring
* Overflow Interrupt status
*
* @d_SRC_WATERMARK_LOW_LSB: Source Ring Low Watermark LSB
*
* @d_SRC_WATERMARK_HIGH_LSB: Source Ring High Watermark LSB
*
* @d_DST_WATERMARK_LOW_LSB: Destination Ring Low Watermark LSB
*
* @d_DST_WATERMARK_HIGH_LSB: Destination Ring High Watermark LSB
*
*
* @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK: Bits in
* d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR
* indicating Copy engine
* miscellaneous interrupt summary
*
* @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB: Bits in
* d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR
* indicating Host interrupts summary
*
* @d_CE_CTRL1_DMAX_LENGTH_LSB: LSB of Destination buffer Max Length used for
* error check
*
* @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK: Bits indicating Source ring Byte Swap
* enable. Treats source ring memory
* organisation as big-endian
*
* @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK: Bits indicating Destination ring
* byte swap enable. Treats destination
* ring memory organisation as big-endian
*
* @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB: LSB of Source ring Byte Swap enable
*
* @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB: LSB of Destination ring Byte Swap enable
*
* @d_CE_WRAPPER_DEBUG_OFFSET: Offset of CE OBS BUS Select register
*
* @d_CE_WRAPPER_DEBUG_SEL_MSB: MSB of Control register selecting inputs for
* trace/debug
*
* @d_CE_WRAPPER_DEBUG_SEL_LSB: LSB of Control register selecting inputs for
* trace/debug
*
* @d_CE_WRAPPER_DEBUG_SEL_MASK: Bits indicating Control register selecting
* inputs for trace/debug
*
* @d_CE_DEBUG_OFFSET: Offset of Copy Engine FSM Debug Status
*
* @d_CE_DEBUG_SEL_MSB: MSB of Copy Engine FSM Debug Status
*
* @d_CE_DEBUG_SEL_LSB: LSB of Copy Engine FSM Debug Status
*
* @d_CE_DEBUG_SEL_MASK: Bits indicating Copy Engine FSM Debug Status
*
*/
struct ce_reg_def {
/* copy_engine.c */
uint32_t d_DST_WR_INDEX_ADDRESS;
uint32_t d_SRC_WATERMARK_ADDRESS;
uint32_t d_SRC_WATERMARK_LOW_MASK;
uint32_t d_SRC_WATERMARK_HIGH_MASK;
uint32_t d_DST_WATERMARK_LOW_MASK;
uint32_t d_DST_WATERMARK_HIGH_MASK;
uint32_t d_CURRENT_SRRI_ADDRESS;
uint32_t d_CURRENT_DRRI_ADDRESS;
uint32_t d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK;
uint32_t d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK;
uint32_t d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK;
uint32_t d_HOST_IS_DST_RING_LOW_WATERMARK_MASK;
uint32_t d_HOST_IS_ADDRESS;
uint32_t d_MISC_IS_ADDRESS;
uint32_t d_HOST_IS_COPY_COMPLETE_MASK;
uint32_t d_CE_WRAPPER_BASE_ADDRESS;
uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS;
uint32_t d_CE_DDR_ADDRESS_FOR_RRI_LOW;
uint32_t d_CE_DDR_ADDRESS_FOR_RRI_HIGH;
uint32_t d_HOST_IE_ADDRESS;
uint32_t d_HOST_IE_COPY_COMPLETE_MASK;
uint32_t d_SR_BA_ADDRESS;
uint32_t d_SR_BA_ADDRESS_HIGH;
uint32_t d_SR_SIZE_ADDRESS;
uint32_t d_CE_CTRL1_ADDRESS;
uint32_t d_CE_CTRL1_DMAX_LENGTH_MASK;
uint32_t d_DR_BA_ADDRESS;
uint32_t d_DR_BA_ADDRESS_HIGH;
uint32_t d_DR_SIZE_ADDRESS;
uint32_t d_CE_CMD_REGISTER;
uint32_t d_CE_MSI_ADDRESS;
uint32_t d_CE_MSI_ADDRESS_HIGH;
uint32_t d_CE_MSI_DATA;
uint32_t d_CE_MSI_ENABLE_BIT;
uint32_t d_MISC_IE_ADDRESS;
uint32_t d_MISC_IS_AXI_ERR_MASK;
uint32_t d_MISC_IS_DST_ADDR_ERR_MASK;
uint32_t d_MISC_IS_SRC_LEN_ERR_MASK;
uint32_t d_MISC_IS_DST_MAX_LEN_VIO_MASK;
uint32_t d_MISC_IS_DST_RING_OVERFLOW_MASK;
uint32_t d_MISC_IS_SRC_RING_OVERFLOW_MASK;
uint32_t d_SRC_WATERMARK_LOW_LSB;
uint32_t d_SRC_WATERMARK_HIGH_LSB;
uint32_t d_DST_WATERMARK_LOW_LSB;
uint32_t d_DST_WATERMARK_HIGH_LSB;
uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK;
uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB;
uint32_t d_CE_CTRL1_DMAX_LENGTH_LSB;
uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK;
uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK;
uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB;
uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB;
uint32_t d_CE_CTRL1_IDX_UPD_EN_MASK;
uint32_t d_CE_WRAPPER_DEBUG_OFFSET;
uint32_t d_CE_WRAPPER_DEBUG_SEL_MSB;
uint32_t d_CE_WRAPPER_DEBUG_SEL_LSB;
uint32_t d_CE_WRAPPER_DEBUG_SEL_MASK;
uint32_t d_CE_DEBUG_OFFSET;
uint32_t d_CE_DEBUG_SEL_MSB;
uint32_t d_CE_DEBUG_SEL_LSB;
uint32_t d_CE_DEBUG_SEL_MASK;
uint32_t d_CE0_BASE_ADDRESS;
uint32_t d_CE1_BASE_ADDRESS;
uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES;
uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS;
};
#endif /* _REGTABLE_CE_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,796 +0,0 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _AR6320DEF_H_
#define _AR6320DEF_H_
/* Base Addresses */
#define AR6320_RTC_SOC_BASE_ADDRESS 0x00000000
#define AR6320_RTC_WMAC_BASE_ADDRESS 0x00001000
#define AR6320_MAC_COEX_BASE_ADDRESS 0x0000f000
#define AR6320_BT_COEX_BASE_ADDRESS 0x00002000
#define AR6320_SOC_PCIE_BASE_ADDRESS 0x00038000
#define AR6320_SOC_CORE_BASE_ADDRESS 0x0003a000
#define AR6320_WLAN_UART_BASE_ADDRESS 0x0000c000
#define AR6320_WLAN_SI_BASE_ADDRESS 0x00010000
#define AR6320_WLAN_GPIO_BASE_ADDRESS 0x00005000
#define AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000
#define AR6320_WLAN_MAC_BASE_ADDRESS 0x00010000
#define AR6320_EFUSE_BASE_ADDRESS 0x00024000
#define AR6320_FPGA_REG_BASE_ADDRESS 0x00039000
#define AR6320_WLAN_UART2_BASE_ADDRESS 0x00054c00
#define AR6320_CE_WRAPPER_BASE_ADDRESS 0x00034000
#define AR6320_CE0_BASE_ADDRESS 0x00034400
#define AR6320_CE1_BASE_ADDRESS 0x00034800
#define AR6320_CE2_BASE_ADDRESS 0x00034c00
#define AR6320_CE3_BASE_ADDRESS 0x00035000
#define AR6320_CE4_BASE_ADDRESS 0x00035400
#define AR6320_CE5_BASE_ADDRESS 0x00035800
#define AR6320_CE6_BASE_ADDRESS 0x00035c00
#define AR6320_CE7_BASE_ADDRESS 0x00036000
#define AR6320_DBI_BASE_ADDRESS 0x0003c000
#define AR6320_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800
#define AR6320_SCRATCH_3_ADDRESS 0x0028
#define AR6320_TARG_DRAM_START 0x00400000
#define AR6320_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0
#define AR6320_SOC_RESET_CONTROL_OFFSET 0x00000000
#define AR6320_SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define AR6320_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000
#define AR6320_WLAN_GPIO_PIN0_ADDRESS 0x00000068
#define AR6320_WLAN_GPIO_PIN1_ADDRESS 0x0000006c
#define AR6320_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define AR6320_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020
#define AR6320_SOC_LPO_CAL_OFFSET 0x000000e0
#define AR6320_WLAN_GPIO_PIN10_ADDRESS 0x00000090
#define AR6320_WLAN_GPIO_PIN11_ADDRESS 0x00000094
#define AR6320_WLAN_GPIO_PIN12_ADDRESS 0x00000098
#define AR6320_WLAN_GPIO_PIN13_ADDRESS 0x0000009c
#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0
#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define AR6320_SOC_LPO_CAL_ENABLE_LSB 20
#define AR6320_SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define AR6320_SI_CONFIG_I2C_LSB 16
#define AR6320_SI_CONFIG_I2C_MASK 0x00010000
#define AR6320_SI_CONFIG_POS_SAMPLE_LSB 7
#define AR6320_SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define AR6320_SI_CONFIG_INACTIVE_CLK_LSB 4
#define AR6320_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define AR6320_SI_CONFIG_INACTIVE_DATA_LSB 5
#define AR6320_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define AR6320_SI_CONFIG_DIVIDER_LSB 0
#define AR6320_SI_CONFIG_DIVIDER_MASK 0x0000000f
#define AR6320_SI_CONFIG_OFFSET 0x00000000
#define AR6320_SI_TX_DATA0_OFFSET 0x00000008
#define AR6320_SI_TX_DATA1_OFFSET 0x0000000c
#define AR6320_SI_RX_DATA0_OFFSET 0x00000010
#define AR6320_SI_RX_DATA1_OFFSET 0x00000014
#define AR6320_SI_CS_OFFSET 0x00000004
#define AR6320_SI_CS_DONE_ERR_MASK 0x00000400
#define AR6320_SI_CS_DONE_INT_MASK 0x00000200
#define AR6320_SI_CS_START_LSB 8
#define AR6320_SI_CS_START_MASK 0x00000100
#define AR6320_SI_CS_RX_CNT_LSB 4
#define AR6320_SI_CS_RX_CNT_MASK 0x000000f0
#define AR6320_SI_CS_TX_CNT_LSB 0
#define AR6320_SI_CS_TX_CNT_MASK 0x0000000f
#define AR6320_CE_COUNT 8
#define AR6320_SR_WR_INDEX_ADDRESS 0x003c
#define AR6320_DST_WATERMARK_ADDRESS 0x0050
#define AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB 14
#define AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000
#define AR6320_RX_MPDU_START_0_RETRY_LSB 14
#define AR6320_RX_MPDU_START_0_RETRY_MASK 0x00004000
#define AR6320_RX_MPDU_START_0_SEQ_NUM_LSB 16
#define AR6320_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000
#define AR6320_RX_MPDU_START_2_TID_LSB 28
#define AR6320_RX_MPDU_START_2_TID_MASK 0xf0000000
#define AR6320_RX_MPDU_START_2_PN_47_32_LSB 0
#define AR6320_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff
#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff
#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB 0
#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16
#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000
#define AR6320_RX_MSDU_END_4_LAST_MSDU_LSB 15
#define AR6320_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000
#define AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB 2
#define AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004
#define AR6320_RX_ATTENTION_0_FRAGMENT_LSB 13
#define AR6320_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000
#define AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000
#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16
#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000
#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB 0
#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff
#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008
#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB 8
#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300
#define AR6320_RX_MPDU_START_0_ENCRYPTED_LSB 13
#define AR6320_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000
#define AR6320_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400
#define AR6320_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000
#define AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000
#define AR6320_DST_WR_INDEX_ADDRESS 0x0040
#define AR6320_SRC_WATERMARK_ADDRESS 0x004c
#define AR6320_SRC_WATERMARK_LOW_MASK 0xffff0000
#define AR6320_SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define AR6320_DST_WATERMARK_LOW_MASK 0xffff0000
#define AR6320_DST_WATERMARK_HIGH_MASK 0x0000ffff
#define AR6320_CURRENT_SRRI_ADDRESS 0x0044
#define AR6320_CURRENT_DRRI_ADDRESS 0x0048
#define AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define AR6320_HOST_IS_ADDRESS 0x0030
#define AR6320_HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define AR6320_HOST_IE_ADDRESS 0x002c
#define AR6320_HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define AR6320_SR_BA_ADDRESS 0x0000
#define AR6320_SR_SIZE_ADDRESS 0x0004
#define AR6320_CE_CTRL1_ADDRESS 0x0010
#define AR6320_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define AR6320_DR_BA_ADDRESS 0x0008
#define AR6320_DR_SIZE_ADDRESS 0x000c
#define AR6320_MISC_IE_ADDRESS 0x0034
#define AR6320_MISC_IS_AXI_ERR_MASK 0x00000400
#define AR6320_MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define AR6320_MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define AR6320_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define AR6320_SRC_WATERMARK_LOW_LSB 16
#define AR6320_SRC_WATERMARK_HIGH_LSB 0
#define AR6320_DST_WATERMARK_LOW_LSB 16
#define AR6320_DST_WATERMARK_HIGH_LSB 0
#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define AR6320_CE_CTRL1_DMAX_LENGTH_LSB 0
#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5
#define AR6320_SOC_GLOBAL_RESET_ADDRESS 0x0008
#define AR6320_RTC_STATE_ADDRESS 0x0000
#define AR6320_RTC_STATE_COLD_RESET_MASK 0x00002000
#define AR6320_PCIE_SOC_WAKE_RESET 0x00000000
#define AR6320_PCIE_SOC_WAKE_ADDRESS 0x0004
#define AR6320_PCIE_SOC_WAKE_V_MASK 0x00000001
#define AR6320_RTC_STATE_V_MASK 0x00000007
#define AR6320_RTC_STATE_V_LSB 0
#define AR6320_RTC_STATE_V_ON 3
#define AR6320_MUX_ID_MASK 0x0000
#define AR6320_TRANSACTION_ID_MASK 0x3fff
#define AR6320_PCIE_LOCAL_BASE_ADDRESS 0x80000
#define AR6320_FW_IND_EVENT_PENDING 1
#define AR6320_FW_IND_INITIALIZED 2
#define AR6320_FW_IND_HELPER 4
#define AR6320_PCIE_INTR_ENABLE_ADDRESS 0x0008
#define AR6320_PCIE_INTR_CLR_ADDRESS 0x0014
#define AR6320_PCIE_INTR_FIRMWARE_MASK 0x00000400
#define AR6320_PCIE_INTR_CE0_MASK 0x00000800
#define AR6320_PCIE_INTR_CE_MASK_ALL 0x0007f800
#define AR6320_PCIE_INTR_CAUSE_ADDRESS 0x000c
#define AR6320_CPU_INTR_ADDRESS 0x0010
#define AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define AR6320_SOC_RESET_CONTROL_ADDRESS 0x00000000
#define AR6320_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001
#define AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define AR6320_CORE_CTRL_ADDRESS 0x0000
#define AR6320_CORE_CTRL_CPU_INTR_MASK 0x00002000
#define AR6320_LOCAL_SCRATCH_OFFSET 0x000000c0
#define AR6320_CLOCK_GPIO_OFFSET 0xffffffff
#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define AR6320_SOC_CHIP_ID_ADDRESS 0x000000f0
#define AR6320_SOC_CHIP_ID_VERSION_MASK 0xfffc0000
#define AR6320_SOC_CHIP_ID_VERSION_LSB 18
#define AR6320_SOC_CHIP_ID_REVISION_MASK 0x00000f00
#define AR6320_SOC_CHIP_ID_REVISION_LSB 8
#define AR6320_SOC_POWER_REG_OFFSET 0x0000010c
/* Copy Engine Debug */
#define AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c
#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3
#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0
#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f
#define AR6320_WLAN_DEBUG_CONTROL_OFFSET 0x00000108
#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB 0
#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB 0
#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001
#define AR6320_WLAN_DEBUG_OUT_OFFSET 0x00000110
#define AR6320_WLAN_DEBUG_OUT_DATA_MSB 19
#define AR6320_WLAN_DEBUG_OUT_DATA_LSB 0
#define AR6320_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff
#define AR6320_AMBA_DEBUG_BUS_OFFSET 0x0000011c
#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13
#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8
#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00
#define AR6320_AMBA_DEBUG_BUS_SEL_MSB 4
#define AR6320_AMBA_DEBUG_BUS_SEL_LSB 0
#define AR6320_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f
#define AR6320_CE_WRAPPER_DEBUG_OFFSET 0x0008
#define AR6320_CE_WRAPPER_DEBUG_SEL_MSB 5
#define AR6320_CE_WRAPPER_DEBUG_SEL_LSB 0
#define AR6320_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f
#define AR6320_CE_DEBUG_OFFSET 0x0054
#define AR6320_CE_DEBUG_SEL_MSB 5
#define AR6320_CE_DEBUG_SEL_LSB 0
#define AR6320_CE_DEBUG_SEL_MASK 0x0000003f
/* End */
/* PLL start */
#define AR6320_EFUSE_OFFSET 0x0000032c
#define AR6320_EFUSE_XTAL_SEL_MSB 10
#define AR6320_EFUSE_XTAL_SEL_LSB 8
#define AR6320_EFUSE_XTAL_SEL_MASK 0x00000700
#define AR6320_BB_PLL_CONFIG_OFFSET 0x000002f4
#define AR6320_BB_PLL_CONFIG_OUTDIV_MSB 20
#define AR6320_BB_PLL_CONFIG_OUTDIV_LSB 18
#define AR6320_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000
#define AR6320_BB_PLL_CONFIG_FRAC_MSB 17
#define AR6320_BB_PLL_CONFIG_FRAC_LSB 0
#define AR6320_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff
#define AR6320_WLAN_PLL_SETTLE_TIME_MSB 10
#define AR6320_WLAN_PLL_SETTLE_TIME_LSB 0
#define AR6320_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff
#define AR6320_WLAN_PLL_SETTLE_OFFSET 0x0018
#define AR6320_WLAN_PLL_SETTLE_SW_MASK 0x000007ff
#define AR6320_WLAN_PLL_SETTLE_RSTMASK 0xffffffff
#define AR6320_WLAN_PLL_SETTLE_RESET 0x00000400
#define AR6320_WLAN_PLL_CONTROL_NOPWD_MSB 18
#define AR6320_WLAN_PLL_CONTROL_NOPWD_LSB 18
#define AR6320_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
#define AR6320_WLAN_PLL_CONTROL_BYPASS_MSB 16
#define AR6320_WLAN_PLL_CONTROL_BYPASS_LSB 16
#define AR6320_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
#define AR6320_WLAN_PLL_CONTROL_BYPASS_RESET 0x1
#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB 15
#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB 14
#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000
#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0
#define AR6320_WLAN_PLL_CONTROL_REFDIV_MSB 13
#define AR6320_WLAN_PLL_CONTROL_REFDIV_LSB 10
#define AR6320_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00
#define AR6320_WLAN_PLL_CONTROL_REFDIV_RESET 0x0
#define AR6320_WLAN_PLL_CONTROL_DIV_MSB 9
#define AR6320_WLAN_PLL_CONTROL_DIV_LSB 0
#define AR6320_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
#define AR6320_WLAN_PLL_CONTROL_DIV_RESET 0x11
#define AR6320_WLAN_PLL_CONTROL_OFFSET 0x0014
#define AR6320_WLAN_PLL_CONTROL_SW_MASK 0x001fffff
#define AR6320_WLAN_PLL_CONTROL_RSTMASK 0xffffffff
#define AR6320_WLAN_PLL_CONTROL_RESET 0x00010011
#define AR6320_SOC_CORE_CLK_CTRL_OFFSET 0x00000114
#define AR6320_SOC_CORE_CLK_CTRL_DIV_MSB 2
#define AR6320_SOC_CORE_CLK_CTRL_DIV_LSB 0
#define AR6320_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007
#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5
#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5
#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0
#define AR6320_RTC_SYNC_STATUS_OFFSET 0x0244
#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020
#define AR6320_SOC_CPU_CLOCK_STANDARD_MSB 1
#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0
#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
/* PLL end */
#define AR6320_PCIE_INTR_CE_MASK(n) \
(AR6320_PCIE_INTR_CE0_MASK << (n))
#define AR6320_DRAM_BASE_ADDRESS AR6320_TARG_DRAM_START
#define AR6320_FW_INDICATOR_ADDRESS \
(AR6320_SOC_CORE_BASE_ADDRESS + AR6320_SCRATCH_3_ADDRESS)
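/*
 * Worked example (editorial note, not part of the original header):
 * AR6320_FW_INDICATOR_ADDRESS = 0x0003a000 + 0x0028 = 0x0003a028, and
 * AR6320_PCIE_INTR_CE_MASK(3) = 0x00000800 << 3 = 0x00004000, i.e. copy
 * engine n owns bit (CE0 << n) within AR6320_PCIE_INTR_CE_MASK_ALL.
 */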
#define AR6320_SYSTEM_SLEEP_OFFSET AR6320_SOC_SYSTEM_SLEEP_OFFSET
#define AR6320_WLAN_SYSTEM_SLEEP_OFFSET 0x002c
#define AR6320_WLAN_RESET_CONTROL_OFFSET AR6320_SOC_RESET_CONTROL_OFFSET
#define AR6320_CLOCK_CONTROL_OFFSET AR6320_SOC_CLOCK_CONTROL_OFFSET
#define AR6320_CLOCK_CONTROL_SI0_CLK_MASK AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define AR6320_RESET_CONTROL_MBOX_RST_MASK 0x00000004
#define AR6320_RESET_CONTROL_SI0_RST_MASK AR6320_SOC_RESET_CONTROL_SI0_RST_MASK
#define AR6320_GPIO_BASE_ADDRESS AR6320_WLAN_GPIO_BASE_ADDRESS
#define AR6320_GPIO_PIN0_OFFSET AR6320_WLAN_GPIO_PIN0_ADDRESS
#define AR6320_GPIO_PIN1_OFFSET AR6320_WLAN_GPIO_PIN1_ADDRESS
#define AR6320_GPIO_PIN0_CONFIG_MASK AR6320_WLAN_GPIO_PIN0_CONFIG_MASK
#define AR6320_GPIO_PIN1_CONFIG_MASK AR6320_WLAN_GPIO_PIN1_CONFIG_MASK
#define AR6320_SI_BASE_ADDRESS 0x00050000
#define AR6320_CPU_CLOCK_OFFSET AR6320_SOC_CPU_CLOCK_OFFSET
#define AR6320_LPO_CAL_OFFSET AR6320_SOC_LPO_CAL_OFFSET
#define AR6320_GPIO_PIN10_OFFSET AR6320_WLAN_GPIO_PIN10_ADDRESS
#define AR6320_GPIO_PIN11_OFFSET AR6320_WLAN_GPIO_PIN11_ADDRESS
#define AR6320_GPIO_PIN12_OFFSET AR6320_WLAN_GPIO_PIN12_ADDRESS
#define AR6320_GPIO_PIN13_OFFSET AR6320_WLAN_GPIO_PIN13_ADDRESS
#define AR6320_CPU_CLOCK_STANDARD_LSB AR6320_SOC_CPU_CLOCK_STANDARD_LSB
#define AR6320_CPU_CLOCK_STANDARD_MASK AR6320_SOC_CPU_CLOCK_STANDARD_MASK
#define AR6320_LPO_CAL_ENABLE_LSB AR6320_SOC_LPO_CAL_ENABLE_LSB
#define AR6320_LPO_CAL_ENABLE_MASK AR6320_SOC_LPO_CAL_ENABLE_MASK
#define AR6320_ANALOG_INTF_BASE_ADDRESS AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS
#define AR6320_MBOX_BASE_ADDRESS 0x00008000
#define AR6320_INT_STATUS_ENABLE_ERROR_LSB 7
#define AR6320_INT_STATUS_ENABLE_ERROR_MASK 0x00000080
#define AR6320_INT_STATUS_ENABLE_CPU_LSB 6
#define AR6320_INT_STATUS_ENABLE_CPU_MASK 0x00000040
#define AR6320_INT_STATUS_ENABLE_COUNTER_LSB 4
#define AR6320_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB 0
#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17
#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000
#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16
#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000
#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24
#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000
#define AR6320_INT_STATUS_ENABLE_ADDRESS 0x0828
#define AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB 8
#define AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00
#define AR6320_HOST_INT_STATUS_ADDRESS 0x0800
#define AR6320_CPU_INT_STATUS_ADDRESS 0x0801
#define AR6320_ERROR_INT_STATUS_ADDRESS 0x0802
#define AR6320_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000
#define AR6320_ERROR_INT_STATUS_WAKEUP_LSB 18
#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000
#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17
#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000
#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16
#define AR6320_COUNT_DEC_ADDRESS 0x0840
#define AR6320_HOST_INT_STATUS_CPU_MASK 0x00000040
#define AR6320_HOST_INT_STATUS_CPU_LSB 6
#define AR6320_HOST_INT_STATUS_ERROR_MASK 0x00000080
#define AR6320_HOST_INT_STATUS_ERROR_LSB 7
#define AR6320_HOST_INT_STATUS_COUNTER_MASK 0x00000010
#define AR6320_HOST_INT_STATUS_COUNTER_LSB 4
#define AR6320_RX_LOOKAHEAD_VALID_ADDRESS 0x0805
#define AR6320_WINDOW_DATA_ADDRESS 0x0874
#define AR6320_WINDOW_READ_ADDR_ADDRESS 0x087c
#define AR6320_WINDOW_WRITE_ADDR_ADDRESS 0x0878
struct targetdef_s ar6320_targetdef = {
.d_RTC_SOC_BASE_ADDRESS = AR6320_RTC_SOC_BASE_ADDRESS,
.d_RTC_WMAC_BASE_ADDRESS = AR6320_RTC_WMAC_BASE_ADDRESS,
.d_SYSTEM_SLEEP_OFFSET = AR6320_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_OFFSET = AR6320_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_DISABLE_LSB =
AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB,
.d_WLAN_SYSTEM_SLEEP_DISABLE_MASK =
AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK,
.d_CLOCK_CONTROL_OFFSET = AR6320_CLOCK_CONTROL_OFFSET,
.d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320_CLOCK_CONTROL_SI0_CLK_MASK,
.d_RESET_CONTROL_OFFSET = AR6320_SOC_RESET_CONTROL_OFFSET,
.d_RESET_CONTROL_MBOX_RST_MASK = AR6320_RESET_CONTROL_MBOX_RST_MASK,
.d_RESET_CONTROL_SI0_RST_MASK = AR6320_RESET_CONTROL_SI0_RST_MASK,
.d_WLAN_RESET_CONTROL_OFFSET = AR6320_WLAN_RESET_CONTROL_OFFSET,
.d_WLAN_RESET_CONTROL_COLD_RST_MASK =
AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK,
.d_WLAN_RESET_CONTROL_WARM_RST_MASK =
AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK,
.d_GPIO_BASE_ADDRESS = AR6320_GPIO_BASE_ADDRESS,
.d_GPIO_PIN0_OFFSET = AR6320_GPIO_PIN0_OFFSET,
.d_GPIO_PIN1_OFFSET = AR6320_GPIO_PIN1_OFFSET,
.d_GPIO_PIN0_CONFIG_MASK = AR6320_GPIO_PIN0_CONFIG_MASK,
.d_GPIO_PIN1_CONFIG_MASK = AR6320_GPIO_PIN1_CONFIG_MASK,
.d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB,
.d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK,
.d_SI_CONFIG_I2C_LSB = AR6320_SI_CONFIG_I2C_LSB,
.d_SI_CONFIG_I2C_MASK = AR6320_SI_CONFIG_I2C_MASK,
.d_SI_CONFIG_POS_SAMPLE_LSB = AR6320_SI_CONFIG_POS_SAMPLE_LSB,
.d_SI_CONFIG_POS_SAMPLE_MASK = AR6320_SI_CONFIG_POS_SAMPLE_MASK,
.d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320_SI_CONFIG_INACTIVE_CLK_LSB,
.d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320_SI_CONFIG_INACTIVE_CLK_MASK,
.d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320_SI_CONFIG_INACTIVE_DATA_LSB,
.d_SI_CONFIG_INACTIVE_DATA_MASK = AR6320_SI_CONFIG_INACTIVE_DATA_MASK,
.d_SI_CONFIG_DIVIDER_LSB = AR6320_SI_CONFIG_DIVIDER_LSB,
.d_SI_CONFIG_DIVIDER_MASK = AR6320_SI_CONFIG_DIVIDER_MASK,
.d_SI_BASE_ADDRESS = AR6320_SI_BASE_ADDRESS,
.d_SI_CONFIG_OFFSET = AR6320_SI_CONFIG_OFFSET,
.d_SI_TX_DATA0_OFFSET = AR6320_SI_TX_DATA0_OFFSET,
.d_SI_TX_DATA1_OFFSET = AR6320_SI_TX_DATA1_OFFSET,
.d_SI_RX_DATA0_OFFSET = AR6320_SI_RX_DATA0_OFFSET,
.d_SI_RX_DATA1_OFFSET = AR6320_SI_RX_DATA1_OFFSET,
.d_SI_CS_OFFSET = AR6320_SI_CS_OFFSET,
.d_SI_CS_DONE_ERR_MASK = AR6320_SI_CS_DONE_ERR_MASK,
.d_SI_CS_DONE_INT_MASK = AR6320_SI_CS_DONE_INT_MASK,
.d_SI_CS_START_LSB = AR6320_SI_CS_START_LSB,
.d_SI_CS_START_MASK = AR6320_SI_CS_START_MASK,
.d_SI_CS_RX_CNT_LSB = AR6320_SI_CS_RX_CNT_LSB,
.d_SI_CS_RX_CNT_MASK = AR6320_SI_CS_RX_CNT_MASK,
.d_SI_CS_TX_CNT_LSB = AR6320_SI_CS_TX_CNT_LSB,
.d_SI_CS_TX_CNT_MASK = AR6320_SI_CS_TX_CNT_MASK,
.d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ,
.d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ,
.d_MBOX_BASE_ADDRESS = AR6320_MBOX_BASE_ADDRESS,
.d_LOCAL_SCRATCH_OFFSET = AR6320_LOCAL_SCRATCH_OFFSET,
.d_CPU_CLOCK_OFFSET = AR6320_CPU_CLOCK_OFFSET,
.d_LPO_CAL_OFFSET = AR6320_LPO_CAL_OFFSET,
.d_GPIO_PIN10_OFFSET = AR6320_GPIO_PIN10_OFFSET,
.d_GPIO_PIN11_OFFSET = AR6320_GPIO_PIN11_OFFSET,
.d_GPIO_PIN12_OFFSET = AR6320_GPIO_PIN12_OFFSET,
.d_GPIO_PIN13_OFFSET = AR6320_GPIO_PIN13_OFFSET,
.d_CLOCK_GPIO_OFFSET = AR6320_CLOCK_GPIO_OFFSET,
.d_CPU_CLOCK_STANDARD_LSB = AR6320_CPU_CLOCK_STANDARD_LSB,
.d_CPU_CLOCK_STANDARD_MASK = AR6320_CPU_CLOCK_STANDARD_MASK,
.d_LPO_CAL_ENABLE_LSB = AR6320_LPO_CAL_ENABLE_LSB,
.d_LPO_CAL_ENABLE_MASK = AR6320_LPO_CAL_ENABLE_MASK,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK =
AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK,
.d_ANALOG_INTF_BASE_ADDRESS = AR6320_ANALOG_INTF_BASE_ADDRESS,
.d_WLAN_MAC_BASE_ADDRESS = AR6320_WLAN_MAC_BASE_ADDRESS,
.d_FW_INDICATOR_ADDRESS = AR6320_FW_INDICATOR_ADDRESS,
.d_DRAM_BASE_ADDRESS = AR6320_DRAM_BASE_ADDRESS,
.d_SOC_CORE_BASE_ADDRESS = AR6320_SOC_CORE_BASE_ADDRESS,
.d_CORE_CTRL_ADDRESS = AR6320_CORE_CTRL_ADDRESS,
.d_CE_COUNT = AR6320_CE_COUNT,
.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
.d_PCIE_INTR_ENABLE_ADDRESS = AR6320_PCIE_INTR_ENABLE_ADDRESS,
.d_PCIE_INTR_CLR_ADDRESS = AR6320_PCIE_INTR_CLR_ADDRESS,
.d_PCIE_INTR_FIRMWARE_MASK = AR6320_PCIE_INTR_FIRMWARE_MASK,
.d_PCIE_INTR_CE_MASK_ALL = AR6320_PCIE_INTR_CE_MASK_ALL,
.d_CORE_CTRL_CPU_INTR_MASK = AR6320_CORE_CTRL_CPU_INTR_MASK,
.d_SR_WR_INDEX_ADDRESS = AR6320_SR_WR_INDEX_ADDRESS,
.d_DST_WATERMARK_ADDRESS = AR6320_DST_WATERMARK_ADDRESS,
/* htt_rx.c */
.d_RX_MSDU_END_4_FIRST_MSDU_MASK =
AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK,
.d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB,
.d_RX_MPDU_START_0_RETRY_LSB = AR6320_RX_MPDU_START_0_RETRY_LSB,
.d_RX_MPDU_START_0_RETRY_MASK = AR6320_RX_MPDU_START_0_RETRY_MASK,
.d_RX_MPDU_START_0_SEQ_NUM_MASK = AR6320_RX_MPDU_START_0_SEQ_NUM_MASK,
.d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320_RX_MPDU_START_0_SEQ_NUM_LSB,
.d_RX_MPDU_START_2_PN_47_32_LSB = AR6320_RX_MPDU_START_2_PN_47_32_LSB,
.d_RX_MPDU_START_2_PN_47_32_MASK =
AR6320_RX_MPDU_START_2_PN_47_32_MASK,
.d_RX_MPDU_START_2_TID_LSB = AR6320_RX_MPDU_START_2_TID_LSB,
.d_RX_MPDU_START_2_TID_MASK = AR6320_RX_MPDU_START_2_TID_MASK,
.d_RX_MSDU_END_1_KEY_ID_OCT_MASK =
AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK,
.d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK =
AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB =
AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB,
.d_RX_MSDU_END_4_LAST_MSDU_MASK = AR6320_RX_MSDU_END_4_LAST_MSDU_MASK,
.d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320_RX_MSDU_END_4_LAST_MSDU_LSB,
.d_RX_ATTENTION_0_MCAST_BCAST_MASK =
AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK,
.d_RX_ATTENTION_0_MCAST_BCAST_LSB =
AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB,
.d_RX_ATTENTION_0_FRAGMENT_MASK = AR6320_RX_ATTENTION_0_FRAGMENT_MASK,
.d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320_RX_ATTENTION_0_FRAGMENT_LSB,
.d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK =
AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK =
AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB =
AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB,
.d_RX_MSDU_START_0_MSDU_LENGTH_MASK =
AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK,
.d_RX_MSDU_START_0_MSDU_LENGTH_LSB =
AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB,
.d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET =
AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET,
.d_RX_MSDU_START_2_DECAP_FORMAT_MASK =
AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK,
.d_RX_MSDU_START_2_DECAP_FORMAT_LSB =
AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB,
.d_RX_MPDU_START_0_ENCRYPTED_MASK =
AR6320_RX_MPDU_START_0_ENCRYPTED_MASK,
.d_RX_MPDU_START_0_ENCRYPTED_LSB =
AR6320_RX_MPDU_START_0_ENCRYPTED_LSB,
.d_RX_ATTENTION_0_MORE_DATA_MASK =
AR6320_RX_ATTENTION_0_MORE_DATA_MASK,
.d_RX_ATTENTION_0_MSDU_DONE_MASK =
AR6320_RX_ATTENTION_0_MSDU_DONE_MASK,
.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
/* PLL start */
.d_EFUSE_OFFSET = AR6320_EFUSE_OFFSET,
.d_EFUSE_XTAL_SEL_MSB = AR6320_EFUSE_XTAL_SEL_MSB,
.d_EFUSE_XTAL_SEL_LSB = AR6320_EFUSE_XTAL_SEL_LSB,
.d_EFUSE_XTAL_SEL_MASK = AR6320_EFUSE_XTAL_SEL_MASK,
.d_BB_PLL_CONFIG_OFFSET = AR6320_BB_PLL_CONFIG_OFFSET,
.d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320_BB_PLL_CONFIG_OUTDIV_MSB,
.d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320_BB_PLL_CONFIG_OUTDIV_LSB,
.d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320_BB_PLL_CONFIG_OUTDIV_MASK,
.d_BB_PLL_CONFIG_FRAC_MSB = AR6320_BB_PLL_CONFIG_FRAC_MSB,
.d_BB_PLL_CONFIG_FRAC_LSB = AR6320_BB_PLL_CONFIG_FRAC_LSB,
.d_BB_PLL_CONFIG_FRAC_MASK = AR6320_BB_PLL_CONFIG_FRAC_MASK,
.d_WLAN_PLL_SETTLE_TIME_MSB = AR6320_WLAN_PLL_SETTLE_TIME_MSB,
.d_WLAN_PLL_SETTLE_TIME_LSB = AR6320_WLAN_PLL_SETTLE_TIME_LSB,
.d_WLAN_PLL_SETTLE_TIME_MASK = AR6320_WLAN_PLL_SETTLE_TIME_MASK,
.d_WLAN_PLL_SETTLE_OFFSET = AR6320_WLAN_PLL_SETTLE_OFFSET,
.d_WLAN_PLL_SETTLE_SW_MASK = AR6320_WLAN_PLL_SETTLE_SW_MASK,
.d_WLAN_PLL_SETTLE_RSTMASK = AR6320_WLAN_PLL_SETTLE_RSTMASK,
.d_WLAN_PLL_SETTLE_RESET = AR6320_WLAN_PLL_SETTLE_RESET,
.d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320_WLAN_PLL_CONTROL_NOPWD_MSB,
.d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320_WLAN_PLL_CONTROL_NOPWD_LSB,
.d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320_WLAN_PLL_CONTROL_NOPWD_MASK,
.d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320_WLAN_PLL_CONTROL_BYPASS_MSB,
.d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320_WLAN_PLL_CONTROL_BYPASS_LSB,
.d_WLAN_PLL_CONTROL_BYPASS_MASK = AR6320_WLAN_PLL_CONTROL_BYPASS_MASK,
.d_WLAN_PLL_CONTROL_BYPASS_RESET =
AR6320_WLAN_PLL_CONTROL_BYPASS_RESET,
.d_WLAN_PLL_CONTROL_CLK_SEL_MSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB,
.d_WLAN_PLL_CONTROL_CLK_SEL_LSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB,
.d_WLAN_PLL_CONTROL_CLK_SEL_MASK =
AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK,
.d_WLAN_PLL_CONTROL_CLK_SEL_RESET =
AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET,
.d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320_WLAN_PLL_CONTROL_REFDIV_MSB,
.d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320_WLAN_PLL_CONTROL_REFDIV_LSB,
.d_WLAN_PLL_CONTROL_REFDIV_MASK = AR6320_WLAN_PLL_CONTROL_REFDIV_MASK,
.d_WLAN_PLL_CONTROL_REFDIV_RESET =
AR6320_WLAN_PLL_CONTROL_REFDIV_RESET,
.d_WLAN_PLL_CONTROL_DIV_MSB = AR6320_WLAN_PLL_CONTROL_DIV_MSB,
.d_WLAN_PLL_CONTROL_DIV_LSB = AR6320_WLAN_PLL_CONTROL_DIV_LSB,
.d_WLAN_PLL_CONTROL_DIV_MASK = AR6320_WLAN_PLL_CONTROL_DIV_MASK,
.d_WLAN_PLL_CONTROL_DIV_RESET = AR6320_WLAN_PLL_CONTROL_DIV_RESET,
.d_WLAN_PLL_CONTROL_OFFSET = AR6320_WLAN_PLL_CONTROL_OFFSET,
.d_WLAN_PLL_CONTROL_SW_MASK = AR6320_WLAN_PLL_CONTROL_SW_MASK,
.d_WLAN_PLL_CONTROL_RSTMASK = AR6320_WLAN_PLL_CONTROL_RSTMASK,
.d_WLAN_PLL_CONTROL_RESET = AR6320_WLAN_PLL_CONTROL_RESET,
.d_SOC_CORE_CLK_CTRL_OFFSET = AR6320_SOC_CORE_CLK_CTRL_OFFSET,
.d_SOC_CORE_CLK_CTRL_DIV_MSB = AR6320_SOC_CORE_CLK_CTRL_DIV_MSB,
.d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320_SOC_CORE_CLK_CTRL_DIV_LSB,
.d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320_SOC_CORE_CLK_CTRL_DIV_MASK,
.d_RTC_SYNC_STATUS_PLL_CHANGING_MSB =
AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB,
.d_RTC_SYNC_STATUS_PLL_CHANGING_LSB =
AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB,
.d_RTC_SYNC_STATUS_PLL_CHANGING_MASK =
AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK,
.d_RTC_SYNC_STATUS_PLL_CHANGING_RESET =
AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET,
.d_RTC_SYNC_STATUS_OFFSET = AR6320_RTC_SYNC_STATUS_OFFSET,
.d_SOC_CPU_CLOCK_OFFSET = AR6320_SOC_CPU_CLOCK_OFFSET,
.d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320_SOC_CPU_CLOCK_STANDARD_MSB,
.d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320_SOC_CPU_CLOCK_STANDARD_LSB,
.d_SOC_CPU_CLOCK_STANDARD_MASK = AR6320_SOC_CPU_CLOCK_STANDARD_MASK,
/* PLL end */
.d_SOC_POWER_REG_OFFSET = AR6320_SOC_POWER_REG_OFFSET,
.d_PCIE_INTR_CAUSE_ADDRESS = AR6320_PCIE_INTR_CAUSE_ADDRESS,
.d_SOC_RESET_CONTROL_ADDRESS = AR6320_SOC_RESET_CONTROL_ADDRESS,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK =
AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB =
AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB,
.d_SOC_RESET_CONTROL_CE_RST_MASK =
AR6320_SOC_RESET_CONTROL_CE_RST_MASK,
.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
.d_CPU_INTR_ADDRESS = AR6320_CPU_INTR_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ADDRESS =
AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
/* chip id start */
.d_SOC_CHIP_ID_ADDRESS = AR6320_SOC_CHIP_ID_ADDRESS,
.d_SOC_CHIP_ID_VERSION_MASK = AR6320_SOC_CHIP_ID_VERSION_MASK,
.d_SOC_CHIP_ID_VERSION_LSB = AR6320_SOC_CHIP_ID_VERSION_LSB,
.d_SOC_CHIP_ID_REVISION_MASK = AR6320_SOC_CHIP_ID_REVISION_MASK,
.d_SOC_CHIP_ID_REVISION_LSB = AR6320_SOC_CHIP_ID_REVISION_LSB,
/* chip id end */
.d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET,
.d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB,
.d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB,
.d_WLAN_DEBUG_INPUT_SEL_SRC_MASK =
AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK,
.d_WLAN_DEBUG_CONTROL_OFFSET = AR6320_WLAN_DEBUG_CONTROL_OFFSET,
.d_WLAN_DEBUG_CONTROL_ENABLE_MSB =
AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB,
.d_WLAN_DEBUG_CONTROL_ENABLE_LSB =
AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB,
.d_WLAN_DEBUG_CONTROL_ENABLE_MASK =
AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK,
.d_WLAN_DEBUG_OUT_OFFSET = AR6320_WLAN_DEBUG_OUT_OFFSET,
.d_WLAN_DEBUG_OUT_DATA_MSB = AR6320_WLAN_DEBUG_OUT_DATA_MSB,
.d_WLAN_DEBUG_OUT_DATA_LSB = AR6320_WLAN_DEBUG_OUT_DATA_LSB,
.d_WLAN_DEBUG_OUT_DATA_MASK = AR6320_WLAN_DEBUG_OUT_DATA_MASK,
.d_AMBA_DEBUG_BUS_OFFSET = AR6320_AMBA_DEBUG_BUS_OFFSET,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB =
AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB =
AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK =
AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK,
.d_AMBA_DEBUG_BUS_SEL_MSB = AR6320_AMBA_DEBUG_BUS_SEL_MSB,
.d_AMBA_DEBUG_BUS_SEL_LSB = AR6320_AMBA_DEBUG_BUS_SEL_LSB,
.d_AMBA_DEBUG_BUS_SEL_MASK = AR6320_AMBA_DEBUG_BUS_SEL_MASK,
};
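The table above pairs each register field's MASK with its LSB so shared host code can decode chip registers through a single level of indirection instead of per-chip #ifdefs. A minimal sketch of that decode step follows; the helper name and usage are illustrative assumptions, not this driver's API.

/* Illustrative sketch only: extract a field using a MASK/LSB pair from the
 * table, e.g. the chip version out of a raw SOC_CHIP_ID register read. */
static inline unsigned int regtable_get_field(unsigned int reg_val,
					      unsigned int mask,
					      unsigned int lsb)
{
	return (reg_val & mask) >> lsb;
}
/* version = regtable_get_field(chip_id,
 *			       ar6320_targetdef.d_SOC_CHIP_ID_VERSION_MASK,
 *			       ar6320_targetdef.d_SOC_CHIP_ID_VERSION_LSB); */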
struct hostdef_s ar6320_hostdef = {
.d_INT_STATUS_ENABLE_ERROR_LSB = AR6320_INT_STATUS_ENABLE_ERROR_LSB,
.d_INT_STATUS_ENABLE_ERROR_MASK = AR6320_INT_STATUS_ENABLE_ERROR_MASK,
.d_INT_STATUS_ENABLE_CPU_LSB = AR6320_INT_STATUS_ENABLE_CPU_LSB,
.d_INT_STATUS_ENABLE_CPU_MASK = AR6320_INT_STATUS_ENABLE_CPU_MASK,
.d_INT_STATUS_ENABLE_COUNTER_LSB =
AR6320_INT_STATUS_ENABLE_COUNTER_LSB,
.d_INT_STATUS_ENABLE_COUNTER_MASK =
AR6320_INT_STATUS_ENABLE_COUNTER_MASK,
.d_INT_STATUS_ENABLE_MBOX_DATA_LSB =
AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB,
.d_INT_STATUS_ENABLE_MBOX_DATA_MASK =
AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB =
AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK =
AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB =
AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK =
AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK,
.d_COUNTER_INT_STATUS_ENABLE_BIT_LSB =
AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB,
.d_COUNTER_INT_STATUS_ENABLE_BIT_MASK =
AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
.d_INT_STATUS_ENABLE_ADDRESS = AR6320_INT_STATUS_ENABLE_ADDRESS,
.d_CPU_INT_STATUS_ENABLE_BIT_LSB =
AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB,
.d_CPU_INT_STATUS_ENABLE_BIT_MASK =
AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK,
.d_HOST_INT_STATUS_ADDRESS = AR6320_HOST_INT_STATUS_ADDRESS,
.d_CPU_INT_STATUS_ADDRESS = AR6320_CPU_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_ADDRESS = AR6320_ERROR_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_WAKEUP_MASK = AR6320_ERROR_INT_STATUS_WAKEUP_MASK,
.d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320_ERROR_INT_STATUS_WAKEUP_LSB,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK =
AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB =
AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB,
.d_ERROR_INT_STATUS_TX_OVERFLOW_MASK =
AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
.d_ERROR_INT_STATUS_TX_OVERFLOW_LSB =
AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB,
.d_COUNT_DEC_ADDRESS = AR6320_COUNT_DEC_ADDRESS,
.d_HOST_INT_STATUS_CPU_MASK = AR6320_HOST_INT_STATUS_CPU_MASK,
.d_HOST_INT_STATUS_CPU_LSB = AR6320_HOST_INT_STATUS_CPU_LSB,
.d_HOST_INT_STATUS_ERROR_MASK = AR6320_HOST_INT_STATUS_ERROR_MASK,
.d_HOST_INT_STATUS_ERROR_LSB = AR6320_HOST_INT_STATUS_ERROR_LSB,
.d_HOST_INT_STATUS_COUNTER_MASK = AR6320_HOST_INT_STATUS_COUNTER_MASK,
.d_HOST_INT_STATUS_COUNTER_LSB = AR6320_HOST_INT_STATUS_COUNTER_LSB,
.d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320_RX_LOOKAHEAD_VALID_ADDRESS,
.d_WINDOW_DATA_ADDRESS = AR6320_WINDOW_DATA_ADDRESS,
.d_WINDOW_READ_ADDR_ADDRESS = AR6320_WINDOW_READ_ADDR_ADDRESS,
.d_WINDOW_WRITE_ADDR_ADDRESS = AR6320_WINDOW_WRITE_ADDR_ADDRESS,
.d_SOC_GLOBAL_RESET_ADDRESS = AR6320_SOC_GLOBAL_RESET_ADDRESS,
.d_RTC_STATE_ADDRESS = AR6320_RTC_STATE_ADDRESS,
.d_RTC_STATE_COLD_RESET_MASK = AR6320_RTC_STATE_COLD_RESET_MASK,
.d_PCIE_LOCAL_BASE_ADDRESS = AR6320_PCIE_LOCAL_BASE_ADDRESS,
.d_PCIE_SOC_WAKE_RESET = AR6320_PCIE_SOC_WAKE_RESET,
.d_PCIE_SOC_WAKE_ADDRESS = AR6320_PCIE_SOC_WAKE_ADDRESS,
.d_PCIE_SOC_WAKE_V_MASK = AR6320_PCIE_SOC_WAKE_V_MASK,
.d_RTC_STATE_V_MASK = AR6320_RTC_STATE_V_MASK,
.d_RTC_STATE_V_LSB = AR6320_RTC_STATE_V_LSB,
.d_FW_IND_EVENT_PENDING = AR6320_FW_IND_EVENT_PENDING,
.d_FW_IND_INITIALIZED = AR6320_FW_IND_INITIALIZED,
.d_FW_IND_HELPER = AR6320_FW_IND_HELPER,
.d_RTC_STATE_V_ON = AR6320_RTC_STATE_V_ON,
.d_MUX_ID_MASK = AR6320_MUX_ID_MASK,
.d_TRANSACTION_ID_MASK = AR6320_TRANSACTION_ID_MASK,
#if defined(SDIO_3_0)
.d_HOST_INT_STATUS_MBOX_DATA_MASK =
AR6320_HOST_INT_STATUS_MBOX_DATA_MASK,
.d_HOST_INT_STATUS_MBOX_DATA_LSB =
AR6320_HOST_INT_STATUS_MBOX_DATA_LSB,
#endif
.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
.d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS,
.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
.d_HOST_CE_COUNT = 8,
.d_ENABLE_MSI = 0,
};
struct ce_reg_def ar6320_ce_targetdef = {
/* copy_engine.c */
.d_DST_WR_INDEX_ADDRESS = AR6320_DST_WR_INDEX_ADDRESS,
.d_SRC_WATERMARK_ADDRESS = AR6320_SRC_WATERMARK_ADDRESS,
.d_SRC_WATERMARK_LOW_MASK = AR6320_SRC_WATERMARK_LOW_MASK,
.d_SRC_WATERMARK_HIGH_MASK = AR6320_SRC_WATERMARK_HIGH_MASK,
.d_DST_WATERMARK_LOW_MASK = AR6320_DST_WATERMARK_LOW_MASK,
.d_DST_WATERMARK_HIGH_MASK = AR6320_DST_WATERMARK_HIGH_MASK,
.d_CURRENT_SRRI_ADDRESS = AR6320_CURRENT_SRRI_ADDRESS,
.d_CURRENT_DRRI_ADDRESS = AR6320_CURRENT_DRRI_ADDRESS,
.d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK =
AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK =
AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK =
AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_DST_RING_LOW_WATERMARK_MASK =
AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_ADDRESS = AR6320_HOST_IS_ADDRESS,
.d_HOST_IS_COPY_COMPLETE_MASK = AR6320_HOST_IS_COPY_COMPLETE_MASK,
.d_CE_WRAPPER_BASE_ADDRESS = AR6320_CE_WRAPPER_BASE_ADDRESS,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS =
AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS,
.d_HOST_IE_ADDRESS = AR6320_HOST_IE_ADDRESS,
.d_HOST_IE_COPY_COMPLETE_MASK = AR6320_HOST_IE_COPY_COMPLETE_MASK,
.d_SR_BA_ADDRESS = AR6320_SR_BA_ADDRESS,
.d_SR_SIZE_ADDRESS = AR6320_SR_SIZE_ADDRESS,
.d_CE_CTRL1_ADDRESS = AR6320_CE_CTRL1_ADDRESS,
.d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320_CE_CTRL1_DMAX_LENGTH_MASK,
.d_DR_BA_ADDRESS = AR6320_DR_BA_ADDRESS,
.d_DR_SIZE_ADDRESS = AR6320_DR_SIZE_ADDRESS,
.d_MISC_IE_ADDRESS = AR6320_MISC_IE_ADDRESS,
.d_MISC_IS_AXI_ERR_MASK = AR6320_MISC_IS_AXI_ERR_MASK,
.d_MISC_IS_DST_ADDR_ERR_MASK = AR6320_MISC_IS_DST_ADDR_ERR_MASK,
.d_MISC_IS_SRC_LEN_ERR_MASK = AR6320_MISC_IS_SRC_LEN_ERR_MASK,
.d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK,
.d_MISC_IS_DST_RING_OVERFLOW_MASK =
AR6320_MISC_IS_DST_RING_OVERFLOW_MASK,
.d_MISC_IS_SRC_RING_OVERFLOW_MASK =
AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK,
.d_SRC_WATERMARK_LOW_LSB = AR6320_SRC_WATERMARK_LOW_LSB,
.d_SRC_WATERMARK_HIGH_LSB = AR6320_SRC_WATERMARK_HIGH_LSB,
.d_DST_WATERMARK_LOW_LSB = AR6320_DST_WATERMARK_LOW_LSB,
.d_DST_WATERMARK_HIGH_LSB = AR6320_DST_WATERMARK_HIGH_LSB,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK =
AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB =
AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB,
.d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320_CE_CTRL1_DMAX_LENGTH_LSB,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK =
AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK =
AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB =
AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB =
AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB,
.d_CE_WRAPPER_DEBUG_OFFSET = AR6320_CE_WRAPPER_DEBUG_OFFSET,
.d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320_CE_WRAPPER_DEBUG_SEL_MSB,
.d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320_CE_WRAPPER_DEBUG_SEL_LSB,
.d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320_CE_WRAPPER_DEBUG_SEL_MASK,
.d_CE_DEBUG_OFFSET = AR6320_CE_DEBUG_OFFSET,
.d_CE_DEBUG_SEL_MSB = AR6320_CE_DEBUG_SEL_MSB,
.d_CE_DEBUG_SEL_LSB = AR6320_CE_DEBUG_SEL_LSB,
.d_CE_DEBUG_SEL_MASK = AR6320_CE_DEBUG_SEL_MASK,
.d_CE0_BASE_ADDRESS = AR6320_CE0_BASE_ADDRESS,
.d_CE1_BASE_ADDRESS = AR6320_CE1_BASE_ADDRESS,
};
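The copy-engine table above holds offsets relative to each CE's base address (d_CE0_BASE_ADDRESS, d_CE1_BASE_ADDRESS, ...). A minimal sketch of how such a table might be consumed is shown below; the hif_read32()/hif_write32() accessors and the write-one-to-clear behaviour are assumptions, not this driver's API.

/* Assumed register accessors for the sketch below. */
unsigned int hif_read32(unsigned int addr);
void hif_write32(unsigned int addr, unsigned int val);

/* Illustrative sketch only: poll and acknowledge "copy complete" for one CE. */
static int ce_copy_complete(const struct ce_reg_def *ce, unsigned int ce_base)
{
	unsigned int is = hif_read32(ce_base + ce->d_HOST_IS_ADDRESS);

	if (!(is & ce->d_HOST_IS_COPY_COMPLETE_MASK))
		return 0;
	/* clear the status bit (write-one-to-clear in this sketch) */
	hif_write32(ce_base + ce->d_HOST_IS_ADDRESS,
		    ce->d_HOST_IS_COPY_COMPLETE_MASK);
	return 1;
}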
#endif

View File

@ -1,815 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _AR6320V2DEF_H_
#define _AR6320V2DEF_H_
/* Base Addresses */
#define AR6320V2_RTC_SOC_BASE_ADDRESS 0x00000800
#define AR6320V2_RTC_WMAC_BASE_ADDRESS 0x00001000
#define AR6320V2_MAC_COEX_BASE_ADDRESS 0x0000f000
#define AR6320V2_BT_COEX_BASE_ADDRESS 0x00002000
#define AR6320V2_SOC_PCIE_BASE_ADDRESS 0x00038000
#define AR6320V2_SOC_CORE_BASE_ADDRESS 0x0003a000
#define AR6320V2_WLAN_UART_BASE_ADDRESS 0x0000c000
#define AR6320V2_WLAN_SI_BASE_ADDRESS 0x00010000
#define AR6320V2_WLAN_GPIO_BASE_ADDRESS 0x00005000
#define AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000
#define AR6320V2_WLAN_MAC_BASE_ADDRESS 0x00010000
#define AR6320V2_EFUSE_BASE_ADDRESS 0x00024000
#define AR6320V2_FPGA_REG_BASE_ADDRESS 0x00039000
#define AR6320V2_WLAN_UART2_BASE_ADDRESS 0x00054c00
#define AR6320V2_CE_WRAPPER_BASE_ADDRESS 0x00034000
#define AR6320V2_CE0_BASE_ADDRESS 0x00034400
#define AR6320V2_CE1_BASE_ADDRESS 0x00034800
#define AR6320V2_CE2_BASE_ADDRESS 0x00034c00
#define AR6320V2_CE3_BASE_ADDRESS 0x00035000
#define AR6320V2_CE4_BASE_ADDRESS 0x00035400
#define AR6320V2_CE5_BASE_ADDRESS 0x00035800
#define AR6320V2_CE6_BASE_ADDRESS 0x00035c00
#define AR6320V2_CE7_BASE_ADDRESS 0x00036000
#define AR6320V2_DBI_BASE_ADDRESS 0x0003c000
#define AR6320V2_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800
#define AR6320V2_SCRATCH_3_ADDRESS 0x0028
#define AR6320V2_TARG_DRAM_START 0x00400000
#define AR6320V2_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0
#define AR6320V2_SOC_RESET_CONTROL_OFFSET 0x00000000
#define AR6320V2_SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000
#define AR6320V2_WLAN_GPIO_PIN0_ADDRESS 0x00000068
#define AR6320V2_WLAN_GPIO_PIN1_ADDRESS 0x0000006c
#define AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020
#define AR6320V2_SOC_LPO_CAL_OFFSET 0x000000e0
#define AR6320V2_WLAN_GPIO_PIN10_ADDRESS 0x00000090
#define AR6320V2_WLAN_GPIO_PIN11_ADDRESS 0x00000094
#define AR6320V2_WLAN_GPIO_PIN12_ADDRESS 0x00000098
#define AR6320V2_WLAN_GPIO_PIN13_ADDRESS 0x0000009c
#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0
#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define AR6320V2_SOC_LPO_CAL_ENABLE_LSB 20
#define AR6320V2_SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define AR6320V2_SI_CONFIG_I2C_LSB 16
#define AR6320V2_SI_CONFIG_I2C_MASK 0x00010000
#define AR6320V2_SI_CONFIG_POS_SAMPLE_LSB 7
#define AR6320V2_SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB 4
#define AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB 5
#define AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define AR6320V2_SI_CONFIG_DIVIDER_LSB 0
#define AR6320V2_SI_CONFIG_DIVIDER_MASK 0x0000000f
#define AR6320V2_SI_CONFIG_OFFSET 0x00000000
#define AR6320V2_SI_TX_DATA0_OFFSET 0x00000008
#define AR6320V2_SI_TX_DATA1_OFFSET 0x0000000c
#define AR6320V2_SI_RX_DATA0_OFFSET 0x00000010
#define AR6320V2_SI_RX_DATA1_OFFSET 0x00000014
#define AR6320V2_SI_CS_OFFSET 0x00000004
#define AR6320V2_SI_CS_DONE_ERR_MASK 0x00000400
#define AR6320V2_SI_CS_DONE_INT_MASK 0x00000200
#define AR6320V2_SI_CS_START_LSB 8
#define AR6320V2_SI_CS_START_MASK 0x00000100
#define AR6320V2_SI_CS_RX_CNT_LSB 4
#define AR6320V2_SI_CS_RX_CNT_MASK 0x000000f0
#define AR6320V2_SI_CS_TX_CNT_LSB 0
#define AR6320V2_SI_CS_TX_CNT_MASK 0x0000000f
#define AR6320V2_CE_COUNT 8
#define AR6320V2_SR_WR_INDEX_ADDRESS 0x003c
#define AR6320V2_DST_WATERMARK_ADDRESS 0x0050
#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB 14
#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000
#define AR6320V2_RX_MPDU_START_0_RETRY_LSB 14
#define AR6320V2_RX_MPDU_START_0_RETRY_MASK 0x00004000
#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB 16
#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000
#define AR6320V2_RX_MPDU_START_2_PN_47_32_LSB 0
#define AR6320V2_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff
#define AR6320V2_RX_MPDU_START_2_TID_LSB 28
#define AR6320V2_RX_MPDU_START_2_TID_MASK 0xf0000000
#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16
#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000
#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB 15
#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000
#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB 2
#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004
#define AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB 13
#define AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000
#define AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000
#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16
#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000
#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB 0
#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff
#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008
#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB 8
#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300
#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB 13
#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000
#define AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400
#define AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000
#define AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000
#define AR6320V2_DST_WR_INDEX_ADDRESS 0x0040
#define AR6320V2_SRC_WATERMARK_ADDRESS 0x004c
#define AR6320V2_SRC_WATERMARK_LOW_MASK 0xffff0000
#define AR6320V2_SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define AR6320V2_DST_WATERMARK_LOW_MASK 0xffff0000
#define AR6320V2_DST_WATERMARK_HIGH_MASK 0x0000ffff
#define AR6320V2_CURRENT_SRRI_ADDRESS 0x0044
#define AR6320V2_CURRENT_DRRI_ADDRESS 0x0048
#define AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define AR6320V2_HOST_IS_ADDRESS 0x0030
#define AR6320V2_HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define AR6320V2_HOST_IE_ADDRESS 0x002c
#define AR6320V2_HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define AR6320V2_SR_BA_ADDRESS 0x0000
#define AR6320V2_SR_SIZE_ADDRESS 0x0004
#define AR6320V2_CE_CTRL1_ADDRESS 0x0010
#define AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define AR6320V2_DR_BA_ADDRESS 0x0008
#define AR6320V2_DR_SIZE_ADDRESS 0x000c
#define AR6320V2_MISC_IE_ADDRESS 0x0034
#define AR6320V2_MISC_IS_AXI_ERR_MASK 0x00000400
#define AR6320V2_MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define AR6320V2_MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define AR6320V2_SRC_WATERMARK_LOW_LSB 16
#define AR6320V2_SRC_WATERMARK_HIGH_LSB 0
#define AR6320V2_DST_WATERMARK_LOW_LSB 16
#define AR6320V2_DST_WATERMARK_HIGH_LSB 0
#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB 0
#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5
#define AR6320V2_SOC_GLOBAL_RESET_ADDRESS 0x0008
#define AR6320V2_RTC_STATE_ADDRESS 0x0000
#define AR6320V2_RTC_STATE_COLD_RESET_MASK 0x00002000
#define AR6320V2_PCIE_SOC_WAKE_RESET 0x00000000
#define AR6320V2_PCIE_SOC_WAKE_ADDRESS 0x0004
#define AR6320V2_PCIE_SOC_WAKE_V_MASK 0x00000001
#define AR6320V2_RTC_STATE_V_MASK 0x00000007
#define AR6320V2_RTC_STATE_V_LSB 0
#define AR6320V2_RTC_STATE_V_ON 3
#define AR6320V2_MUX_ID_MASK 0x0000
#define AR6320V2_TRANSACTION_ID_MASK 0x3fff
#define AR6320V2_PCIE_LOCAL_BASE_ADDRESS 0x80000
#define AR6320V2_FW_IND_EVENT_PENDING 1
#define AR6320V2_FW_IND_INITIALIZED 2
#define AR6320V2_FW_IND_HELPER 4
#define AR6320V2_PCIE_INTR_ENABLE_ADDRESS 0x0008
#define AR6320V2_PCIE_INTR_CLR_ADDRESS 0x0014
#define AR6320V2_PCIE_INTR_FIRMWARE_MASK 0x00000400
#define AR6320V2_PCIE_INTR_CE0_MASK 0x00000800
#define AR6320V2_PCIE_INTR_CE_MASK_ALL 0x0007f800
#define AR6320V2_PCIE_INTR_CAUSE_ADDRESS 0x000c
#define AR6320V2_CPU_INTR_ADDRESS 0x0010
#define AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define AR6320V2_SOC_RESET_CONTROL_ADDRESS 0x00000000
#define AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001
#define AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define AR6320V2_CORE_CTRL_ADDRESS 0x0000
#define AR6320V2_CORE_CTRL_CPU_INTR_MASK 0x00002000
#define AR6320V2_LOCAL_SCRATCH_OFFSET 0x000000c0
#define AR6320V2_CLOCK_GPIO_OFFSET 0xffffffff
#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define AR6320V2_SOC_CHIP_ID_ADDRESS 0x000000f0
#define AR6320V2_SOC_CHIP_ID_VERSION_MASK 0xfffc0000
#define AR6320V2_SOC_CHIP_ID_VERSION_LSB 18
#define AR6320V2_SOC_CHIP_ID_REVISION_MASK 0x00000f00
#define AR6320V2_SOC_CHIP_ID_REVISION_LSB 8
#define AR6320V2_SOC_POWER_REG_OFFSET 0x0000010c
/* Copy Engine Debug */
#define AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c
#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3
#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0
#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f
#define AR6320V2_WLAN_DEBUG_CONTROL_OFFSET 0x00000108
#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB 0
#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB 0
#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001
#define AR6320V2_WLAN_DEBUG_OUT_OFFSET 0x00000110
#define AR6320V2_WLAN_DEBUG_OUT_DATA_MSB 19
#define AR6320V2_WLAN_DEBUG_OUT_DATA_LSB 0
#define AR6320V2_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff
#define AR6320V2_AMBA_DEBUG_BUS_OFFSET 0x0000011c
#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13
#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8
#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00
#define AR6320V2_AMBA_DEBUG_BUS_SEL_MSB 4
#define AR6320V2_AMBA_DEBUG_BUS_SEL_LSB 0
#define AR6320V2_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f
#define AR6320V2_CE_WRAPPER_DEBUG_OFFSET 0x0008
#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB 5
#define AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB 0
#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f
#define AR6320V2_CE_DEBUG_OFFSET 0x0054
#define AR6320V2_CE_DEBUG_SEL_MSB 5
#define AR6320V2_CE_DEBUG_SEL_LSB 0
#define AR6320V2_CE_DEBUG_SEL_MASK 0x0000003f
/* End */
/* PLL start */
#define AR6320V2_EFUSE_OFFSET 0x0000032c
#define AR6320V2_EFUSE_XTAL_SEL_MSB 10
#define AR6320V2_EFUSE_XTAL_SEL_LSB 8
#define AR6320V2_EFUSE_XTAL_SEL_MASK 0x00000700
#define AR6320V2_BB_PLL_CONFIG_OFFSET 0x000002f4
#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB 20
#define AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB 18
#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000
#define AR6320V2_BB_PLL_CONFIG_FRAC_MSB 17
#define AR6320V2_BB_PLL_CONFIG_FRAC_LSB 0
#define AR6320V2_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff
#define AR6320V2_WLAN_PLL_SETTLE_TIME_MSB 10
#define AR6320V2_WLAN_PLL_SETTLE_TIME_LSB 0
#define AR6320V2_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff
#define AR6320V2_WLAN_PLL_SETTLE_OFFSET 0x0018
#define AR6320V2_WLAN_PLL_SETTLE_SW_MASK 0x000007ff
#define AR6320V2_WLAN_PLL_SETTLE_RSTMASK 0xffffffff
#define AR6320V2_WLAN_PLL_SETTLE_RESET 0x00000400
#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB 18
#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB 18
#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB 16
#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB 16
#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET 0x1
#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB 15
#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB 14
#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000
#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0
#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB 13
#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB 10
#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00
#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET 0x0
#define AR6320V2_WLAN_PLL_CONTROL_DIV_MSB 9
#define AR6320V2_WLAN_PLL_CONTROL_DIV_LSB 0
#define AR6320V2_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
#define AR6320V2_WLAN_PLL_CONTROL_DIV_RESET 0x11
#define AR6320V2_WLAN_PLL_CONTROL_OFFSET 0x0014
#define AR6320V2_WLAN_PLL_CONTROL_SW_MASK 0x001fffff
#define AR6320V2_WLAN_PLL_CONTROL_RSTMASK 0xffffffff
#define AR6320V2_WLAN_PLL_CONTROL_RESET 0x00010011
#define AR6320V2_SOC_CORE_CLK_CTRL_OFFSET 0x00000114
#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB 2
#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB 0
#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007
#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5
#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5
#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0
#define AR6320V2_RTC_SYNC_STATUS_OFFSET 0x0244
#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020
#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB 1
#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0
#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
/* PLL end */
#define AR6320V2_PCIE_INTR_CE_MASK(n) \
(AR6320V2_PCIE_INTR_CE0_MASK << (n))
#define AR6320V2_DRAM_BASE_ADDRESS AR6320V2_TARG_DRAM_START
#define AR6320V2_FW_INDICATOR_ADDRESS \
(AR6320V2_SOC_CORE_BASE_ADDRESS + AR6320V2_SCRATCH_3_ADDRESS)
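The two helper macros above derive per-CE interrupt masks and the firmware indicator address from constants defined earlier in this file. For reference, expanding them with those constants gives the following (a worked example, not additional definitions):

/* Worked expansion using the constants defined above:
 *   AR6320V2_PCIE_INTR_CE_MASK(3) == 0x00000800 << 3 == 0x00004000
 *   AR6320V2_FW_INDICATOR_ADDRESS == 0x0003a000 + 0x0028 == 0x0003a028
 * CE0..CE7 masks together cover AR6320V2_PCIE_INTR_CE_MASK_ALL (0x0007f800),
 * and the firmware indicator is SCRATCH_3 within the SOC_CORE block. */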
#define AR6320V2_SYSTEM_SLEEP_OFFSET AR6320V2_SOC_SYSTEM_SLEEP_OFFSET
#define AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET 0x002c
#define AR6320V2_WLAN_RESET_CONTROL_OFFSET AR6320V2_SOC_RESET_CONTROL_OFFSET
#define AR6320V2_CLOCK_CONTROL_OFFSET AR6320V2_SOC_CLOCK_CONTROL_OFFSET
#define AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK \
AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define AR6320V2_RESET_CONTROL_MBOX_RST_MASK 0x00000004
#define AR6320V2_RESET_CONTROL_SI0_RST_MASK \
AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK
#define AR6320V2_GPIO_BASE_ADDRESS AR6320V2_WLAN_GPIO_BASE_ADDRESS
#define AR6320V2_GPIO_PIN0_OFFSET AR6320V2_WLAN_GPIO_PIN0_ADDRESS
#define AR6320V2_GPIO_PIN1_OFFSET AR6320V2_WLAN_GPIO_PIN1_ADDRESS
#define AR6320V2_GPIO_PIN0_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK
#define AR6320V2_GPIO_PIN1_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK
#define AR6320V2_SI_BASE_ADDRESS 0x00050000
#define AR6320V2_CPU_CLOCK_OFFSET AR6320V2_SOC_CPU_CLOCK_OFFSET
#define AR6320V2_LPO_CAL_OFFSET AR6320V2_SOC_LPO_CAL_OFFSET
#define AR6320V2_GPIO_PIN10_OFFSET AR6320V2_WLAN_GPIO_PIN10_ADDRESS
#define AR6320V2_GPIO_PIN11_OFFSET AR6320V2_WLAN_GPIO_PIN11_ADDRESS
#define AR6320V2_GPIO_PIN12_OFFSET AR6320V2_WLAN_GPIO_PIN12_ADDRESS
#define AR6320V2_GPIO_PIN13_OFFSET AR6320V2_WLAN_GPIO_PIN13_ADDRESS
#define AR6320V2_CPU_CLOCK_STANDARD_LSB AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB
#define AR6320V2_CPU_CLOCK_STANDARD_MASK AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK
#define AR6320V2_LPO_CAL_ENABLE_LSB AR6320V2_SOC_LPO_CAL_ENABLE_LSB
#define AR6320V2_LPO_CAL_ENABLE_MASK AR6320V2_SOC_LPO_CAL_ENABLE_MASK
#define AR6320V2_ANALOG_INTF_BASE_ADDRESS \
AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS
#define AR6320V2_MBOX_BASE_ADDRESS 0x00008000
#define AR6320V2_INT_STATUS_ENABLE_ERROR_LSB 7
#define AR6320V2_INT_STATUS_ENABLE_ERROR_MASK 0x00000080
#define AR6320V2_INT_STATUS_ENABLE_CPU_LSB 6
#define AR6320V2_INT_STATUS_ENABLE_CPU_MASK 0x00000040
#define AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB 4
#define AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB 0
#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17
#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000
#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16
#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000
#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24
#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000
#define AR6320V2_INT_STATUS_ENABLE_ADDRESS 0x0828
#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB 8
#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00
#define AR6320V2_HOST_INT_STATUS_ADDRESS 0x0800
#define AR6320V2_CPU_INT_STATUS_ADDRESS 0x0801
#define AR6320V2_ERROR_INT_STATUS_ADDRESS 0x0802
#define AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000
#define AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB 18
#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000
#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17
#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000
#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16
#define AR6320V2_COUNT_DEC_ADDRESS 0x0840
#define AR6320V2_HOST_INT_STATUS_CPU_MASK 0x00000040
#define AR6320V2_HOST_INT_STATUS_CPU_LSB 6
#define AR6320V2_HOST_INT_STATUS_ERROR_MASK 0x00000080
#define AR6320V2_HOST_INT_STATUS_ERROR_LSB 7
#define AR6320V2_HOST_INT_STATUS_COUNTER_MASK 0x00000010
#define AR6320V2_HOST_INT_STATUS_COUNTER_LSB 4
#define AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS 0x0805
#define AR6320V2_WINDOW_DATA_ADDRESS 0x0874
#define AR6320V2_WINDOW_READ_ADDR_ADDRESS 0x087c
#define AR6320V2_WINDOW_WRITE_ADDR_ADDRESS 0x0878
struct targetdef_s ar6320v2_targetdef = {
.d_RTC_SOC_BASE_ADDRESS = AR6320V2_RTC_SOC_BASE_ADDRESS,
.d_RTC_WMAC_BASE_ADDRESS = AR6320V2_RTC_WMAC_BASE_ADDRESS,
.d_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_DISABLE_LSB =
AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB,
.d_WLAN_SYSTEM_SLEEP_DISABLE_MASK =
AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK,
.d_CLOCK_CONTROL_OFFSET = AR6320V2_CLOCK_CONTROL_OFFSET,
.d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK,
.d_RESET_CONTROL_OFFSET = AR6320V2_SOC_RESET_CONTROL_OFFSET,
.d_RESET_CONTROL_MBOX_RST_MASK = AR6320V2_RESET_CONTROL_MBOX_RST_MASK,
.d_RESET_CONTROL_SI0_RST_MASK = AR6320V2_RESET_CONTROL_SI0_RST_MASK,
.d_WLAN_RESET_CONTROL_OFFSET = AR6320V2_WLAN_RESET_CONTROL_OFFSET,
.d_WLAN_RESET_CONTROL_COLD_RST_MASK =
AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK,
.d_WLAN_RESET_CONTROL_WARM_RST_MASK =
AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK,
.d_GPIO_BASE_ADDRESS = AR6320V2_GPIO_BASE_ADDRESS,
.d_GPIO_PIN0_OFFSET = AR6320V2_GPIO_PIN0_OFFSET,
.d_GPIO_PIN1_OFFSET = AR6320V2_GPIO_PIN1_OFFSET,
.d_GPIO_PIN0_CONFIG_MASK = AR6320V2_GPIO_PIN0_CONFIG_MASK,
.d_GPIO_PIN1_CONFIG_MASK = AR6320V2_GPIO_PIN1_CONFIG_MASK,
.d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB,
.d_SI_CONFIG_BIDIR_OD_DATA_MASK =
AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK,
.d_SI_CONFIG_I2C_LSB = AR6320V2_SI_CONFIG_I2C_LSB,
.d_SI_CONFIG_I2C_MASK = AR6320V2_SI_CONFIG_I2C_MASK,
.d_SI_CONFIG_POS_SAMPLE_LSB = AR6320V2_SI_CONFIG_POS_SAMPLE_LSB,
.d_SI_CONFIG_POS_SAMPLE_MASK = AR6320V2_SI_CONFIG_POS_SAMPLE_MASK,
.d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB,
.d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK,
.d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB,
.d_SI_CONFIG_INACTIVE_DATA_MASK =
AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK,
.d_SI_CONFIG_DIVIDER_LSB = AR6320V2_SI_CONFIG_DIVIDER_LSB,
.d_SI_CONFIG_DIVIDER_MASK = AR6320V2_SI_CONFIG_DIVIDER_MASK,
.d_SI_BASE_ADDRESS = AR6320V2_SI_BASE_ADDRESS,
.d_SI_CONFIG_OFFSET = AR6320V2_SI_CONFIG_OFFSET,
.d_SI_TX_DATA0_OFFSET = AR6320V2_SI_TX_DATA0_OFFSET,
.d_SI_TX_DATA1_OFFSET = AR6320V2_SI_TX_DATA1_OFFSET,
.d_SI_RX_DATA0_OFFSET = AR6320V2_SI_RX_DATA0_OFFSET,
.d_SI_RX_DATA1_OFFSET = AR6320V2_SI_RX_DATA1_OFFSET,
.d_SI_CS_OFFSET = AR6320V2_SI_CS_OFFSET,
.d_SI_CS_DONE_ERR_MASK = AR6320V2_SI_CS_DONE_ERR_MASK,
.d_SI_CS_DONE_INT_MASK = AR6320V2_SI_CS_DONE_INT_MASK,
.d_SI_CS_START_LSB = AR6320V2_SI_CS_START_LSB,
.d_SI_CS_START_MASK = AR6320V2_SI_CS_START_MASK,
.d_SI_CS_RX_CNT_LSB = AR6320V2_SI_CS_RX_CNT_LSB,
.d_SI_CS_RX_CNT_MASK = AR6320V2_SI_CS_RX_CNT_MASK,
.d_SI_CS_TX_CNT_LSB = AR6320V2_SI_CS_TX_CNT_LSB,
.d_SI_CS_TX_CNT_MASK = AR6320V2_SI_CS_TX_CNT_MASK,
.d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ,
.d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ,
.d_MBOX_BASE_ADDRESS = AR6320V2_MBOX_BASE_ADDRESS,
.d_LOCAL_SCRATCH_OFFSET = AR6320V2_LOCAL_SCRATCH_OFFSET,
.d_CPU_CLOCK_OFFSET = AR6320V2_CPU_CLOCK_OFFSET,
.d_LPO_CAL_OFFSET = AR6320V2_LPO_CAL_OFFSET,
.d_GPIO_PIN10_OFFSET = AR6320V2_GPIO_PIN10_OFFSET,
.d_GPIO_PIN11_OFFSET = AR6320V2_GPIO_PIN11_OFFSET,
.d_GPIO_PIN12_OFFSET = AR6320V2_GPIO_PIN12_OFFSET,
.d_GPIO_PIN13_OFFSET = AR6320V2_GPIO_PIN13_OFFSET,
.d_CLOCK_GPIO_OFFSET = AR6320V2_CLOCK_GPIO_OFFSET,
.d_CPU_CLOCK_STANDARD_LSB = AR6320V2_CPU_CLOCK_STANDARD_LSB,
.d_CPU_CLOCK_STANDARD_MASK = AR6320V2_CPU_CLOCK_STANDARD_MASK,
.d_LPO_CAL_ENABLE_LSB = AR6320V2_LPO_CAL_ENABLE_LSB,
.d_LPO_CAL_ENABLE_MASK = AR6320V2_LPO_CAL_ENABLE_MASK,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB =
AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK =
AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK,
.d_ANALOG_INTF_BASE_ADDRESS = AR6320V2_ANALOG_INTF_BASE_ADDRESS,
.d_WLAN_MAC_BASE_ADDRESS = AR6320V2_WLAN_MAC_BASE_ADDRESS,
.d_FW_INDICATOR_ADDRESS = AR6320V2_FW_INDICATOR_ADDRESS,
.d_DRAM_BASE_ADDRESS = AR6320V2_DRAM_BASE_ADDRESS,
.d_SOC_CORE_BASE_ADDRESS = AR6320V2_SOC_CORE_BASE_ADDRESS,
.d_CORE_CTRL_ADDRESS = AR6320V2_CORE_CTRL_ADDRESS,
.d_CE_COUNT = AR6320V2_CE_COUNT,
.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
.d_PCIE_INTR_ENABLE_ADDRESS = AR6320V2_PCIE_INTR_ENABLE_ADDRESS,
.d_PCIE_INTR_CLR_ADDRESS = AR6320V2_PCIE_INTR_CLR_ADDRESS,
.d_PCIE_INTR_FIRMWARE_MASK = AR6320V2_PCIE_INTR_FIRMWARE_MASK,
.d_PCIE_INTR_CE_MASK_ALL = AR6320V2_PCIE_INTR_CE_MASK_ALL,
.d_CORE_CTRL_CPU_INTR_MASK = AR6320V2_CORE_CTRL_CPU_INTR_MASK,
.d_SR_WR_INDEX_ADDRESS = AR6320V2_SR_WR_INDEX_ADDRESS,
.d_DST_WATERMARK_ADDRESS = AR6320V2_DST_WATERMARK_ADDRESS,
/* htt_rx.c */
.d_RX_MSDU_END_4_FIRST_MSDU_MASK =
AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK,
.d_RX_MSDU_END_4_FIRST_MSDU_LSB =
AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB,
.d_RX_MPDU_START_0_RETRY_MASK =
AR6320V2_RX_MPDU_START_0_RETRY_MASK,
.d_RX_MPDU_START_0_SEQ_NUM_MASK =
AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK,
.d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB,
.d_RX_MPDU_START_2_PN_47_32_LSB =
AR6320V2_RX_MPDU_START_2_PN_47_32_LSB,
.d_RX_MPDU_START_2_PN_47_32_MASK =
AR6320V2_RX_MPDU_START_2_PN_47_32_MASK,
.d_RX_MPDU_START_2_TID_LSB =
AR6320V2_RX_MPDU_START_2_TID_LSB,
.d_RX_MPDU_START_2_TID_MASK =
AR6320V2_RX_MPDU_START_2_TID_MASK,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK =
AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB =
AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB,
.d_RX_MSDU_END_4_LAST_MSDU_MASK =
AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK,
.d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB,
.d_RX_ATTENTION_0_MCAST_BCAST_MASK =
AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK,
.d_RX_ATTENTION_0_MCAST_BCAST_LSB =
AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB,
.d_RX_ATTENTION_0_FRAGMENT_MASK =
AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK,
.d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB,
.d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK =
AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK =
AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB =
AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB,
.d_RX_MSDU_START_0_MSDU_LENGTH_MASK =
AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK,
.d_RX_MSDU_START_0_MSDU_LENGTH_LSB =
AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB,
.d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET =
AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET,
.d_RX_MSDU_START_2_DECAP_FORMAT_MASK =
AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK,
.d_RX_MSDU_START_2_DECAP_FORMAT_LSB =
AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB,
.d_RX_MPDU_START_0_ENCRYPTED_MASK =
AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK,
.d_RX_MPDU_START_0_ENCRYPTED_LSB =
AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB,
.d_RX_ATTENTION_0_MORE_DATA_MASK =
AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK,
.d_RX_ATTENTION_0_MSDU_DONE_MASK =
AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK,
.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
/* PLL start */
.d_EFUSE_OFFSET = AR6320V2_EFUSE_OFFSET,
.d_EFUSE_XTAL_SEL_MSB = AR6320V2_EFUSE_XTAL_SEL_MSB,
.d_EFUSE_XTAL_SEL_LSB = AR6320V2_EFUSE_XTAL_SEL_LSB,
.d_EFUSE_XTAL_SEL_MASK = AR6320V2_EFUSE_XTAL_SEL_MASK,
.d_BB_PLL_CONFIG_OFFSET = AR6320V2_BB_PLL_CONFIG_OFFSET,
.d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB,
.d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB,
.d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK,
.d_BB_PLL_CONFIG_FRAC_MSB = AR6320V2_BB_PLL_CONFIG_FRAC_MSB,
.d_BB_PLL_CONFIG_FRAC_LSB = AR6320V2_BB_PLL_CONFIG_FRAC_LSB,
.d_BB_PLL_CONFIG_FRAC_MASK = AR6320V2_BB_PLL_CONFIG_FRAC_MASK,
.d_WLAN_PLL_SETTLE_TIME_MSB = AR6320V2_WLAN_PLL_SETTLE_TIME_MSB,
.d_WLAN_PLL_SETTLE_TIME_LSB = AR6320V2_WLAN_PLL_SETTLE_TIME_LSB,
.d_WLAN_PLL_SETTLE_TIME_MASK = AR6320V2_WLAN_PLL_SETTLE_TIME_MASK,
.d_WLAN_PLL_SETTLE_OFFSET = AR6320V2_WLAN_PLL_SETTLE_OFFSET,
.d_WLAN_PLL_SETTLE_SW_MASK = AR6320V2_WLAN_PLL_SETTLE_SW_MASK,
.d_WLAN_PLL_SETTLE_RSTMASK = AR6320V2_WLAN_PLL_SETTLE_RSTMASK,
.d_WLAN_PLL_SETTLE_RESET = AR6320V2_WLAN_PLL_SETTLE_RESET,
.d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB,
.d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB,
.d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK,
.d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB,
.d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB,
.d_WLAN_PLL_CONTROL_BYPASS_MASK =
AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK,
.d_WLAN_PLL_CONTROL_BYPASS_RESET =
AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET,
.d_WLAN_PLL_CONTROL_CLK_SEL_MSB =
AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB,
.d_WLAN_PLL_CONTROL_CLK_SEL_LSB =
AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB,
.d_WLAN_PLL_CONTROL_CLK_SEL_MASK =
AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK,
.d_WLAN_PLL_CONTROL_CLK_SEL_RESET =
AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET,
.d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB,
.d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB,
.d_WLAN_PLL_CONTROL_REFDIV_MASK =
AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK,
.d_WLAN_PLL_CONTROL_REFDIV_RESET =
AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET,
.d_WLAN_PLL_CONTROL_DIV_MSB = AR6320V2_WLAN_PLL_CONTROL_DIV_MSB,
.d_WLAN_PLL_CONTROL_DIV_LSB = AR6320V2_WLAN_PLL_CONTROL_DIV_LSB,
.d_WLAN_PLL_CONTROL_DIV_MASK = AR6320V2_WLAN_PLL_CONTROL_DIV_MASK,
.d_WLAN_PLL_CONTROL_DIV_RESET = AR6320V2_WLAN_PLL_CONTROL_DIV_RESET,
.d_WLAN_PLL_CONTROL_OFFSET = AR6320V2_WLAN_PLL_CONTROL_OFFSET,
.d_WLAN_PLL_CONTROL_SW_MASK = AR6320V2_WLAN_PLL_CONTROL_SW_MASK,
.d_WLAN_PLL_CONTROL_RSTMASK = AR6320V2_WLAN_PLL_CONTROL_RSTMASK,
.d_WLAN_PLL_CONTROL_RESET = AR6320V2_WLAN_PLL_CONTROL_RESET,
.d_SOC_CORE_CLK_CTRL_OFFSET = AR6320V2_SOC_CORE_CLK_CTRL_OFFSET,
.d_SOC_CORE_CLK_CTRL_DIV_MSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB,
.d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB,
.d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK,
.d_RTC_SYNC_STATUS_PLL_CHANGING_MSB =
AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB,
.d_RTC_SYNC_STATUS_PLL_CHANGING_LSB =
AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB,
.d_RTC_SYNC_STATUS_PLL_CHANGING_MASK =
AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK,
.d_RTC_SYNC_STATUS_PLL_CHANGING_RESET =
AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET,
.d_RTC_SYNC_STATUS_OFFSET = AR6320V2_RTC_SYNC_STATUS_OFFSET,
.d_SOC_CPU_CLOCK_OFFSET = AR6320V2_SOC_CPU_CLOCK_OFFSET,
.d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB,
.d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB,
.d_SOC_CPU_CLOCK_STANDARD_MASK = AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK,
/* PLL end */
.d_SOC_POWER_REG_OFFSET = AR6320V2_SOC_POWER_REG_OFFSET,
.d_PCIE_INTR_CAUSE_ADDRESS = AR6320V2_PCIE_INTR_CAUSE_ADDRESS,
.d_SOC_RESET_CONTROL_ADDRESS = AR6320V2_SOC_RESET_CONTROL_ADDRESS,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK =
AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB =
AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB,
.d_SOC_RESET_CONTROL_CE_RST_MASK =
AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK,
.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
.d_CPU_INTR_ADDRESS = AR6320V2_CPU_INTR_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ADDRESS =
AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
/* chip id start */
.d_SOC_CHIP_ID_ADDRESS = AR6320V2_SOC_CHIP_ID_ADDRESS,
.d_SOC_CHIP_ID_VERSION_MASK = AR6320V2_SOC_CHIP_ID_VERSION_MASK,
.d_SOC_CHIP_ID_VERSION_LSB = AR6320V2_SOC_CHIP_ID_VERSION_LSB,
.d_SOC_CHIP_ID_REVISION_MASK = AR6320V2_SOC_CHIP_ID_REVISION_MASK,
.d_SOC_CHIP_ID_REVISION_LSB = AR6320V2_SOC_CHIP_ID_REVISION_LSB,
/* chip id end */
.d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET,
.d_WLAN_DEBUG_INPUT_SEL_SRC_MSB =
AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB,
.d_WLAN_DEBUG_INPUT_SEL_SRC_LSB =
AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB,
.d_WLAN_DEBUG_INPUT_SEL_SRC_MASK =
AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK,
.d_WLAN_DEBUG_CONTROL_OFFSET = AR6320V2_WLAN_DEBUG_CONTROL_OFFSET,
.d_WLAN_DEBUG_CONTROL_ENABLE_MSB =
AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB,
.d_WLAN_DEBUG_CONTROL_ENABLE_LSB =
AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB,
.d_WLAN_DEBUG_CONTROL_ENABLE_MASK =
AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK,
.d_WLAN_DEBUG_OUT_OFFSET = AR6320V2_WLAN_DEBUG_OUT_OFFSET,
.d_WLAN_DEBUG_OUT_DATA_MSB = AR6320V2_WLAN_DEBUG_OUT_DATA_MSB,
.d_WLAN_DEBUG_OUT_DATA_LSB = AR6320V2_WLAN_DEBUG_OUT_DATA_LSB,
.d_WLAN_DEBUG_OUT_DATA_MASK = AR6320V2_WLAN_DEBUG_OUT_DATA_MASK,
.d_AMBA_DEBUG_BUS_OFFSET = AR6320V2_AMBA_DEBUG_BUS_OFFSET,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB =
AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB =
AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB,
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK =
AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK,
.d_AMBA_DEBUG_BUS_SEL_MSB = AR6320V2_AMBA_DEBUG_BUS_SEL_MSB,
.d_AMBA_DEBUG_BUS_SEL_LSB = AR6320V2_AMBA_DEBUG_BUS_SEL_LSB,
.d_AMBA_DEBUG_BUS_SEL_MASK = AR6320V2_AMBA_DEBUG_BUS_SEL_MASK,
};
struct hostdef_s ar6320v2_hostdef = {
.d_INT_STATUS_ENABLE_ERROR_LSB = AR6320V2_INT_STATUS_ENABLE_ERROR_LSB,
.d_INT_STATUS_ENABLE_ERROR_MASK =
AR6320V2_INT_STATUS_ENABLE_ERROR_MASK,
.d_INT_STATUS_ENABLE_CPU_LSB = AR6320V2_INT_STATUS_ENABLE_CPU_LSB,
.d_INT_STATUS_ENABLE_CPU_MASK = AR6320V2_INT_STATUS_ENABLE_CPU_MASK,
.d_INT_STATUS_ENABLE_COUNTER_LSB =
AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB,
.d_INT_STATUS_ENABLE_COUNTER_MASK =
AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK,
.d_INT_STATUS_ENABLE_MBOX_DATA_LSB =
AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB,
.d_INT_STATUS_ENABLE_MBOX_DATA_MASK =
AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB =
AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK =
AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB =
AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK =
AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK,
.d_COUNTER_INT_STATUS_ENABLE_BIT_LSB =
AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB,
.d_COUNTER_INT_STATUS_ENABLE_BIT_MASK =
AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
.d_INT_STATUS_ENABLE_ADDRESS = AR6320V2_INT_STATUS_ENABLE_ADDRESS,
.d_CPU_INT_STATUS_ENABLE_BIT_LSB =
AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB,
.d_CPU_INT_STATUS_ENABLE_BIT_MASK =
AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK,
.d_HOST_INT_STATUS_ADDRESS = AR6320V2_HOST_INT_STATUS_ADDRESS,
.d_CPU_INT_STATUS_ADDRESS = AR6320V2_CPU_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_ADDRESS = AR6320V2_ERROR_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_WAKEUP_MASK =
AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK,
.d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK =
AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB =
AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB,
.d_ERROR_INT_STATUS_TX_OVERFLOW_MASK =
AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
.d_ERROR_INT_STATUS_TX_OVERFLOW_LSB =
AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB,
.d_COUNT_DEC_ADDRESS = AR6320V2_COUNT_DEC_ADDRESS,
.d_HOST_INT_STATUS_CPU_MASK = AR6320V2_HOST_INT_STATUS_CPU_MASK,
.d_HOST_INT_STATUS_CPU_LSB = AR6320V2_HOST_INT_STATUS_CPU_LSB,
.d_HOST_INT_STATUS_ERROR_MASK = AR6320V2_HOST_INT_STATUS_ERROR_MASK,
.d_HOST_INT_STATUS_ERROR_LSB = AR6320V2_HOST_INT_STATUS_ERROR_LSB,
.d_HOST_INT_STATUS_COUNTER_MASK =
AR6320V2_HOST_INT_STATUS_COUNTER_MASK,
.d_HOST_INT_STATUS_COUNTER_LSB = AR6320V2_HOST_INT_STATUS_COUNTER_LSB,
.d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS,
.d_WINDOW_DATA_ADDRESS = AR6320V2_WINDOW_DATA_ADDRESS,
.d_WINDOW_READ_ADDR_ADDRESS = AR6320V2_WINDOW_READ_ADDR_ADDRESS,
.d_WINDOW_WRITE_ADDR_ADDRESS = AR6320V2_WINDOW_WRITE_ADDR_ADDRESS,
.d_SOC_GLOBAL_RESET_ADDRESS = AR6320V2_SOC_GLOBAL_RESET_ADDRESS,
.d_RTC_STATE_ADDRESS = AR6320V2_RTC_STATE_ADDRESS,
.d_RTC_STATE_COLD_RESET_MASK = AR6320V2_RTC_STATE_COLD_RESET_MASK,
.d_PCIE_LOCAL_BASE_ADDRESS = AR6320V2_PCIE_LOCAL_BASE_ADDRESS,
.d_PCIE_SOC_WAKE_RESET = AR6320V2_PCIE_SOC_WAKE_RESET,
.d_PCIE_SOC_WAKE_ADDRESS = AR6320V2_PCIE_SOC_WAKE_ADDRESS,
.d_PCIE_SOC_WAKE_V_MASK = AR6320V2_PCIE_SOC_WAKE_V_MASK,
.d_RTC_STATE_V_MASK = AR6320V2_RTC_STATE_V_MASK,
.d_RTC_STATE_V_LSB = AR6320V2_RTC_STATE_V_LSB,
.d_FW_IND_EVENT_PENDING = AR6320V2_FW_IND_EVENT_PENDING,
.d_FW_IND_INITIALIZED = AR6320V2_FW_IND_INITIALIZED,
.d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER,
.d_RTC_STATE_V_ON = AR6320V2_RTC_STATE_V_ON,
.d_MUX_ID_MASK = AR6320V2_MUX_ID_MASK,
.d_TRANSACTION_ID_MASK = AR6320V2_TRANSACTION_ID_MASK,
#if defined(SDIO_3_0)
.d_HOST_INT_STATUS_MBOX_DATA_MASK =
AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK,
.d_HOST_INT_STATUS_MBOX_DATA_LSB =
AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB,
#endif
.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
.d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS,
.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
.d_HOST_CE_COUNT = 8,
.d_ENABLE_MSI = 0,
};
struct ce_reg_def ar6320v2_ce_targetdef = {
/* copy_engine.c */
.d_DST_WR_INDEX_ADDRESS = AR6320V2_DST_WR_INDEX_ADDRESS,
.d_SRC_WATERMARK_ADDRESS = AR6320V2_SRC_WATERMARK_ADDRESS,
.d_SRC_WATERMARK_LOW_MASK = AR6320V2_SRC_WATERMARK_LOW_MASK,
.d_SRC_WATERMARK_HIGH_MASK = AR6320V2_SRC_WATERMARK_HIGH_MASK,
.d_DST_WATERMARK_LOW_MASK = AR6320V2_DST_WATERMARK_LOW_MASK,
.d_DST_WATERMARK_HIGH_MASK = AR6320V2_DST_WATERMARK_HIGH_MASK,
.d_CURRENT_SRRI_ADDRESS = AR6320V2_CURRENT_SRRI_ADDRESS,
.d_CURRENT_DRRI_ADDRESS = AR6320V2_CURRENT_DRRI_ADDRESS,
.d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK =
AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK =
AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK =
AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_DST_RING_LOW_WATERMARK_MASK =
AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_ADDRESS = AR6320V2_HOST_IS_ADDRESS,
.d_HOST_IS_COPY_COMPLETE_MASK = AR6320V2_HOST_IS_COPY_COMPLETE_MASK,
.d_CE_WRAPPER_BASE_ADDRESS = AR6320V2_CE_WRAPPER_BASE_ADDRESS,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS =
AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS,
.d_HOST_IE_ADDRESS = AR6320V2_HOST_IE_ADDRESS,
.d_HOST_IE_COPY_COMPLETE_MASK = AR6320V2_HOST_IE_COPY_COMPLETE_MASK,
.d_SR_BA_ADDRESS = AR6320V2_SR_BA_ADDRESS,
.d_SR_SIZE_ADDRESS = AR6320V2_SR_SIZE_ADDRESS,
.d_CE_CTRL1_ADDRESS = AR6320V2_CE_CTRL1_ADDRESS,
.d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK,
.d_DR_BA_ADDRESS = AR6320V2_DR_BA_ADDRESS,
.d_DR_SIZE_ADDRESS = AR6320V2_DR_SIZE_ADDRESS,
.d_MISC_IE_ADDRESS = AR6320V2_MISC_IE_ADDRESS,
.d_MISC_IS_AXI_ERR_MASK = AR6320V2_MISC_IS_AXI_ERR_MASK,
.d_MISC_IS_DST_ADDR_ERR_MASK = AR6320V2_MISC_IS_DST_ADDR_ERR_MASK,
.d_MISC_IS_SRC_LEN_ERR_MASK = AR6320V2_MISC_IS_SRC_LEN_ERR_MASK,
.d_MISC_IS_DST_MAX_LEN_VIO_MASK =
AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK,
.d_MISC_IS_DST_RING_OVERFLOW_MASK =
AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK,
.d_MISC_IS_SRC_RING_OVERFLOW_MASK =
AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK,
.d_SRC_WATERMARK_LOW_LSB = AR6320V2_SRC_WATERMARK_LOW_LSB,
.d_SRC_WATERMARK_HIGH_LSB = AR6320V2_SRC_WATERMARK_HIGH_LSB,
.d_DST_WATERMARK_LOW_LSB = AR6320V2_DST_WATERMARK_LOW_LSB,
.d_DST_WATERMARK_HIGH_LSB = AR6320V2_DST_WATERMARK_HIGH_LSB,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK =
AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB =
AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB,
.d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK =
AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK =
AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB =
AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB =
AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB,
.d_CE_WRAPPER_DEBUG_OFFSET = AR6320V2_CE_WRAPPER_DEBUG_OFFSET,
.d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB,
.d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB,
.d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK,
.d_CE_DEBUG_OFFSET = AR6320V2_CE_DEBUG_OFFSET,
.d_CE_DEBUG_SEL_MSB = AR6320V2_CE_DEBUG_SEL_MSB,
.d_CE_DEBUG_SEL_LSB = AR6320V2_CE_DEBUG_SEL_LSB,
.d_CE_DEBUG_SEL_MASK = AR6320V2_CE_DEBUG_SEL_MASK,
.d_CE0_BASE_ADDRESS = AR6320V2_CE0_BASE_ADDRESS,
.d_CE1_BASE_ADDRESS = AR6320V2_CE1_BASE_ADDRESS,
};
#endif

View File

@ -1,590 +0,0 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _AR9888DEF_H_
#define _AR9888DEF_H_
/* Base Addresses */
#define AR9888_RTC_SOC_BASE_ADDRESS 0x00004000
#define AR9888_RTC_WMAC_BASE_ADDRESS 0x00005000
#define AR9888_MAC_COEX_BASE_ADDRESS 0x00006000
#define AR9888_BT_COEX_BASE_ADDRESS 0x00007000
#define AR9888_SOC_PCIE_BASE_ADDRESS 0x00008000
#define AR9888_SOC_CORE_BASE_ADDRESS 0x00009000
#define AR9888_WLAN_UART_BASE_ADDRESS 0x0000c000
#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000
#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000
#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
#define AR9888_WLAN_MAC_BASE_ADDRESS 0x00020000
#define AR9888_EFUSE_BASE_ADDRESS 0x00030000
#define AR9888_FPGA_REG_BASE_ADDRESS 0x00039000
#define AR9888_WLAN_UART2_BASE_ADDRESS 0x00054c00
#define AR9888_CE_WRAPPER_BASE_ADDRESS 0x00057000
#define AR9888_CE0_BASE_ADDRESS 0x00057400
#define AR9888_CE1_BASE_ADDRESS 0x00057800
#define AR9888_CE2_BASE_ADDRESS 0x00057c00
#define AR9888_CE3_BASE_ADDRESS 0x00058000
#define AR9888_CE4_BASE_ADDRESS 0x00058400
#define AR9888_CE5_BASE_ADDRESS 0x00058800
#define AR9888_CE6_BASE_ADDRESS 0x00058c00
#define AR9888_CE7_BASE_ADDRESS 0x00059000
#define AR9888_DBI_BASE_ADDRESS 0x00060000
#define AR9888_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define AR9888_SCRATCH_3_ADDRESS 0x0030
#define AR9888_TARG_DRAM_START 0x00400000
#define AR9888_SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
#define AR9888_SOC_RESET_CONTROL_OFFSET 0x00000000
#define AR9888_SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define AR9888_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000
#define AR9888_WLAN_GPIO_PIN0_ADDRESS 0x00000028
#define AR9888_WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define AR9888_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define AR9888_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000
#define AR9888_SOC_CPU_CLOCK_OFFSET 0x00000020
#define AR9888_SOC_LPO_CAL_OFFSET 0x000000e0
#define AR9888_WLAN_GPIO_PIN10_ADDRESS 0x00000050
#define AR9888_WLAN_GPIO_PIN11_ADDRESS 0x00000054
#define AR9888_WLAN_GPIO_PIN12_ADDRESS 0x00000058
#define AR9888_WLAN_GPIO_PIN13_ADDRESS 0x0000005c
#define AR9888_SOC_CPU_CLOCK_STANDARD_LSB 0
#define AR9888_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define AR9888_SOC_LPO_CAL_ENABLE_LSB 20
#define AR9888_SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
#define AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define AR9888_SI_CONFIG_I2C_LSB 16
#define AR9888_SI_CONFIG_I2C_MASK 0x00010000
#define AR9888_SI_CONFIG_POS_SAMPLE_LSB 7
#define AR9888_SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define AR9888_SI_CONFIG_INACTIVE_CLK_LSB 4
#define AR9888_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define AR9888_SI_CONFIG_INACTIVE_DATA_LSB 5
#define AR9888_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define AR9888_SI_CONFIG_DIVIDER_LSB 0
#define AR9888_SI_CONFIG_DIVIDER_MASK 0x0000000f
#define AR9888_SI_CONFIG_OFFSET 0x00000000
#define AR9888_SI_TX_DATA0_OFFSET 0x00000008
#define AR9888_SI_TX_DATA1_OFFSET 0x0000000c
#define AR9888_SI_RX_DATA0_OFFSET 0x00000010
#define AR9888_SI_RX_DATA1_OFFSET 0x00000014
#define AR9888_SI_CS_OFFSET 0x00000004
#define AR9888_SI_CS_DONE_ERR_MASK 0x00000400
#define AR9888_SI_CS_DONE_INT_MASK 0x00000200
#define AR9888_SI_CS_START_LSB 8
#define AR9888_SI_CS_START_MASK 0x00000100
#define AR9888_SI_CS_RX_CNT_LSB 4
#define AR9888_SI_CS_RX_CNT_MASK 0x000000f0
#define AR9888_SI_CS_TX_CNT_LSB 0
#define AR9888_SI_CS_TX_CNT_MASK 0x0000000f
#define AR9888_CE_COUNT 8
#define AR9888_SR_WR_INDEX_ADDRESS 0x003c
#define AR9888_DST_WATERMARK_ADDRESS 0x0050
#define AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB 14
#define AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000
#define AR9888_RX_MPDU_START_0_SEQ_NUM_LSB 16
#define AR9888_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000
#define AR9888_RX_MPDU_START_2_PN_47_32_LSB 0
#define AR9888_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff
#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff
#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB 0
#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16
#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000
#define AR9888_RX_MSDU_END_4_LAST_MSDU_LSB 15
#define AR9888_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000
#define AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB 2
#define AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004
#define AR9888_RX_ATTENTION_0_FRAGMENT_LSB 13
#define AR9888_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000
#define AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000
#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16
#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000
#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB 0
#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff
#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008
#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB 8
#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300
#define AR9888_RX_MPDU_START_0_ENCRYPTED_LSB 13
#define AR9888_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000
#define AR9888_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400
#define AR9888_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000
#define AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000
#define AR9888_DST_WR_INDEX_ADDRESS 0x0040
#define AR9888_SRC_WATERMARK_ADDRESS 0x004c
#define AR9888_SRC_WATERMARK_LOW_MASK 0xffff0000
#define AR9888_SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define AR9888_DST_WATERMARK_LOW_MASK 0xffff0000
#define AR9888_DST_WATERMARK_HIGH_MASK 0x0000ffff
#define AR9888_CURRENT_SRRI_ADDRESS 0x0044
#define AR9888_CURRENT_DRRI_ADDRESS 0x0048
#define AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define AR9888_HOST_IS_ADDRESS 0x0030
#define AR9888_HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define AR9888_HOST_IE_ADDRESS 0x002c
#define AR9888_HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define AR9888_SR_BA_ADDRESS 0x0000
#define AR9888_SR_SIZE_ADDRESS 0x0004
#define AR9888_CE_CTRL1_ADDRESS 0x0010
#define AR9888_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define AR9888_DR_BA_ADDRESS 0x0008
#define AR9888_DR_SIZE_ADDRESS 0x000c
#define AR9888_MISC_IE_ADDRESS 0x0034
#define AR9888_MISC_IS_AXI_ERR_MASK 0x00000400
#define AR9888_MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define AR9888_MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define AR9888_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define AR9888_SRC_WATERMARK_LOW_LSB 16
#define AR9888_SRC_WATERMARK_HIGH_LSB 0
#define AR9888_DST_WATERMARK_LOW_LSB 16
#define AR9888_DST_WATERMARK_HIGH_LSB 0
#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define AR9888_CE_CTRL1_DMAX_LENGTH_LSB 0
#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000004
#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 2
#define AR9888_SOC_GLOBAL_RESET_ADDRESS 0x0008
#define AR9888_RTC_STATE_ADDRESS 0x0000
#define AR9888_RTC_STATE_COLD_RESET_MASK 0x00000400
#define AR9888_PCIE_SOC_WAKE_RESET 0x00000000
#define AR9888_PCIE_SOC_WAKE_ADDRESS 0x0004
#define AR9888_PCIE_SOC_WAKE_V_MASK 0x00000001
#define AR9888_RTC_STATE_V_MASK 0x00000007
#define AR9888_RTC_STATE_V_LSB 0
#define AR9888_RTC_STATE_V_ON 3
#define AR9888_MUX_ID_MASK 0x0000
#define AR9888_TRANSACTION_ID_MASK 0x3fff
#define AR9888_PCIE_LOCAL_BASE_ADDRESS 0x80000
#define AR9888_FW_IND_EVENT_PENDING 1
#define AR9888_FW_IND_INITIALIZED 2
#define AR9888_PCIE_INTR_ENABLE_ADDRESS 0x0008
#define AR9888_PCIE_INTR_CLR_ADDRESS 0x0014
#define AR9888_PCIE_INTR_FIRMWARE_MASK 0x00000400
#define AR9888_PCIE_INTR_CE0_MASK 0x00000800
#define AR9888_PCIE_INTR_CE_MASK_ALL 0x0007f800
#define AR9888_PCIE_INTR_CAUSE_ADDRESS 0x000c
#define AR9888_CPU_INTR_ADDRESS 0x0010
#define AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define AR9888_SOC_RESET_CONTROL_ADDRESS 0x00000000
#define AR9888_SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
#define AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define AR9888_CORE_CTRL_ADDRESS 0x0000
#define AR9888_CORE_CTRL_CPU_INTR_MASK 0x00002000
#define AR9888_LOCAL_SCRATCH_OFFSET 0x18
#define AR9888_CLOCK_GPIO_OFFSET 0xffffffff
#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define AR9888_PCIE_INTR_CE_MASK(n) (AR9888_PCIE_INTR_CE0_MASK << (n))
#define AR9888_FW_EVENT_PENDING_ADDRESS \
(AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS)
#define AR9888_DRAM_BASE_ADDRESS AR9888_TARG_DRAM_START
#define AR9888_FW_INDICATOR_ADDRESS \
(AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS)
#define AR9888_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET
#define AR9888_WLAN_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET
#define AR9888_WLAN_RESET_CONTROL_OFFSET AR9888_SOC_RESET_CONTROL_OFFSET
#define AR9888_CLOCK_CONTROL_OFFSET AR9888_SOC_CLOCK_CONTROL_OFFSET
#define AR9888_CLOCK_CONTROL_SI0_CLK_MASK AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define AR9888_RESET_CONTROL_MBOX_RST_MASK MISSING
#define AR9888_RESET_CONTROL_SI0_RST_MASK AR9888_SOC_RESET_CONTROL_SI0_RST_MASK
#define AR9888_GPIO_BASE_ADDRESS AR9888_WLAN_GPIO_BASE_ADDRESS
#define AR9888_GPIO_PIN0_OFFSET AR9888_WLAN_GPIO_PIN0_ADDRESS
#define AR9888_GPIO_PIN1_OFFSET AR9888_WLAN_GPIO_PIN1_ADDRESS
#define AR9888_GPIO_PIN0_CONFIG_MASK AR9888_WLAN_GPIO_PIN0_CONFIG_MASK
#define AR9888_GPIO_PIN1_CONFIG_MASK AR9888_WLAN_GPIO_PIN1_CONFIG_MASK
#define AR9888_SI_BASE_ADDRESS AR9888_WLAN_SI_BASE_ADDRESS
#define AR9888_SCRATCH_BASE_ADDRESS AR9888_SOC_CORE_BASE_ADDRESS
#define AR9888_CPU_CLOCK_OFFSET AR9888_SOC_CPU_CLOCK_OFFSET
#define AR9888_LPO_CAL_OFFSET AR9888_SOC_LPO_CAL_OFFSET
#define AR9888_GPIO_PIN10_OFFSET AR9888_WLAN_GPIO_PIN10_ADDRESS
#define AR9888_GPIO_PIN11_OFFSET AR9888_WLAN_GPIO_PIN11_ADDRESS
#define AR9888_GPIO_PIN12_OFFSET AR9888_WLAN_GPIO_PIN12_ADDRESS
#define AR9888_GPIO_PIN13_OFFSET AR9888_WLAN_GPIO_PIN13_ADDRESS
#define AR9888_CPU_CLOCK_STANDARD_LSB AR9888_SOC_CPU_CLOCK_STANDARD_LSB
#define AR9888_CPU_CLOCK_STANDARD_MASK AR9888_SOC_CPU_CLOCK_STANDARD_MASK
#define AR9888_LPO_CAL_ENABLE_LSB AR9888_SOC_LPO_CAL_ENABLE_LSB
#define AR9888_LPO_CAL_ENABLE_MASK AR9888_SOC_LPO_CAL_ENABLE_MASK
#define AR9888_ANALOG_INTF_BASE_ADDRESS AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS
#define AR9888_MBOX_BASE_ADDRESS MISSING
#define AR9888_INT_STATUS_ENABLE_ERROR_LSB MISSING
#define AR9888_INT_STATUS_ENABLE_ERROR_MASK MISSING
#define AR9888_INT_STATUS_ENABLE_CPU_LSB MISSING
#define AR9888_INT_STATUS_ENABLE_CPU_MASK MISSING
#define AR9888_INT_STATUS_ENABLE_COUNTER_LSB MISSING
#define AR9888_INT_STATUS_ENABLE_COUNTER_MASK MISSING
#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
#define AR9888_INT_STATUS_ENABLE_ADDRESS MISSING
#define AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
#define AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
#define AR9888_HOST_INT_STATUS_ADDRESS MISSING
#define AR9888_CPU_INT_STATUS_ADDRESS MISSING
#define AR9888_ERROR_INT_STATUS_ADDRESS MISSING
#define AR9888_ERROR_INT_STATUS_WAKEUP_MASK MISSING
#define AR9888_ERROR_INT_STATUS_WAKEUP_LSB MISSING
#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
#define AR9888_COUNT_DEC_ADDRESS MISSING
#define AR9888_HOST_INT_STATUS_CPU_MASK MISSING
#define AR9888_HOST_INT_STATUS_CPU_LSB MISSING
#define AR9888_HOST_INT_STATUS_ERROR_MASK MISSING
#define AR9888_HOST_INT_STATUS_ERROR_LSB MISSING
#define AR9888_HOST_INT_STATUS_COUNTER_MASK MISSING
#define AR9888_HOST_INT_STATUS_COUNTER_LSB MISSING
#define AR9888_RX_LOOKAHEAD_VALID_ADDRESS MISSING
#define AR9888_WINDOW_DATA_ADDRESS MISSING
#define AR9888_WINDOW_READ_ADDR_ADDRESS MISSING
#define AR9888_WINDOW_WRITE_ADDR_ADDRESS MISSING
struct targetdef_s ar9888_targetdef = {
.d_RTC_SOC_BASE_ADDRESS = AR9888_RTC_SOC_BASE_ADDRESS,
.d_RTC_WMAC_BASE_ADDRESS = AR9888_RTC_WMAC_BASE_ADDRESS,
.d_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET,
.d_WLAN_SYSTEM_SLEEP_DISABLE_LSB =
AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB,
.d_WLAN_SYSTEM_SLEEP_DISABLE_MASK =
AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK,
.d_CLOCK_CONTROL_OFFSET = AR9888_CLOCK_CONTROL_OFFSET,
.d_CLOCK_CONTROL_SI0_CLK_MASK = AR9888_CLOCK_CONTROL_SI0_CLK_MASK,
.d_RESET_CONTROL_OFFSET = AR9888_SOC_RESET_CONTROL_OFFSET,
.d_RESET_CONTROL_MBOX_RST_MASK = AR9888_RESET_CONTROL_MBOX_RST_MASK,
.d_RESET_CONTROL_SI0_RST_MASK = AR9888_RESET_CONTROL_SI0_RST_MASK,
.d_WLAN_RESET_CONTROL_OFFSET = AR9888_WLAN_RESET_CONTROL_OFFSET,
.d_WLAN_RESET_CONTROL_COLD_RST_MASK =
AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK,
.d_WLAN_RESET_CONTROL_WARM_RST_MASK =
AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK,
.d_GPIO_BASE_ADDRESS = AR9888_GPIO_BASE_ADDRESS,
.d_GPIO_PIN0_OFFSET = AR9888_GPIO_PIN0_OFFSET,
.d_GPIO_PIN1_OFFSET = AR9888_GPIO_PIN1_OFFSET,
.d_GPIO_PIN0_CONFIG_MASK = AR9888_GPIO_PIN0_CONFIG_MASK,
.d_GPIO_PIN1_CONFIG_MASK = AR9888_GPIO_PIN1_CONFIG_MASK,
.d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB,
.d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK,
.d_SI_CONFIG_I2C_LSB = AR9888_SI_CONFIG_I2C_LSB,
.d_SI_CONFIG_I2C_MASK = AR9888_SI_CONFIG_I2C_MASK,
.d_SI_CONFIG_POS_SAMPLE_LSB = AR9888_SI_CONFIG_POS_SAMPLE_LSB,
.d_SI_CONFIG_POS_SAMPLE_MASK = AR9888_SI_CONFIG_POS_SAMPLE_MASK,
.d_SI_CONFIG_INACTIVE_CLK_LSB = AR9888_SI_CONFIG_INACTIVE_CLK_LSB,
.d_SI_CONFIG_INACTIVE_CLK_MASK = AR9888_SI_CONFIG_INACTIVE_CLK_MASK,
.d_SI_CONFIG_INACTIVE_DATA_LSB = AR9888_SI_CONFIG_INACTIVE_DATA_LSB,
.d_SI_CONFIG_INACTIVE_DATA_MASK = AR9888_SI_CONFIG_INACTIVE_DATA_MASK,
.d_SI_CONFIG_DIVIDER_LSB = AR9888_SI_CONFIG_DIVIDER_LSB,
.d_SI_CONFIG_DIVIDER_MASK = AR9888_SI_CONFIG_DIVIDER_MASK,
.d_SI_BASE_ADDRESS = AR9888_SI_BASE_ADDRESS,
.d_SI_CONFIG_OFFSET = AR9888_SI_CONFIG_OFFSET,
.d_SI_TX_DATA0_OFFSET = AR9888_SI_TX_DATA0_OFFSET,
.d_SI_TX_DATA1_OFFSET = AR9888_SI_TX_DATA1_OFFSET,
.d_SI_RX_DATA0_OFFSET = AR9888_SI_RX_DATA0_OFFSET,
.d_SI_RX_DATA1_OFFSET = AR9888_SI_RX_DATA1_OFFSET,
.d_SI_CS_OFFSET = AR9888_SI_CS_OFFSET,
.d_SI_CS_DONE_ERR_MASK = AR9888_SI_CS_DONE_ERR_MASK,
.d_SI_CS_DONE_INT_MASK = AR9888_SI_CS_DONE_INT_MASK,
.d_SI_CS_START_LSB = AR9888_SI_CS_START_LSB,
.d_SI_CS_START_MASK = AR9888_SI_CS_START_MASK,
.d_SI_CS_RX_CNT_LSB = AR9888_SI_CS_RX_CNT_LSB,
.d_SI_CS_RX_CNT_MASK = AR9888_SI_CS_RX_CNT_MASK,
.d_SI_CS_TX_CNT_LSB = AR9888_SI_CS_TX_CNT_LSB,
.d_SI_CS_TX_CNT_MASK = AR9888_SI_CS_TX_CNT_MASK,
.d_BOARD_DATA_SZ = AR9888_BOARD_DATA_SZ,
.d_BOARD_EXT_DATA_SZ = AR9888_BOARD_EXT_DATA_SZ,
.d_MBOX_BASE_ADDRESS = AR9888_MBOX_BASE_ADDRESS,
.d_LOCAL_SCRATCH_OFFSET = AR9888_LOCAL_SCRATCH_OFFSET,
.d_CPU_CLOCK_OFFSET = AR9888_CPU_CLOCK_OFFSET,
.d_LPO_CAL_OFFSET = AR9888_LPO_CAL_OFFSET,
.d_GPIO_PIN10_OFFSET = AR9888_GPIO_PIN10_OFFSET,
.d_GPIO_PIN11_OFFSET = AR9888_GPIO_PIN11_OFFSET,
.d_GPIO_PIN12_OFFSET = AR9888_GPIO_PIN12_OFFSET,
.d_GPIO_PIN13_OFFSET = AR9888_GPIO_PIN13_OFFSET,
.d_CLOCK_GPIO_OFFSET = AR9888_CLOCK_GPIO_OFFSET,
.d_CPU_CLOCK_STANDARD_LSB = AR9888_CPU_CLOCK_STANDARD_LSB,
.d_CPU_CLOCK_STANDARD_MASK = AR9888_CPU_CLOCK_STANDARD_MASK,
.d_LPO_CAL_ENABLE_LSB = AR9888_LPO_CAL_ENABLE_LSB,
.d_LPO_CAL_ENABLE_MASK = AR9888_LPO_CAL_ENABLE_MASK,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB,
.d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK =
AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK,
.d_ANALOG_INTF_BASE_ADDRESS = AR9888_ANALOG_INTF_BASE_ADDRESS,
.d_WLAN_MAC_BASE_ADDRESS = AR9888_WLAN_MAC_BASE_ADDRESS,
.d_FW_INDICATOR_ADDRESS = AR9888_FW_INDICATOR_ADDRESS,
.d_DRAM_BASE_ADDRESS = AR9888_DRAM_BASE_ADDRESS,
.d_SOC_CORE_BASE_ADDRESS = AR9888_SOC_CORE_BASE_ADDRESS,
.d_CORE_CTRL_ADDRESS = AR9888_CORE_CTRL_ADDRESS,
.d_CE_COUNT = AR9888_CE_COUNT,
.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
.d_PCIE_INTR_ENABLE_ADDRESS = AR9888_PCIE_INTR_ENABLE_ADDRESS,
.d_PCIE_INTR_CLR_ADDRESS = AR9888_PCIE_INTR_CLR_ADDRESS,
.d_PCIE_INTR_FIRMWARE_MASK = AR9888_PCIE_INTR_FIRMWARE_MASK,
.d_PCIE_INTR_CE_MASK_ALL = AR9888_PCIE_INTR_CE_MASK_ALL,
.d_CORE_CTRL_CPU_INTR_MASK = AR9888_CORE_CTRL_CPU_INTR_MASK,
.d_SR_WR_INDEX_ADDRESS = AR9888_SR_WR_INDEX_ADDRESS,
.d_DST_WATERMARK_ADDRESS = AR9888_DST_WATERMARK_ADDRESS,
/* htt_rx.c */
.d_RX_MSDU_END_4_FIRST_MSDU_MASK =
AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK,
.d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB,
.d_RX_MPDU_START_0_SEQ_NUM_MASK = AR9888_RX_MPDU_START_0_SEQ_NUM_MASK,
.d_RX_MPDU_START_0_SEQ_NUM_LSB = AR9888_RX_MPDU_START_0_SEQ_NUM_LSB,
.d_RX_MPDU_START_2_PN_47_32_LSB = AR9888_RX_MPDU_START_2_PN_47_32_LSB,
.d_RX_MPDU_START_2_PN_47_32_MASK =
AR9888_RX_MPDU_START_2_PN_47_32_MASK,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK =
AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK,
.d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB =
AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB,
.d_RX_MSDU_END_1_KEY_ID_OCT_MASK =
AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK,
.d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB,
.d_RX_MSDU_END_4_LAST_MSDU_MASK = AR9888_RX_MSDU_END_4_LAST_MSDU_MASK,
.d_RX_MSDU_END_4_LAST_MSDU_LSB = AR9888_RX_MSDU_END_4_LAST_MSDU_LSB,
.d_RX_ATTENTION_0_MCAST_BCAST_MASK =
AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK,
.d_RX_ATTENTION_0_MCAST_BCAST_LSB =
AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB,
.d_RX_ATTENTION_0_FRAGMENT_MASK = AR9888_RX_ATTENTION_0_FRAGMENT_MASK,
.d_RX_ATTENTION_0_FRAGMENT_LSB = AR9888_RX_ATTENTION_0_FRAGMENT_LSB,
.d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK =
AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK =
AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK,
.d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB =
AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB,
.d_RX_MSDU_START_0_MSDU_LENGTH_MASK =
AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK,
.d_RX_MSDU_START_0_MSDU_LENGTH_LSB =
AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB,
.d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET =
AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET,
.d_RX_MSDU_START_2_DECAP_FORMAT_MASK =
AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK,
.d_RX_MSDU_START_2_DECAP_FORMAT_LSB =
AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB,
.d_RX_MPDU_START_0_ENCRYPTED_MASK =
AR9888_RX_MPDU_START_0_ENCRYPTED_MASK,
.d_RX_MPDU_START_0_ENCRYPTED_LSB =
AR9888_RX_MPDU_START_0_ENCRYPTED_LSB,
.d_RX_ATTENTION_0_MORE_DATA_MASK =
AR9888_RX_ATTENTION_0_MORE_DATA_MASK,
.d_RX_ATTENTION_0_MSDU_DONE_MASK =
AR9888_RX_ATTENTION_0_MSDU_DONE_MASK,
.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
.d_PCIE_INTR_CAUSE_ADDRESS = AR9888_PCIE_INTR_CAUSE_ADDRESS,
.d_SOC_RESET_CONTROL_ADDRESS = AR9888_SOC_RESET_CONTROL_ADDRESS,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK =
AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK,
.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB =
AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB,
.d_SOC_RESET_CONTROL_CE_RST_MASK =
AR9888_SOC_RESET_CONTROL_CE_RST_MASK,
.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
.d_CPU_INTR_ADDRESS = AR9888_CPU_INTR_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ADDRESS =
AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS,
.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
};
struct hostdef_s ar9888_hostdef = {
.d_INT_STATUS_ENABLE_ERROR_LSB = AR9888_INT_STATUS_ENABLE_ERROR_LSB,
.d_INT_STATUS_ENABLE_ERROR_MASK = AR9888_INT_STATUS_ENABLE_ERROR_MASK,
.d_INT_STATUS_ENABLE_CPU_LSB = AR9888_INT_STATUS_ENABLE_CPU_LSB,
.d_INT_STATUS_ENABLE_CPU_MASK = AR9888_INT_STATUS_ENABLE_CPU_MASK,
.d_INT_STATUS_ENABLE_COUNTER_LSB =
AR9888_INT_STATUS_ENABLE_COUNTER_LSB,
.d_INT_STATUS_ENABLE_COUNTER_MASK =
AR9888_INT_STATUS_ENABLE_COUNTER_MASK,
.d_INT_STATUS_ENABLE_MBOX_DATA_LSB =
AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB,
.d_INT_STATUS_ENABLE_MBOX_DATA_MASK =
AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB =
AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK =
AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB =
AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB,
.d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK =
AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK,
.d_COUNTER_INT_STATUS_ENABLE_BIT_LSB =
AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB,
.d_COUNTER_INT_STATUS_ENABLE_BIT_MASK =
AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
.d_INT_STATUS_ENABLE_ADDRESS = AR9888_INT_STATUS_ENABLE_ADDRESS,
.d_CPU_INT_STATUS_ENABLE_BIT_LSB =
AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB,
.d_CPU_INT_STATUS_ENABLE_BIT_MASK =
AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK,
.d_HOST_INT_STATUS_ADDRESS = AR9888_HOST_INT_STATUS_ADDRESS,
.d_CPU_INT_STATUS_ADDRESS = AR9888_CPU_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_ADDRESS = AR9888_ERROR_INT_STATUS_ADDRESS,
.d_ERROR_INT_STATUS_WAKEUP_MASK = AR9888_ERROR_INT_STATUS_WAKEUP_MASK,
.d_ERROR_INT_STATUS_WAKEUP_LSB = AR9888_ERROR_INT_STATUS_WAKEUP_LSB,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK =
AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
.d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB =
AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB,
.d_ERROR_INT_STATUS_TX_OVERFLOW_MASK =
AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
.d_ERROR_INT_STATUS_TX_OVERFLOW_LSB =
AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB,
.d_COUNT_DEC_ADDRESS = AR9888_COUNT_DEC_ADDRESS,
.d_HOST_INT_STATUS_CPU_MASK = AR9888_HOST_INT_STATUS_CPU_MASK,
.d_HOST_INT_STATUS_CPU_LSB = AR9888_HOST_INT_STATUS_CPU_LSB,
.d_HOST_INT_STATUS_ERROR_MASK = AR9888_HOST_INT_STATUS_ERROR_MASK,
.d_HOST_INT_STATUS_ERROR_LSB = AR9888_HOST_INT_STATUS_ERROR_LSB,
.d_HOST_INT_STATUS_COUNTER_MASK = AR9888_HOST_INT_STATUS_COUNTER_MASK,
.d_HOST_INT_STATUS_COUNTER_LSB = AR9888_HOST_INT_STATUS_COUNTER_LSB,
.d_RX_LOOKAHEAD_VALID_ADDRESS = AR9888_RX_LOOKAHEAD_VALID_ADDRESS,
.d_WINDOW_DATA_ADDRESS = AR9888_WINDOW_DATA_ADDRESS,
.d_WINDOW_READ_ADDR_ADDRESS = AR9888_WINDOW_READ_ADDR_ADDRESS,
.d_WINDOW_WRITE_ADDR_ADDRESS = AR9888_WINDOW_WRITE_ADDR_ADDRESS,
.d_SOC_GLOBAL_RESET_ADDRESS = AR9888_SOC_GLOBAL_RESET_ADDRESS,
.d_RTC_STATE_ADDRESS = AR9888_RTC_STATE_ADDRESS,
.d_RTC_STATE_COLD_RESET_MASK = AR9888_RTC_STATE_COLD_RESET_MASK,
.d_PCIE_LOCAL_BASE_ADDRESS = AR9888_PCIE_LOCAL_BASE_ADDRESS,
.d_PCIE_SOC_WAKE_RESET = AR9888_PCIE_SOC_WAKE_RESET,
.d_PCIE_SOC_WAKE_ADDRESS = AR9888_PCIE_SOC_WAKE_ADDRESS,
.d_PCIE_SOC_WAKE_V_MASK = AR9888_PCIE_SOC_WAKE_V_MASK,
.d_RTC_STATE_V_MASK = AR9888_RTC_STATE_V_MASK,
.d_RTC_STATE_V_LSB = AR9888_RTC_STATE_V_LSB,
.d_FW_IND_EVENT_PENDING = AR9888_FW_IND_EVENT_PENDING,
.d_FW_IND_INITIALIZED = AR9888_FW_IND_INITIALIZED,
.d_RTC_STATE_V_ON = AR9888_RTC_STATE_V_ON,
.d_MUX_ID_MASK = AR9888_MUX_ID_MASK,
.d_TRANSACTION_ID_MASK = AR9888_TRANSACTION_ID_MASK,
#if defined(SDIO_3_0)
.d_HOST_INT_STATUS_MBOX_DATA_MASK =
AR9888_HOST_INT_STATUS_MBOX_DATA_MASK,
.d_HOST_INT_STATUS_MBOX_DATA_LSB =
AR9888_HOST_INT_STATUS_MBOX_DATA_LSB,
#endif
.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
.d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS,
.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
.d_HOST_CE_COUNT = 8,
.d_ENABLE_MSI = 0,
};
struct ce_reg_def ar9888_ce_targetdef = {
/* copy_engine.c */
.d_DST_WR_INDEX_ADDRESS = AR9888_DST_WR_INDEX_ADDRESS,
.d_SRC_WATERMARK_ADDRESS = AR9888_SRC_WATERMARK_ADDRESS,
.d_SRC_WATERMARK_LOW_MASK = AR9888_SRC_WATERMARK_LOW_MASK,
.d_SRC_WATERMARK_HIGH_MASK = AR9888_SRC_WATERMARK_HIGH_MASK,
.d_DST_WATERMARK_LOW_MASK = AR9888_DST_WATERMARK_LOW_MASK,
.d_DST_WATERMARK_HIGH_MASK = AR9888_DST_WATERMARK_HIGH_MASK,
.d_CURRENT_SRRI_ADDRESS = AR9888_CURRENT_SRRI_ADDRESS,
.d_CURRENT_DRRI_ADDRESS = AR9888_CURRENT_DRRI_ADDRESS,
.d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK =
AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK =
AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK =
AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK,
.d_HOST_IS_DST_RING_LOW_WATERMARK_MASK =
AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK,
.d_HOST_IS_ADDRESS = AR9888_HOST_IS_ADDRESS,
.d_HOST_IS_COPY_COMPLETE_MASK = AR9888_HOST_IS_COPY_COMPLETE_MASK,
.d_CE_WRAPPER_BASE_ADDRESS = AR9888_CE_WRAPPER_BASE_ADDRESS,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS =
AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS,
.d_HOST_IE_ADDRESS = AR9888_HOST_IE_ADDRESS,
.d_HOST_IE_COPY_COMPLETE_MASK = AR9888_HOST_IE_COPY_COMPLETE_MASK,
.d_SR_BA_ADDRESS = AR9888_SR_BA_ADDRESS,
.d_SR_SIZE_ADDRESS = AR9888_SR_SIZE_ADDRESS,
.d_CE_CTRL1_ADDRESS = AR9888_CE_CTRL1_ADDRESS,
.d_CE_CTRL1_DMAX_LENGTH_MASK = AR9888_CE_CTRL1_DMAX_LENGTH_MASK,
.d_DR_BA_ADDRESS = AR9888_DR_BA_ADDRESS,
.d_DR_SIZE_ADDRESS = AR9888_DR_SIZE_ADDRESS,
.d_MISC_IE_ADDRESS = AR9888_MISC_IE_ADDRESS,
.d_MISC_IS_AXI_ERR_MASK = AR9888_MISC_IS_AXI_ERR_MASK,
.d_MISC_IS_DST_ADDR_ERR_MASK = AR9888_MISC_IS_DST_ADDR_ERR_MASK,
.d_MISC_IS_SRC_LEN_ERR_MASK = AR9888_MISC_IS_SRC_LEN_ERR_MASK,
.d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK,
.d_MISC_IS_DST_RING_OVERFLOW_MASK =
AR9888_MISC_IS_DST_RING_OVERFLOW_MASK,
.d_MISC_IS_SRC_RING_OVERFLOW_MASK =
AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK,
.d_SRC_WATERMARK_LOW_LSB = AR9888_SRC_WATERMARK_LOW_LSB,
.d_SRC_WATERMARK_HIGH_LSB = AR9888_SRC_WATERMARK_HIGH_LSB,
.d_DST_WATERMARK_LOW_LSB = AR9888_DST_WATERMARK_LOW_LSB,
.d_DST_WATERMARK_HIGH_LSB = AR9888_DST_WATERMARK_HIGH_LSB,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK =
AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK,
.d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB =
AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB,
.d_CE_CTRL1_DMAX_LENGTH_LSB = AR9888_CE_CTRL1_DMAX_LENGTH_LSB,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK =
AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK =
AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK,
.d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB =
AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB,
.d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB =
AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB,
.d_CE0_BASE_ADDRESS = AR9888_CE0_BASE_ADDRESS,
.d_CE1_BASE_ADDRESS = AR9888_CE1_BASE_ADDRESS,
};
#endif

View File

@ -1,199 +0,0 @@
/*
* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT)
#include <linux/module.h> /* Specifically, a module */
#include <linux/kernel.h> /* We're doing kernel work */
#include <linux/version.h> /* We're doing kernel work */
#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
#include <asm/uaccess.h> /* for copy_from_user */
#include "ol_if_athvar.h"
#include "hif.h"
#if defined(HIF_PCI)
#include "if_pci.h"
#elif defined(HIF_USB)
#include "if_usb.h"
#elif defined(HIF_SDIO)
#include "if_ath_sdio.h"
#endif
#include "cds_api.h"
#include "hif_debug.h"
#define PROCFS_NAME "athdiagpfs"
#define PROCFS_DIR "cld"
/**
* This structure holds information about the /proc file
*
*/
static struct proc_dir_entry *proc_file, *proc_dir;
static void *get_hif_hdl_from_file(struct file *file)
{
struct ol_softc *scn;
scn = (struct ol_softc *)PDE_DATA(file_inode(file));
return (void *)scn;
}
static ssize_t ath_procfs_diag_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
hif_handle_t hif_hdl;
int rv;
uint8_t *read_buffer = NULL;
read_buffer = cdf_mem_malloc(count);
if (NULL == read_buffer) {
HIF_ERROR("%s: cdf_mem_alloc failed", __func__);
return -ENOMEM;
}
hif_hdl = get_hif_hdl_from_file(file);
HIF_DBG("rd buff 0x%p cnt %zu offset 0x%x buf 0x%p",
read_buffer, count, (int)*pos, buf);
if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) {
/* reading a word? */
rv = hif_diag_read_access(hif_hdl, (uint32_t)(*pos),
(uint32_t *)read_buffer);
} else {
rv = hif_diag_read_mem(hif_hdl, (uint32_t)(*pos),
(uint8_t *)read_buffer, count);
}
if (copy_to_user(buf, read_buffer, count)) {
cdf_mem_free(read_buffer);
HIF_ERROR("%s: copy_to_user error in /proc/%s",
__func__, PROCFS_NAME);
return -EFAULT;
} else
cdf_mem_free(read_buffer);
if (rv == 0) {
return count;
} else {
return -EIO;
}
}
static ssize_t ath_procfs_diag_write(struct file *file,
const char __user *buf,
size_t count, loff_t *pos)
{
hif_handle_t hif_hdl;
int rv;
uint8_t *write_buffer = NULL;
write_buffer = cdf_mem_malloc(count);
if (NULL == write_buffer) {
HIF_ERROR("%s: cdf_mem_alloc failed", __func__);
return -ENOMEM;
}
if (copy_from_user(write_buffer, buf, count)) {
cdf_mem_free(write_buffer);
HIF_ERROR("%s: copy_to_user error in /proc/%s",
__func__, PROCFS_NAME);
return -EFAULT;
}
hif_hdl = get_hif_hdl_from_file(file);
HIF_DBG("wr buff 0x%p buf 0x%p cnt %zu offset 0x%x value 0x%x",
write_buffer, buf, count,
(int)*pos, *((uint32_t *) write_buffer));
if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) {
/* writing a word? */
uint32_t value = *((uint32_t *)write_buffer);
rv = hif_diag_write_access(hif_hdl, (uint32_t)(*pos), value);
} else {
rv = hif_diag_write_mem(hif_hdl, (uint32_t)(*pos),
(uint8_t *)write_buffer, count);
}
cdf_mem_free(write_buffer);
if (rv == 0) {
return count;
} else {
return -EIO;
}
}
static const struct file_operations athdiag_fops = {
.read = ath_procfs_diag_read,
.write = ath_procfs_diag_write,
};
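/*
 * Illustrative user-space sketch (not part of the original file): the
 * handlers above treat the file offset as a target address, so a 4-byte,
 * word-aligned transfer becomes a diag register access while any other
 * size goes through the diag memory path. The proc path comes from
 * PROCFS_DIR/PROCFS_NAME above; the offset 0x4000 is only an example.
 *
 *	int fd = open("/proc/cld/athdiagpfs", O_RDWR);
 *	uint32_t val;
 *
 *	if (fd >= 0 && pread(fd, &val, sizeof(val), 0x4000) == sizeof(val))
 *		printf("reg 0x4000 = 0x%08x\n", val);
 *	close(fd);
 */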
/**
* This function is called when the module is loaded
*
*/
int athdiag_procfs_init(void *scn)
{
proc_dir = proc_mkdir(PROCFS_DIR, NULL);
if (proc_dir == NULL) {
remove_proc_entry(PROCFS_DIR, NULL);
HIF_ERROR("%s: Error: Could not initialize /proc/%s",
__func__, PROCFS_DIR);
return -ENOMEM;
}
proc_file = proc_create_data(PROCFS_NAME,
S_IRUSR | S_IWUSR, proc_dir,
&athdiag_fops, (void *)scn);
if (proc_file == NULL) {
remove_proc_entry(PROCFS_NAME, proc_dir);
HIF_ERROR("%s: Could not initialize /proc/%s",
__func__, PROCFS_NAME);
return -ENOMEM;
}
HIF_DBG("/proc/%s/%s created", PROCFS_DIR, PROCFS_NAME);
return 0; /* everything is ok */
}
/**
* This function is called when the module is unloaded
*
*/
void athdiag_procfs_remove(void)
{
if (proc_dir != NULL) {
remove_proc_entry(PROCFS_NAME, proc_dir);
HIF_DBG("/proc/%s/%s removed", PROCFS_DIR, PROCFS_NAME);
remove_proc_entry(PROCFS_DIR, NULL);
HIF_DBG("/proc/%s removed", PROCFS_DIR);
proc_dir = NULL;
}
}
#else
int athdiag_procfs_init(void *scn)
{
return 0;
}
void athdiag_procfs_remove(void) {}
#endif

View File

@ -1,477 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__
#include "ce_main.h"
/* TBDXXX: Use int return values for consistency with Target */
/* TBDXXX: Perhaps merge Host/Target-->common */
/*
* Copy Engine support: low-level Target-side Copy Engine API.
* This is a hardware access layer used by code that understands
* how to use copy engines.
*/
/*
* A "struct CE_handle *" serves as an opaque pointer-sized
* handle to a specific copy engine.
*/
struct CE_handle;
/*
* "Send Completion" callback type for Send Completion Notification.
*
* If a Send Completion callback is registered and one or more sends
* have completed, the callback is invoked.
*
* per_ce_send_context is a context supplied by the calling layer
* (via ce_send_cb_register). It is associated with a copy engine.
*
* per_transfer_send_context is context supplied by the calling layer
* (via the "send" call). It may be different for each invocation
* of send.
*
* The buffer parameter is the first byte sent of the first buffer
* sent (if more than one buffer).
*
* nbytes is the number of bytes of that buffer that were sent.
*
* transfer_id matches the value used when the buffer or
* buf_list was sent.
*
* Implementation note: Pops 1 completed send buffer from Source ring
*/
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
void *per_ce_send_context,
void *per_transfer_send_context,
cdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int sw_index,
unsigned int hw_index,
uint32_t toeplitz_hash_result);
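/*
 * Illustrative sketch (not part of the original header): a minimal send
 * completion handler matching the ce_send_cb signature above. The name
 * example_tx_done, and the assumption that the caller stashed a
 * cdf_nbuf_t as the per-transfer context and frees it with
 * cdf_nbuf_free(), are for illustration only.
 */
static inline void example_tx_done(struct CE_handle *copyeng,
				   void *per_ce_send_context,
				   void *per_transfer_send_context,
				   cdf_dma_addr_t buffer,
				   unsigned int nbytes,
				   unsigned int transfer_id,
				   unsigned int sw_index,
				   unsigned int hw_index,
				   uint32_t toeplitz_hash_result)
{
	/* the per-transfer context is whatever was passed to ce_send() */
	cdf_nbuf_t netbuf = (cdf_nbuf_t)per_transfer_send_context;

	cdf_nbuf_free(netbuf);
}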
/*
* "Buffer Received" callback type for Buffer Received Notification.
*
* Implementation note: Pops 1 completed recv buffer from Dest ring
*/
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
void *per_CE_recv_context,
void *per_transfer_recv_context,
cdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
/*
* Copy Engine Watermark callback type.
*
* Allows upper layers to be notified when watermarks are reached:
*   - space is available and/or running short in a source ring
*   - buffers are exhausted and/or abundant in a destination ring
*
* The flags parameter indicates which condition triggered this
* callback. See CE_WM_FLAG_*.
*
* Watermark APIs are provided to allow upper layers "batch"
* descriptor processing and to allow upper layers to
* throttle/unthrottle.
*/
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
void *per_CE_wm_context, unsigned int flags);
#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
/* A list of buffers to be gathered and sent */
struct ce_sendlist;
/* Copy Engine settable attributes */
struct CE_attr;
/*==================Send=====================================================*/
/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1
/*
* Queue a source buffer to be sent to an anonymous destination buffer.
* copyeng - which copy engine to use
* buffer - address of buffer
* nbytes - number of bytes to send
* transfer_id - arbitrary ID; reflected to destination
* flags - CE_SEND_FLAG_* values
* Returns 0 on success; otherwise an error status.
*
* Note: If no flags are specified, use CE's default data swap mode.
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ce_send(struct CE_handle *copyeng,
void *per_transfer_send_context,
cdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags,
unsigned int user_flags);
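/*
 * Illustrative sketch (not part of the original header): queueing one
 * DMA-mapped fragment with ce_send(). The handle, context, address and
 * length are assumed to be supplied by the caller; passing 0 for flags
 * selects the CE's default byte-swap policy.
 */
static inline int example_ce_send_one(struct CE_handle *copyeng,
				      void *tx_context,
				      cdf_dma_addr_t frag_paddr,
				      unsigned int frag_len,
				      unsigned int transfer_id)
{
	/* returns 0 on success, non-zero if the source ring has no space */
	return ce_send(copyeng, tx_context, frag_paddr, frag_len,
		       transfer_id, 0, 0);
}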
#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
unsigned int num_msdus, unsigned int transfer_id);
#endif
void ce_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/*
* Register a Send Callback function.
* This function is called as soon as the contents of a Send
* have reached the destination, unless disable_interrupts is
* requested. In this case, the callback is invoked when the
* send status is polled, shortly after the send completes.
*/
void ce_send_cb_register(struct CE_handle *copyeng,
ce_send_cb fn_ptr,
void *per_ce_send_context, int disable_interrupts);
/*
* Return the size of a SendList. This allows the caller to allocate
* a SendList while the SendList structure remains opaque.
*/
unsigned int ce_sendlist_sizeof(void);
/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);
/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
cdf_dma_addr_t buffer,
unsigned int nbytes,
uint32_t flags, /* OR-ed with internal flags */
uint32_t user_flags);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
* copyeng - which copy engine to use
* sendlist - list of simple buffers to send using gather
* transfer_id - arbitrary ID; reflected to destination
* Returns 0 on success; otherwise an error status.
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ce_sendlist_send(struct CE_handle *copyeng,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
unsigned int transfer_id);
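/*
 * Illustrative sketch (not part of the original header): gathering two
 * DMA-mapped fragments into a single transfer with the sendlist API.
 * The use of cdf_mem_malloc()/cdf_mem_free() to allocate the opaque
 * sendlist, and the fragment addresses/lengths, are assumptions for
 * illustration only.
 */
static inline int example_sendlist_two_frags(struct CE_handle *copyeng,
					     void *tx_context,
					     cdf_dma_addr_t frag0,
					     unsigned int len0,
					     cdf_dma_addr_t frag1,
					     unsigned int len1,
					     unsigned int transfer_id)
{
	/* ce_sendlist_sizeof() lets the caller size the opaque sendlist */
	struct ce_sendlist *sendlist = cdf_mem_malloc(ce_sendlist_sizeof());
	int status = -1;

	if (sendlist == NULL)
		return status;
	ce_sendlist_init(sendlist);
	if (ce_sendlist_buf_add(sendlist, frag0, len0, 0, 0) == 0 &&
	    ce_sendlist_buf_add(sendlist, frag1, len1, 0, 0) == 0)
		/* both fragments complete as one gather transfer */
		status = ce_sendlist_send(copyeng, tx_context, sendlist,
					  transfer_id);
	cdf_mem_free(sendlist);
	return status;
}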
/*==================Recv=====================================================*/
/*
* Make a buffer available to receive. The buffer must be at least the
* minimal size appropriate for this copy engine (src_sz_max attribute).
* copyeng - which copy engine to use
* per_transfer_recv_context - context passed back to caller's recv_cb
* buffer - address of buffer in CE space
* Returns 0 on success; otherwise an error status.
*
* Implementation note: Pushes a buffer to Dest ring.
*/
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
void *per_transfer_recv_context,
cdf_dma_addr_t buffer);
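/*
 * Illustrative sketch (not part of the original header): replenishing the
 * destination ring by posting DMA-mapped receive buffers until
 * ce_recv_buf_enqueue() reports no descriptor space left. The contexts[]
 * and paddrs[] arrays are hypothetical caller-provided state.
 */
static inline unsigned int example_post_recv_bufs(struct CE_handle *copyeng,
						  void **contexts,
						  cdf_dma_addr_t *paddrs,
						  unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (ce_recv_buf_enqueue(copyeng, contexts[i], paddrs[i]) != 0)
			break;	/* destination ring is full */
	return i;	/* number of buffers actually posted */
}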
/*
* Register a Receive Callback function.
* This function is called as soon as data is received
* from the source.
*/
void ce_recv_cb_register(struct CE_handle *copyeng,
CE_recv_cb fn_ptr,
void *per_CE_recv_context,
int disable_interrupts);
/*==================CE Watermark=============================================*/
/*
* Register a Watermark Callback function.
* This function is called as soon as a watermark level
* is crossed. A Watermark Callback function is free to
* handle received data "en masse"; but then some coordination
* is required with a registered Receive Callback function.
* [Suggestion: Either handle Receives in a Receive Callback
* or en masse in a Watermark Callback; but not both.]
*/
void ce_watermark_cb_register(struct CE_handle *copyeng,
CE_watermark_cb fn_ptr,
void *per_CE_wm_context);
/*
* Set low/high watermarks for the send/source side of a copy engine.
*
* Typically, the destination side CPU manages watermarks for
* the receive side and the source side CPU manages watermarks
* for the send side.
*
* A low watermark of 0 is never hit (so the watermark function
* will never be called for a Low Watermark condition).
*
* A high watermark equal to nentries is never hit (so the
* watermark function will never be called for a High Watermark
* condition).
*/
void ce_send_watermarks_set(struct CE_handle *copyeng,
unsigned int low_alert_nentries,
unsigned int high_alert_nentries);
/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
unsigned int low_alert_nentries,
unsigned int high_alert_nentries);
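/*
 * Illustrative sketch (not part of the original header): using source-ring
 * watermarks for send-side flow control. example_ce_wm_cb(), the pause/
 * resume placeholders and the thresholds (8/56) are assumptions for
 * illustration only.
 */
static inline void example_ce_wm_cb(struct CE_handle *copyeng,
				    void *per_CE_wm_context,
				    unsigned int flags)
{
	if (flags & CE_WM_FLAG_SEND_HIGH) {
		/* source ring running short: throttle upper-layer sends */
	}
	if (flags & CE_WM_FLAG_SEND_LOW) {
		/* descriptor space available again: unthrottle */
	}
}

static inline void example_enable_send_flow_control(struct CE_handle *copyeng,
						    void *wm_context)
{
	ce_watermark_cb_register(copyeng, example_ce_wm_cb, wm_context);
	/* hypothetical low/high alert thresholds, in ring entries */
	ce_send_watermarks_set(copyeng, 8, 56);
}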
/*
* Return the number of entries that can be queued
* to a ring at an instant in time.
*
* For source ring, does not imply that destination-side
* buffers are available; merely indicates descriptor space
* in the source ring.
*
* For destination ring, does not imply that previously
* received buffers have been processed; merely indicates
* descriptor space in destination ring.
*
* Mainly for use with CE Watermark callback.
*/
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
/*
* Return the number of entries in the ring that are ready
* to be processed by software.
*
* For source ring, the number of descriptors that have
* been completed and can now be overwritten with new send
* descriptors.
*
* For destination ring, the number of descriptors that
* are available to be processed (newly received buffers).
*/
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1
void ce_enable_msi(struct ol_softc *scn,
unsigned int CE_id,
uint32_t msi_addr_lo,
uint32_t msi_addr_hi,
uint32_t msi_data);
/*
* Supply data for the next completed unprocessed receive descriptor.
*
* For use:
*   - with a CE Watermark callback,
*   - in a recv_cb function when processing buf_lists,
*   - in a recv_cb function in order to mitigate recv_cb's.
*
* Implementation note: Pops buffer from Dest ring.
*/
int ce_completed_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
/*
* Supply data for the next completed unprocessed send descriptor.
*
* For use:
*   - with a CE Watermark callback,
*   - in a send_cb function in order to mitigate send_cb's.
*
* Implementation note: Pops 1 completed send buffer from Source ring
*/
int ce_completed_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *sw_idx,
unsigned int *hw_idx,
uint32_t *toeplitz_hash_result);
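/*
 * Illustrative sketch (not part of the original header): reaping completed
 * send descriptors in a loop, e.g. from a watermark callback or when send
 * completion interrupts are disabled. A return value of 0 is assumed to
 * mean a descriptor was returned; the reclaim step is a placeholder.
 */
static inline void example_reap_tx_completions(struct CE_handle *copyeng)
{
	void *ce_context, *transfer_context;
	cdf_dma_addr_t buffer;
	unsigned int nbytes, transfer_id, sw_idx, hw_idx;
	uint32_t toeplitz_hash;

	while (ce_completed_send_next(copyeng, &ce_context, &transfer_context,
				      &buffer, &nbytes, &transfer_id,
				      &sw_idx, &hw_idx,
				      &toeplitz_hash) == 0) {
		/* unmap/free the buffer associated with transfer_context */
	}
}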
/*==================CE Engine Initialization=================================*/
/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct ol_softc *scn,
unsigned int CE_id, struct CE_attr *attr);
/*==================CE Engine Shutdown=======================================*/
/*
* Support clean shutdown by allowing the caller to revoke
* receive buffers. Target DMA must be stopped before using
* this API.
*/
CDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
* this API.
*/
CDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
uint32_t *toeplitz_hash_result);
void ce_fini(struct CE_handle *copyeng);
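/*
 * Illustrative sketch (not part of the original header): draining a copy
 * engine at shutdown. Target DMA is assumed to be stopped already, as
 * required above; buffer unmap/free steps are placeholders.
 */
static inline void example_ce_shutdown(struct CE_handle *copyeng)
{
	void *ce_ctx, *xfer_ctx;
	cdf_dma_addr_t buffer;
	unsigned int nbytes, id;
	uint32_t toeplitz_hash;

	/* take back receive buffers that were posted but never filled */
	while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx, &buffer) ==
	       CDF_STATUS_SUCCESS) {
		/* unmap and free the revoked receive buffer */
	}
	/* cancel sends that were queued but never completed */
	while (ce_cancel_send_next(copyeng, &ce_ctx, &xfer_ctx, &buffer,
				   &nbytes, &id, &toeplitz_hash) ==
	       CDF_STATUS_SUCCESS) {
		/* unmap and free the cancelled send buffer */
	}
	ce_fini(copyeng);
}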
/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct ol_softc *scn);
int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id);
/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn);
/* API to check if any of the copy engine pipes have
 * pending frames for processing
*/
bool ce_get_rx_pending(struct ol_softc *scn);
/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
/* Attributes of an instance of a Copy Engine */
struct CE_attr {
unsigned int flags; /* CE_ATTR_* values */
unsigned int priority; /* TBD */
unsigned int src_nentries; /* #entries in source ring -
* Must be a power of 2 */
unsigned int src_sz_max; /* Max source send size for this CE.
* This is also the minimum size of
* a destination buffer. */
unsigned int dest_nentries; /* #entries in destination ring -
* Must be a power of 2 */
void *reserved; /* Future use */
};
/*
* When using sendlist_send to transfer multiple buffer fragments, the
* transfer context of each fragment, except the last one, is filled
* with CE_SENDLIST_ITEM_CTXT. CE_completed_send reports success for
* each completed fragment, with CE_SENDLIST_ITEM_CTXT as its transfer
* context. The upper layer can use this to identify the status of a
* send completion.
*/
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
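/*
 * Illustrative sketch (not part of the original header): a send completion
 * handler can skip the intermediate fragments of a sendlist transfer by
 * checking for the sentinel context; only the final fragment carries the
 * caller's real per-transfer context.
 */
static inline bool example_is_final_sendlist_frag(void *per_transfer_context)
{
	return per_transfer_context != CE_SENDLIST_ITEM_CTXT;
}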
/*
* This is an opaque type that is at least large enough to hold
* a sendlist. A sendlist can only be accessed through CE APIs,
* but this allows a sendlist to be allocated on the run-time
* stack. TBDXXX: un-opaque would be simpler...
*/
struct ce_sendlist {
unsigned int word[62];
};
#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */
#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
#else
/**
* ce_ipa_get_resource() - get uc resource on copyengine
* @ce: copyengine context
* @ce_sr_base_paddr: copyengine source ring base physical address
* @ce_sr_ring_size: copyengine source ring size
* @ce_reg_paddr: copyengine register physical address
*
* Copy engine should release its resources to the micro controller.
* The micro controller needs:
* - Copy engine source descriptor base address
* - Copy engine source descriptor size
* - PCI BAR address to access copy engine register
*
* Return: None
*/
static inline void ce_ipa_get_resource(struct CE_handle *ce,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
{
return;
}
#endif /* IPA_OFFLOAD */
static inline void ce_pkt_error_count_incr(
struct HIF_CE_state *_hif_state,
enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
(_hif_state->scn->pkt_stats.hif_pipe_no_resrc_count)
+= 1;
}
bool ce_check_rx_pending(struct ol_softc *scn, int ce_id);
#if defined(FEATURE_LRO)
void ce_lro_flush_cb_register(struct ol_softc *scn,
void (handler)(void *), void *data);
void ce_lro_flush_cb_deregister(struct ol_softc *scn);
#endif
#endif /* __COPY_ENGINE_API_H__ */

View File

@ -1,301 +0,0 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
* Implementation of the Host-side Host InterFace (HIF) API
* for a Host/Target interconnect using Copy Engines over PCIe.
*/
#ifndef __HIF_PCI_INTERNAL_H__
#define __HIF_PCI_INTERNAL_H__
#define HIF_PCI_DEBUG ATH_DEBUG_MAKE_MODULE_MASK(0)
#define HIF_PCI_IPA_UC_ASSIGNED_CE 5
#if defined(DEBUG)
static ATH_DEBUG_MASK_DESCRIPTION g_hif_debug_description[] = {
{HIF_PCI_DEBUG, "hif_pci"},
};
ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, "hif", "PCIe Host Interface",
ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO,
ATH_DEBUG_DESCRIPTION_COUNT
(g_hif_debug_description),
g_hif_debug_description);
#endif
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
spinlock_t pcie_access_log_lock;
unsigned int pcie_access_log_seqnum = 0;
HIF_ACCESS_LOG pcie_access_log[PCIE_ACCESS_LOG_NUM];
static void hif_target_dump_access_log(void);
#endif
/*
* Host software's Copy Engine configuration.
* This table is derived from the CE_PCI TABLE, above.
*/
#ifdef BIG_ENDIAN_HOST
#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
#else
#define CE_ATTR_FLAGS 0
#endif
/* Maximum number of Copy Engines supported */
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
#define DIAG_CE_ID 7
#define EPPING_CE_FLAGS_POLL \
(CE_ATTR_DISABLE_INTR|CE_ATTR_ENABLE_POLL|CE_ATTR_FLAGS)
#ifdef QCA_WIFI_3_0
static struct CE_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
/* target->host HTT + HTC control */
{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
/* target->host WMI */
{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
/* host->target WMI */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
/* host->target HTT */
{ /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
/* ipa_uc->target HTC control */
{ /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
1024, 512, 0, NULL,},
/* Target autonomous HIF_memcpy */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
2, DIAG_TRANSFER_LIMIT, 2, NULL,},
/* Target to uMC */
{ /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* The following CEs are not being used yet */
{ /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
{ /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
{ /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
};
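/*
 * Illustrative sketch (not part of the original file): the positional
 * initializers above map onto struct CE_attr as shown below for the CE1
 * (target->host HTT + HTC control) entry, rewritten with designated
 * initializers for readability.
 */
static const struct CE_attr example_ce1_attr = {
	.flags = CE_ATTR_FLAGS,
	.priority = 0,
	.src_nentries = 0,	/* target->host only: no source ring use */
	.src_sz_max = 2048,	/* max send size / min dest buffer size */
	.dest_nentries = 512,	/* 512 receive descriptors */
	.reserved = NULL,
};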
static struct CE_pipe_config target_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
/* target->host HTT */
{ /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* target->host WMI + HTC control */
{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target WMI */
{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target HTT */
{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256,
(CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
/* NB: 50% of src nentries, since tx has 2 frags */
/* ipa_uc->target */
{ /* CE5 */ 5, PIPEDIR_OUT, 1024, 64,
(CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
/* Reserved for target autonomous HIF_memcpy */
{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
/* CE7 used only by Host */
{ /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0,
(CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
/* CE8 used only by IPA */
{ /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* The following CEs are not being used yet */
{ /* CE9 */ 9, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
{ /* CE10 */ 9, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
{ /* CE11 */ 9, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
};
static struct CE_attr host_ce_config_wlan_epping_poll[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
/* target->host EP-ping */
{ /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
/* target->host EP-ping */
{ /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
/* host->target EP-ping */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* host->target EP-ping */
{ /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* EP-ping heartbeat */
{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* unused */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
static struct CE_attr host_ce_config_wlan_epping_irq[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
/* target->host EP-ping */
{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* target->host EP-ping */
{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* host->target EP-ping */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* host->target EP-ping */
{ /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* EP-ping heartbeat */
{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* unused */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
/*
* EP-ping firmware's CE configuration
*/
static struct CE_pipe_config target_ce_config_wlan_epping[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ 0, PIPEDIR_OUT, 16, 2048, CE_ATTR_FLAGS, 0,},
/* target->host EP-ping */
{ /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* target->host EP-ping */
{ /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* host->target EP-ping */
{ /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
/* host->target EP-ping */
{ /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
/* EP-ping heartbeat */
{ /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* unused */
{ /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,},
/* CE7 used only by Host */
{ /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
/* CE8 used only by IPA */
{ /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}
};
#else
static struct CE_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
/* target->host HTT + HTC control */
{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
/* target->host WMI */
{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
/* host->target WMI */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
/* host->target HTT */
{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
/* ipa_uc->target HTC control */
{ /* CE5 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
1024, 512, 0, NULL,},
/* Target autonomous HIF_memcpy */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR,
0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
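/*
 * Decode sketch for the host-side entries above, assuming the usual
 * struct CE_attr field order (flags, priority, src_nentries,
 * src_sz_max, dest_nentries, reserved):
 *
 *   { CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL, }
 *     => no source ring, a 512-entry destination ring, 2048-byte
 *        maximum buffer size (a target->host pipe).
 */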
static struct CE_pipe_config target_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
/* target->host HTT + HTC control */
{ /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* target->host WMI */
{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target WMI */
{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target HTT */
{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
/* NB: 50% of src nentries, since tx has 2 frags */
/* ipa_uc->target HTC control */
{ /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, CE_ATTR_FLAGS, 0,},
/* Reserved for target autonomous HIF_memcpy */
{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
/* CE7 used only by Host */
{ /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
/* CE8 used only by IPA */
{ /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}
};
static struct CE_attr host_ce_config_wlan_epping_poll[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
/* target->host EP-ping */
{ /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
/* target->host EP-ping */
{ /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
/* host->target EP-ping */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* host->target EP-ping */
{ /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* EP-ping heartbeat */
{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* unused */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
static struct CE_attr host_ce_config_wlan_epping_irq[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
/* target->host EP-ping */
{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* target->host EP-ping */
{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* host->target EP-ping */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* host->target EP-ping */
{ /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
/* EP-ping heartbeat */
{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
/* unused */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
/*
* EP-ping firmware's CE configuration
*/
static struct CE_pipe_config target_ce_config_wlan_epping[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ 0, PIPEDIR_OUT, 16, 256, CE_ATTR_FLAGS, 0,},
/* target->host EP-ping */
{ /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* target->host EP-ping */
{ /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* host->target EP-ping */
{ /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
/* host->target EP-ping */
{ /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
/* EP-ping heartbeat */
{ /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
/* unused */
{ /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,},
/* CE7 used only by Host */
{ /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
/* CE8 used only by IPA */
{ /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}
};
#endif
static struct CE_attr *host_ce_config = host_ce_config_wlan;
static struct CE_pipe_config *target_ce_config = target_ce_config_wlan;
static int target_ce_config_sz = sizeof(target_ce_config_wlan);
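/*
 * A sketch (illustrative only, not the bus-layer code itself) of how
 * the default pointers above could be switched over to the epping
 * tables at probe time; "epping_irq_mode" is a hypothetical flag:
 *
 *   if (epping_irq_mode) {
 *       host_ce_config = host_ce_config_wlan_epping_irq;
 *       target_ce_config = target_ce_config_wlan_epping;
 *       target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
 *   }
 */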
#endif /* __HIF_PCI_INTERNAL_H__ */

View File

@ -1,292 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#else
#include "cnss_stub.h"
#endif
#include "epping_main.h"
#include "hif_debug.h"
/* Track a BMI transaction that is in progress */
#ifndef BIT
#define BIT(n) (1 << (n))
#endif
enum {
BMI_REQ_SEND_DONE = BIT(0), /* the bmi tx completion */
BMI_RESP_RECV_DONE = BIT(1), /* the bmi respond is received */
};
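/*
 * These two flags gate the release of bmi_transaction_sem: in the
 * default (non-polling) build, a request that expects a response
 * releases the semaphore only once both BMI_REQ_SEND_DONE and
 * BMI_RESP_RECV_DONE are set, while a request without a response
 * releases it as soon as the send completes (see hif_bmi_send_done()
 * and hif_bmi_recv_data() below).
 */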
struct BMI_transaction {
struct HIF_CE_state *hif_state;
cdf_semaphore_t bmi_transaction_sem;
uint8_t *bmi_request_host; /* Req BMI msg in Host addr space */
cdf_dma_addr_t bmi_request_CE; /* Req BMI msg in CE addr space */
uint32_t bmi_request_length; /* Length of BMI request */
uint8_t *bmi_response_host; /* Rsp BMI msg in Host addr space */
cdf_dma_addr_t bmi_response_CE; /* Rsp BMI msg in CE addr space */
unsigned int bmi_response_length; /* Length of received response */
unsigned int bmi_timeout_ms;
uint32_t bmi_transaction_flags; /* flags for the transcation */
};
/*
* send/recv completion functions for BMI.
* NB: The "net_buf" parameter is actually just a
* straight buffer, not an sk_buff.
*/
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int sw_index,
unsigned int hw_index, uint32_t toeplitz_hash_result)
{
struct BMI_transaction *transaction =
(struct BMI_transaction *)transfer_context;
struct ol_softc *scn = transaction->hif_state->scn;
#ifdef BMI_RSP_POLLING
/*
* Fix for EV118783: release the semaphore after sending,
* regardless of whether a response is expected now.
*/
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
#else
/*
* If a response is anticipated, we'll complete the
* transaction if the response has been received.
* If no response is anticipated, complete the
* transaction now.
*/
transaction->bmi_transaction_flags |= BMI_REQ_SEND_DONE;
/* resp isn't needed or has already been received;
* never assume the resp comes later than this */
if (!transaction->bmi_response_CE ||
(transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) {
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
}
#endif
}
#ifndef BMI_RSP_POLLING
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int flags)
{
struct BMI_transaction *transaction =
(struct BMI_transaction *)transfer_context;
struct ol_softc *scn = transaction->hif_state->scn;
transaction->bmi_response_length = nbytes;
transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE;
/* when both send/recv are done, the sem can be released */
if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE) {
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
}
}
#endif
CDF_STATUS hif_exchange_bmi_msg(struct ol_softc *scn,
uint8_t *bmi_request,
uint32_t request_length,
uint8_t *bmi_response,
uint32_t *bmi_response_lengthp, uint32_t TimeoutMS)
{
struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
struct HIF_CE_pipe_info *send_pipe_info =
&(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]);
struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl;
cdf_dma_addr_t CE_request, CE_response = 0;
struct BMI_transaction *transaction = NULL;
int status = CDF_STATUS_SUCCESS;
struct HIF_CE_pipe_info *recv_pipe_info =
&(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]);
struct CE_handle *ce_recv = recv_pipe_info->ce_hdl;
unsigned int mux_id = 0;
unsigned int transaction_id = 0xffff;
unsigned int user_flags = 0;
#ifdef BMI_RSP_POLLING
cdf_dma_addr_t buf;
unsigned int completed_nbytes, id, flags;
int i;
#endif
transaction =
(struct BMI_transaction *)cdf_mem_malloc(sizeof(*transaction));
if (unlikely(!transaction)) {
HIF_ERROR("%s: no memory", __func__);
return CDF_STATUS_E_NOMEM;
}
transaction_id = (mux_id & MUX_ID_MASK) |
(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
user_flags &= DESC_DATA_FLAG_MASK;
#endif
A_TARGET_ACCESS_LIKELY(scn);
/* Initialize bmi_transaction_sem to block */
cdf_semaphore_init(&transaction->bmi_transaction_sem);
cdf_semaphore_acquire(scn->cdf_dev, &transaction->bmi_transaction_sem);
transaction->hif_state = hif_state;
transaction->bmi_request_host = bmi_request;
transaction->bmi_request_length = request_length;
transaction->bmi_response_length = 0;
transaction->bmi_timeout_ms = TimeoutMS;
transaction->bmi_transaction_flags = 0;
/*
* CE_request = dma_map_single(dev,
* (void *)bmi_request, request_length, DMA_TO_DEVICE);
*/
CE_request = scn->bmi_cmd_da;
transaction->bmi_request_CE = CE_request;
if (bmi_response) {
/*
* CE_response = dma_map_single(dev, bmi_response,
* BMI_DATASZ_MAX, DMA_FROM_DEVICE);
*/
CE_response = scn->bmi_rsp_da;
transaction->bmi_response_host = bmi_response;
transaction->bmi_response_CE = CE_response;
/* dma_cache_sync(dev, bmi_response,
BMI_DATASZ_MAX, DMA_FROM_DEVICE); */
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev,
CE_response,
BMI_DATASZ_MAX,
DMA_FROM_DEVICE);
ce_recv_buf_enqueue(ce_recv, transaction,
transaction->bmi_response_CE);
/* NB: see HIF_BMI_recv_done */
} else {
transaction->bmi_response_host = NULL;
transaction->bmi_response_CE = 0;
}
/* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_request,
request_length, DMA_TO_DEVICE);
status =
ce_send(ce_send_hdl, transaction,
CE_request, request_length,
transaction_id, 0, user_flags);
ASSERT(status == CDF_STATUS_SUCCESS);
/* NB: see hif_bmi_send_done */
/* TBDXXX: handle timeout */
/* Wait for BMI request/response transaction to complete */
/* Always just wait for BMI request here if
* BMI_RSP_POLLING is defined */
while (cdf_semaphore_acquire
(scn->cdf_dev, &transaction->bmi_transaction_sem)) {
/* need some break-out condition (timeout?) */
}
if (bmi_response) {
#ifdef BMI_RSP_POLLING
/* Fix for EV118783: do not wait on a semaphore for the BMI response,
* since the corresponding interrupt may be lost;
* poll for the BMI response instead.
*/
i = 0;
while (ce_completed_recv_next(
ce_recv, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
if (i++ > BMI_RSP_TO_MILLISEC) {
HIF_ERROR("%s:error, can't get bmi response\n",
__func__);
status = CDF_STATUS_E_BUSY;
break;
}
OS_DELAY(1000);
}
if ((status == CDF_STATUS_SUCCESS) && bmi_response_lengthp)
*bmi_response_lengthp = completed_nbytes;
#else
if ((status == CDF_STATUS_SUCCESS) && bmi_response_lengthp) {
*bmi_response_lengthp =
transaction->bmi_response_length;
}
#endif
}
/* dma_unmap_single(dev, transaction->bmi_request_CE,
request_length, DMA_TO_DEVICE); */
/* bus_unmap_single(scn->sc_osdev,
transaction->bmi_request_CE,
request_length, BUS_DMA_TODEVICE); */
if (status != CDF_STATUS_SUCCESS) {
cdf_dma_addr_t unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
unsigned int toeplitz_hash_result;
ce_cancel_send_next(ce_send_hdl,
NULL, NULL, &unused_buffer,
&unused_nbytes, &unused_id,
&toeplitz_hash_result);
}
A_TARGET_ACCESS_UNLIKELY(scn);
cdf_mem_free(transaction);
return status;
}
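/*
 * Minimal usage sketch (hypothetical caller; buffer names are
 * illustrative). Note that the request/response buffers must be the
 * ones backing scn->bmi_cmd_da/scn->bmi_rsp_da, since those DMA
 * addresses are what the function actually hands to the CE:
 *
 *   uint32_t rsp_len = BMI_DATASZ_MAX;
 *
 *   if (hif_exchange_bmi_msg(scn, bmi_cmd_buf, cmd_len,
 *                            bmi_rsp_buf, &rsp_len, 0) !=
 *       CDF_STATUS_SUCCESS)
 *       return CDF_STATUS_E_FAILURE;
 */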

View File

@ -1,45 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CE_BMI_H__
#define __CE_BMI_H__
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "cdf_lock.h"
#include "ce_api.h"
#include "cepci.h"
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int flags);
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int sw_index,
unsigned int hw_index, uint32_t toeplitz_hash_result);
#endif /* __CE_BMI_H__ */

View File

@ -1,456 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include <htc_services.h>
#include "regtable.h"
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "hif_debug.h"
#include "epping_main.h"
#include "cds_concurrency.h"
void hif_dump_target_memory(struct ol_softc *scn, void *ramdump_base,
uint32_t address, uint32_t size)
{
uint32_t loc = address;
uint32_t val = 0;
uint32_t j = 0;
u8 *temp = ramdump_base;
A_TARGET_ACCESS_BEGIN(scn);
while (j < size) {
val = hif_read32_mb(scn->mem + loc + j);
cdf_mem_copy(temp, &val, 4);
j += 4;
temp += 4;
}
A_TARGET_ACCESS_END(scn);
}
/*
* TBDXXX: Should be a function call specific to each Target-type.
* This convoluted macro converts from Target CPU Virtual Address
* Space to CE Address Space. As part of this process, we
* conservatively fetch the current PCIE_BAR. MOST of the time,
* this should match the upper bits of PCI space for this device;
* but that's not guaranteed.
*/
#ifdef QCA_WIFI_3_0
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
(scn->mem_pa + addr)
#else
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
(((hif_read32_mb((pci_addr) + \
(SOC_CORE_BASE_ADDRESS|CORE_CTRL_ADDRESS)) & 0x7ff) << 21) \
| 0x100000 | ((addr) & 0xfffff))
#endif
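/*
 * Worked example for the pre-QCA_WIFI_3_0 form above (values are
 * illustrative): if the low 11 bits read back from
 * SOC_CORE_BASE_ADDRESS|CORE_CTRL_ADDRESS are 0x123 and
 * addr = 0x00401234, the resulting CE-space address is
 *   (0x123 << 21) | 0x100000 | (0x00401234 & 0xfffff) = 0x24701234.
 */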
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, where applicable, and that
* there is only a single user at any given time.
*/
CDF_STATUS
hif_diag_read_mem(struct ol_softc *scn, uint32_t address, uint8_t *data,
int nbytes)
{
struct HIF_CE_state *hif_state;
CDF_STATUS status = CDF_STATUS_SUCCESS;
cdf_dma_addr_t buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct CE_handle *ce_diag;
cdf_dma_addr_t CE_data; /* Host buffer address in CE space */
cdf_dma_addr_t CE_data_base = 0;
void *data_buf = NULL;
int i;
unsigned int mux_id = 0;
unsigned int transaction_id = 0xffff;
cdf_dma_addr_t ce_phy_addr = address;
unsigned int toeplitz_hash_result;
unsigned int user_flags = 0;
hif_state = (struct HIF_CE_state *)scn->hif_hdl;
transaction_id = (mux_id & MUX_ID_MASK) |
(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
user_flags &= DESC_DATA_FLAG_MASK;
#endif
/* This code cannot handle reads to non-memory space. Redirect to the
* register read fn but preserve the multi word read capability of
* this fn
*/
if (address < DRAM_BASE_ADDRESS) {
if ((address & 0x3) || ((uintptr_t) data & 0x3))
return CDF_STATUS_E_INVAL;
while ((nbytes >= 4) &&
(CDF_STATUS_SUCCESS == (status =
hif_diag_read_access(scn, address,
(uint32_t *)data)))) {
nbytes -= sizeof(uint32_t);
address += sizeof(uint32_t);
data += sizeof(uint32_t);
}
return status;
}
ce_diag = hif_state->ce_diag;
A_TARGET_ACCESS_LIKELY(scn);
/*
* Allocate a temporary bounce buffer to hold caller's data
* to be DMA'ed from Target. This guarantees
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = cdf_os_mem_alloc_consistent(scn->cdf_dev,
orig_nbytes, &CE_data_base, 0);
if (!data_buf) {
status = CDF_STATUS_E_NOMEM;
goto done;
}
cdf_mem_set(data_buf, orig_nbytes, 0);
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data_base,
orig_nbytes, DMA_FROM_DEVICE);
remaining_bytes = orig_nbytes;
CE_data = CE_data_base;
while (remaining_bytes) {
nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
{
status = ce_recv_buf_enqueue(ce_diag, NULL, CE_data);
if (status != CDF_STATUS_SUCCESS)
goto done;
}
{ /* Request CE to send from Target(!)
* address to Host buffer */
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from
* Target CPU virtual address space
* to
* CE address space
*/
A_TARGET_ACCESS_BEGIN_RET(scn);
ce_phy_addr =
TARG_CPU_SPACE_TO_CE_SPACE(scn->mem, address);
A_TARGET_ACCESS_END_RET(scn);
status =
ce_send(ce_diag, NULL, ce_phy_addr, nbytes,
transaction_id, 0, user_flags);
if (status != CDF_STATUS_SUCCESS)
goto done;
}
i = 0;
while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id, NULL, NULL,
&toeplitz_hash_result) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
if (buf != ce_phy_addr) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
i = 0;
while (ce_completed_recv_next
(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
if (buf != CE_data) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
remaining_bytes -= nbytes;
address += nbytes;
CE_data += nbytes;
}
done:
A_TARGET_ACCESS_UNLIKELY(scn);
if (status == CDF_STATUS_SUCCESS)
cdf_mem_copy(data, data_buf, orig_nbytes);
else
HIF_ERROR("%s failure (0x%x)", __func__, address);
if (data_buf)
cdf_os_mem_free_consistent(scn->cdf_dev, orig_nbytes,
data_buf, CE_data_base, 0);
return status;
}
/* Read 4-byte aligned data from Target memory or register */
CDF_STATUS hif_diag_read_access(struct ol_softc *scn,
uint32_t address, uint32_t *data)
{
struct HIF_CE_state *hif_state;
hif_state = (struct HIF_CE_state *)scn->hif_hdl;
if (address >= DRAM_BASE_ADDRESS) {
/* Assume range doesn't cross this boundary */
return hif_diag_read_mem(scn, address, (uint8_t *) data,
sizeof(uint32_t));
} else {
A_TARGET_ACCESS_BEGIN_RET(scn);
*data = A_TARGET_READ(scn, address);
A_TARGET_ACCESS_END_RET(scn);
return CDF_STATUS_SUCCESS;
}
}
CDF_STATUS hif_diag_write_mem(struct ol_softc *scn,
uint32_t address, uint8_t *data, int nbytes)
{
struct HIF_CE_state *hif_state;
CDF_STATUS status = CDF_STATUS_SUCCESS;
cdf_dma_addr_t buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct CE_handle *ce_diag;
void *data_buf = NULL;
cdf_dma_addr_t CE_data; /* Host buffer address in CE space */
cdf_dma_addr_t CE_data_base = 0;
int i;
unsigned int mux_id = 0;
unsigned int transaction_id = 0xffff;
cdf_dma_addr_t ce_phy_addr = address;
unsigned int toeplitz_hash_result;
unsigned int user_flags = 0;
hif_state = (struct HIF_CE_state *)scn->hif_hdl;
ce_diag = hif_state->ce_diag;
transaction_id = (mux_id & MUX_ID_MASK) |
(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
user_flags &= DESC_DATA_FLAG_MASK;
#endif
A_TARGET_ACCESS_LIKELY(scn);
/*
* Allocate a temporary bounce buffer to hold caller's data
* to be DMA'ed to Target. This guarantees
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = cdf_os_mem_alloc_consistent(scn->cdf_dev,
orig_nbytes, &CE_data_base, 0);
if (!data_buf) {
status = A_NO_MEMORY;
goto done;
}
/* Copy caller's data to allocated DMA buf */
cdf_mem_copy(data_buf, data, orig_nbytes);
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data_base,
orig_nbytes, DMA_TO_DEVICE);
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from
* Target CPU virtual address space
* to
* CE address space
*/
A_TARGET_ACCESS_BEGIN_RET(scn);
ce_phy_addr = TARG_CPU_SPACE_TO_CE_SPACE(scn->mem, address);
A_TARGET_ACCESS_END_RET(scn);
remaining_bytes = orig_nbytes;
CE_data = CE_data_base;
while (remaining_bytes) {
nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
{ /* Set up to receive directly into Target(!) address */
status = ce_recv_buf_enqueue(ce_diag,
NULL, ce_phy_addr);
if (status != CDF_STATUS_SUCCESS)
goto done;
}
{
/*
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
status =
ce_send(ce_diag, NULL,
(cdf_dma_addr_t) CE_data, nbytes,
transaction_id, 0, user_flags);
if (status != CDF_STATUS_SUCCESS)
goto done;
}
i = 0;
while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
NULL, NULL, &toeplitz_hash_result) !=
CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
if (buf != CE_data) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
i = 0;
while (ce_completed_recv_next
(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
if (buf != ce_phy_addr) {
status = CDF_STATUS_E_FAILURE;
goto done;
}
remaining_bytes -= nbytes;
address += nbytes;
CE_data += nbytes;
}
done:
A_TARGET_ACCESS_UNLIKELY(scn);
if (data_buf) {
cdf_os_mem_free_consistent(scn->cdf_dev, orig_nbytes,
data_buf, CE_data_base, 0);
}
if (status != CDF_STATUS_SUCCESS) {
HIF_ERROR("%s failure (0x%llu)", __func__,
(uint64_t)ce_phy_addr);
}
return status;
}
/* Write 4B data to Target memory or register */
CDF_STATUS
hif_diag_write_access(struct ol_softc *scn, uint32_t address, uint32_t data)
{
struct HIF_CE_state *hif_state;
hif_state = (struct HIF_CE_state *)scn->hif_hdl;
if (address >= DRAM_BASE_ADDRESS) {
/* Assume range doesn't cross this boundary */
uint32_t data_buf = data;
return hif_diag_write_mem(scn, address,
(uint8_t *) &data_buf,
sizeof(uint32_t));
} else {
A_TARGET_ACCESS_BEGIN_RET(scn);
A_TARGET_WRITE(scn, address, data);
A_TARGET_ACCESS_END_RET(scn);
return CDF_STATUS_SUCCESS;
}
}
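/*
 * Usage sketch (hypothetical caller): the *_access helpers choose the
 * register path or the memory path based on DRAM_BASE_ADDRESS, so a
 * caller needs only one call per direction:
 *
 *   uint32_t val;
 *
 *   hif_diag_read_access(scn, reg_or_mem_addr, &val);
 *   hif_diag_write_access(scn, reg_or_mem_addr, val | some_bit);
 *
 * Bulk reads of target DRAM go through hif_diag_read_mem() directly.
 */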

View File

@ -1,365 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__
#include <hif.h> /* A_TARGET_WRITE */
/* Copy Engine operational state */
enum CE_op_state {
CE_UNUSED,
CE_PAUSED,
CE_RUNNING,
};
enum ol_ath_hif_ce_ecodes {
CE_RING_DELTA_FAIL = 0
};
struct CE_src_desc;
/* Copy Engine Ring internal state */
struct CE_ring_state {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
/*
* For the dest ring, this is the next index to be processed
* by software after data has been received into it.
*
* For src ring, this is the last descriptor that was sent
* and completion processed by software.
*
* Regardless of src or dest ring, this is an invariant
* (modulo ring size):
* write index >= read index >= sw_index
*/
unsigned int sw_index;
unsigned int write_index; /* cached copy */
/*
* For src ring, this is the next index not yet processed by HW.
* This is a cached copy of the real HW index (read index), used
* for avoiding reading the HW index register more often than
* necessary.
* This extends the invariant:
* write index >= read index >= hw_index >= sw_index
*
* For dest ring, this is currently unused.
*/
unsigned int hw_index; /* cached copy */
/* Start of DMA-coherent area reserved for descriptors */
void *base_addr_owner_space_unaligned; /* Host address space */
cdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */
/*
* Actual start of descriptors.
* Aligned to descriptor-size boundary.
* Points into reserved DMA-coherent area, above.
*/
void *base_addr_owner_space; /* Host address space */
cdf_dma_addr_t base_addr_CE_space; /* CE address space */
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
*/
char *shadow_base_unaligned;
struct CE_src_desc *shadow_base;
unsigned int low_water_mark_nentries;
unsigned int high_water_mark_nentries;
void **per_transfer_context;
OS_DMA_MEM_CONTEXT(ce_dmacontext) /* OS Specific DMA context */
};
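/*
 * Example of the source-ring invariant documented above (illustrative
 * numbers; nentries = 512, so nentries_mask = 511): after the host has
 * queued three sends and the hardware has consumed one of them,
 *   write_index = 3, hw_index = 1, sw_index = 0
 * which satisfies write_index >= hw_index >= sw_index, and
 * CE_RING_DELTA(511, sw_index, write_index) == 3 entries still await
 * completion processing.
 */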
/* Copy Engine internal state */
struct CE_state {
struct ol_softc *scn;
unsigned int id;
unsigned int attr_flags; /* CE_ATTR_* */
uint32_t ctrl_addr; /* relative to BAR */
enum CE_op_state state;
#ifdef WLAN_FEATURE_FASTPATH
u_int32_t download_len; /* pkt download length for source ring */
#endif /* WLAN_FEATURE_FASTPATH */
ce_send_cb send_cb;
void *send_context;
CE_recv_cb recv_cb;
void *recv_context;
/* misc_cbs - are any callbacks besides send and recv enabled? */
uint8_t misc_cbs;
CE_watermark_cb watermark_cb;
void *wm_context;
/*Record the state of the copy compl interrupt */
int disable_copy_compl_intr;
unsigned int src_sz_max;
struct CE_ring_state *src_ring;
struct CE_ring_state *dest_ring;
atomic_t rx_pending;
cdf_spinlock_t ce_index_lock;
bool force_break; /* Flag to indicate whether to
* break out of the DPC context */
unsigned int receive_count; /* number of receive buffers
* handled in one interrupt
* DPC routine */
/* epping */
bool timer_inited;
cdf_softirq_timer_t poll_timer;
void (*lro_flush_cb)(void *);
void *lro_data;
};
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
(cdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
(cdf_dma_addr_t)((desc)->buffer_addr)
#endif
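/*
 * On QCA_WIFI_3_0 parts the descriptor carries a wider DMA address:
 * the low 32 bits live in buffer_addr and the next 5 bits in
 * buffer_addr_hi, so HIF_CE_DESC_ADDR_TO_DMA() above reassembles a
 * 37-bit address; pre-3.0 descriptors carry a plain 32-bit address.
 */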
#ifdef QCA_WIFI_3_0
struct CE_src_desc {
uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
uint32_t gather:1,
enable_11h:1,
meta_data_low:2, /* fw_metadata_low */
packet_result_offset:12,
toeplitz_hash_enable:1,
addr_y_search_disable:1,
addr_x_search_disable:1,
misc_int_disable:1,
target_int_disable:1,
host_int_disable:1,
dest_byte_swap:1,
byte_swap:1,
type:2,
tx_classify:1,
buffer_addr_hi:5;
uint32_t meta_data:16, /* fw_metadata_high */
nbytes:16; /* length in register map */
#else
uint32_t buffer_addr_hi:5,
tx_classify:1,
type:2,
byte_swap:1, /* src_byte_swap */
dest_byte_swap:1,
host_int_disable:1,
target_int_disable:1,
misc_int_disable:1,
addr_x_search_disable:1,
addr_y_search_disable:1,
toeplitz_hash_enable:1,
packet_result_offset:12,
meta_data_low:2, /* fw_metadata_low */
enable_11h:1,
gather:1;
uint32_t nbytes:16, /* length in register map */
meta_data:16; /* fw_metadata_high */
#endif
uint32_t toeplitz_hash_result:32;
};
struct CE_dest_desc {
uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
uint32_t gather:1,
enable_11h:1,
meta_data_low:2, /* fw_metadata_low */
packet_result_offset:12,
toeplitz_hash_enable:1,
addr_y_search_disable:1,
addr_x_search_disable:1,
misc_int_disable:1,
target_int_disable:1,
host_int_disable:1,
byte_swap:1,
src_byte_swap:1,
type:2,
tx_classify:1,
buffer_addr_hi:5;
uint32_t meta_data:16, /* fw_metadata_high */
nbytes:16; /* length in register map */
#else
uint32_t buffer_addr_hi:5,
tx_classify:1,
type:2,
src_byte_swap:1,
byte_swap:1, /* dest_byte_swap */
host_int_disable:1,
target_int_disable:1,
misc_int_disable:1,
addr_x_search_disable:1,
addr_y_search_disable:1,
toeplitz_hash_enable:1,
packet_result_offset:12,
meta_data_low:2, /* fw_metadata_low */
enable_11h:1,
gather:1;
uint32_t nbytes:16, /* length in register map */
meta_data:16; /* fw_metadata_high */
#endif
uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
uint32_t meta_data:14,
byte_swap:1,
gather:1,
nbytes:16;
#else
uint32_t nbytes:16,
gather:1,
byte_swap:1,
meta_data:14;
#endif
};
struct CE_dest_desc {
uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
uint32_t meta_data:14,
byte_swap:1,
gather:1,
nbytes:16;
#else
uint32_t nbytes:16,
gather:1,
byte_swap:1,
meta_data:14;
#endif
};
#endif /* QCA_WIFI_3_0 */
#define CE_SENDLIST_ITEMS_MAX 12
/**
* union ce_desc - unified data type for ce descriptors
*
* Both src and destination descriptors follow the same format.
* They use different data structures for different access semantics.
* Here we provide a unifying data type.
*/
union ce_desc {
struct CE_src_desc src_desc;
struct CE_dest_desc dest_desc;
};
/**
* enum hif_ce_event_type - HIF copy engine event type
* @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
* @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
* @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
* @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
* @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
* @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
* @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
* @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
* @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
* @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
* @HIF_CE_REAP_EXIT: records when we process completion outside of a bh
*/
enum hif_ce_event_type {
HIF_RX_DESC_POST,
HIF_RX_DESC_COMPLETION,
HIF_TX_GATHER_DESC_POST,
HIF_TX_DESC_POST,
HIF_TX_DESC_COMPLETION,
HIF_IRQ_EVENT,
HIF_CE_TASKLET_ENTRY,
HIF_CE_TASKLET_RESCHEDULE,
HIF_CE_TASKLET_EXIT,
HIF_CE_REAP_ENTRY,
HIF_CE_REAP_EXIT,
};
void ce_init_ce_desc_event_log(int ce_id, int size);
void hif_record_ce_desc_event(int ce_id, enum hif_ce_event_type type,
union ce_desc *descriptor, void *memory, int index);
enum ce_sendlist_type_e {
CE_SIMPLE_BUFFER_TYPE,
/* TBDXXX: CE_RX_DESC_LIST, */
};
/*
* There's a public "ce_sendlist" and a private "ce_sendlist_s".
* The former is an opaque structure with sufficient space
* to hold the latter. The latter is the actual structure
* definition and it is only used internally. The opaque version
* of the structure allows callers to allocate an instance on the
* run-time stack without knowing any of the details of the
* structure layout.
*/
struct ce_sendlist_s {
unsigned int num_items;
struct ce_sendlist_item {
enum ce_sendlist_type_e send_type;
dma_addr_t data; /* e.g. buffer or desc list */
union {
unsigned int nbytes; /* simple buffer */
unsigned int ndesc; /* Rx descriptor list */
} u;
/* flags: externally-specified flags;
* OR-ed with internal flags */
uint32_t flags;
uint32_t user_flags;
} item[CE_SENDLIST_ITEMS_MAX];
};
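/*
 * Typical flow (a sketch; the helper names come from ce_api.h and are
 * assumed here, not defined in this header): the caller builds a
 * sendlist on the run-time stack using the public opaque type and
 * hands it to the CE in a single call, so multi-fragment sends are
 * queued atomically:
 *
 *   struct ce_sendlist sendlist;
 *
 *   ce_sendlist_init(&sendlist);
 *   ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *   ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *   ce_sendlist_send(ce_hdl, per_transfer_ctx, &sendlist, transfer_id);
 */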
#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
#endif
/* which ring of a CE? */
#define CE_RING_SRC 0
#define CE_RING_DEST 1
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER 0x00010000 /* Use Gather */
#endif /* __COPY_ENGINE_INTERNAL_H__ */

File diff suppressed because it is too large

View File

@ -1,136 +0,0 @@
/*
* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CE_H__
#define __CE_H__
#include "cdf_atomic.h"
#include "cdf_lock.h"
#include "hif.h"
#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4
#define CE_OFFSET 0x00000400
#define CE_USEFUL_SIZE 0x00000058
/**
* enum ce_id_type
*
* @ce_id_type: Copy engine ID
*/
enum ce_id_type {
CE_ID_0,
CE_ID_1,
CE_ID_2,
CE_ID_3,
CE_ID_4,
CE_ID_5,
CE_ID_6,
CE_ID_7,
CE_ID_8,
CE_ID_9,
CE_ID_10,
CE_ID_11,
CE_ID_MAX
};
enum ol_ath_hif_pkt_ecodes {
HIF_PIPE_NO_RESOURCE = 0
};
struct HIF_CE_state;
/* Per-pipe state. */
struct HIF_CE_pipe_info {
/* Handle of underlying Copy Engine */
struct CE_handle *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
uint8_t pipe_num;
/* Convenience back pointer to HIF_CE_state. */
struct HIF_CE_state *HIF_CE_state;
/* Instantaneous number of receive buffers that should be posted */
atomic_t recv_bufs_needed;
cdf_size_t buf_sz;
cdf_spinlock_t recv_bufs_needed_lock;
cdf_spinlock_t completion_freeq_lock;
/* Limit the number of outstanding send requests. */
int num_sends_allowed;
/* adding three counts for debugging ring buffer errors */
uint32_t nbuf_alloc_err_count;
uint32_t nbuf_dma_err_count;
uint32_t nbuf_ce_enqueue_err_count;
};
/**
* struct ce_tasklet_entry
*
* @intr_tq: intr_tq
* @ce_id: ce_id
* @inited: inited
* @hif_ce_state: hif_ce_state
* @from_irq: from_irq
*/
struct ce_tasklet_entry {
struct tasklet_struct intr_tq;
enum ce_id_type ce_id;
bool inited;
void *hif_ce_state;
};
struct HIF_CE_state {
struct ol_softc *scn;
bool started;
struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
cdf_spinlock_t keep_awake_lock;
unsigned int keep_awake_count;
bool verified_awake;
bool fake_sleep;
cdf_softirq_timer_t sleep_timer;
bool sleep_timer_init;
unsigned long sleep_ticks;
/* Per-pipe state. */
struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
/* to be activated after BMI_DONE */
struct hif_msg_callbacks msg_callbacks_pending;
/* current msg callbacks in use */
struct hif_msg_callbacks msg_callbacks_current;
/* Target address used to signal a pending firmware event */
uint32_t fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
struct CE_handle *ce_diag;
};
int hif_dump_ce_registers(struct ol_softc *scn);
#endif /* __CE_H__ */

View File

@ -1,544 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CE_REG_H__
#define __CE_REG_H__
#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \
- CE0_BASE_ADDRESS)/(CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
#define DST_WR_INDEX_ADDRESS (scn->target_ce_def->d_DST_WR_INDEX_ADDRESS)
#define SRC_WATERMARK_ADDRESS (scn->target_ce_def->d_SRC_WATERMARK_ADDRESS)
#define SRC_WATERMARK_LOW_MASK (scn->target_ce_def->d_SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_HIGH_MASK (scn->target_ce_def->d_SRC_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_LOW_MASK (scn->target_ce_def->d_DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_HIGH_MASK (scn->target_ce_def->d_DST_WATERMARK_HIGH_MASK)
#define CURRENT_SRRI_ADDRESS (scn->target_ce_def->d_CURRENT_SRRI_ADDRESS)
#define CURRENT_DRRI_ADDRESS (scn->target_ce_def->d_CURRENT_DRRI_ADDRESS)
#define SHADOW_VALUE0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_0)
#define SHADOW_VALUE1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_1)
#define SHADOW_VALUE2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_2)
#define SHADOW_VALUE3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_3)
#define SHADOW_VALUE4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_4)
#define SHADOW_VALUE5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_5)
#define SHADOW_VALUE6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_6)
#define SHADOW_VALUE7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_7)
#define SHADOW_VALUE8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_8)
#define SHADOW_VALUE9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_9)
#define SHADOW_VALUE10 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_10)
#define SHADOW_VALUE11 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_11)
#define SHADOW_VALUE12 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_12)
#define SHADOW_VALUE13 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_13)
#define SHADOW_VALUE14 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_14)
#define SHADOW_VALUE15 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_15)
#define SHADOW_VALUE16 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_16)
#define SHADOW_VALUE17 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_17)
#define SHADOW_VALUE18 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_18)
#define SHADOW_VALUE19 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_19)
#define SHADOW_VALUE20 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_20)
#define SHADOW_VALUE21 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_21)
#define SHADOW_VALUE22 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_22)
#define SHADOW_VALUE23 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_23)
#define SHADOW_ADDRESS0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_0)
#define SHADOW_ADDRESS1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_1)
#define SHADOW_ADDRESS2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_2)
#define SHADOW_ADDRESS3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_3)
#define SHADOW_ADDRESS4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_4)
#define SHADOW_ADDRESS5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_5)
#define SHADOW_ADDRESS6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_6)
#define SHADOW_ADDRESS7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_7)
#define SHADOW_ADDRESS8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_8)
#define SHADOW_ADDRESS9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_9)
#define SHADOW_ADDRESS10 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_10)
#define SHADOW_ADDRESS11 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_11)
#define SHADOW_ADDRESS12 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_12)
#define SHADOW_ADDRESS13 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_13)
#define SHADOW_ADDRESS14 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_14)
#define SHADOW_ADDRESS15 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_15)
#define SHADOW_ADDRESS16 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_16)
#define SHADOW_ADDRESS17 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_17)
#define SHADOW_ADDRESS18 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_18)
#define SHADOW_ADDRESS19 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_19)
#define SHADOW_ADDRESS20 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_20)
#define SHADOW_ADDRESS21 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_21)
#define SHADOW_ADDRESS22 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_22)
#define SHADOW_ADDRESS23 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_23)
#define SHADOW_ADDRESS(i) (SHADOW_ADDRESS0 + i*(SHADOW_ADDRESS1-SHADOW_ADDRESS0))
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK \
(scn->target_ce_def->d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK)
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK \
(scn->target_ce_def->d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK)
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK \
(scn->target_ce_def->d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK \
(scn->target_ce_def->d_HOST_IS_DST_RING_LOW_WATERMARK_MASK)
#define MISC_IS_ADDRESS (scn->target_ce_def->d_MISC_IS_ADDRESS)
#define HOST_IS_COPY_COMPLETE_MASK \
(scn->target_ce_def->d_HOST_IS_COPY_COMPLETE_MASK)
#define CE_WRAPPER_BASE_ADDRESS (scn->target_ce_def->d_CE_WRAPPER_BASE_ADDRESS)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS \
(scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)
#define CE_DDR_ADDRESS_FOR_RRI_LOW \
(scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_LOW)
#define CE_DDR_ADDRESS_FOR_RRI_HIGH \
(scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_HIGH)
#define HOST_IE_COPY_COMPLETE_MASK \
(scn->target_ce_def->d_HOST_IE_COPY_COMPLETE_MASK)
#define SR_BA_ADDRESS (scn->target_ce_def->d_SR_BA_ADDRESS)
#define SR_BA_ADDRESS_HIGH (scn->target_ce_def->d_SR_BA_ADDRESS_HIGH)
#define SR_SIZE_ADDRESS (scn->target_ce_def->d_SR_SIZE_ADDRESS)
#define CE_CTRL1_ADDRESS (scn->target_ce_def->d_CE_CTRL1_ADDRESS)
#define CE_CTRL1_DMAX_LENGTH_MASK \
(scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_MASK)
#define DR_BA_ADDRESS (scn->target_ce_def->d_DR_BA_ADDRESS)
#define DR_BA_ADDRESS_HIGH (scn->target_ce_def->d_DR_BA_ADDRESS_HIGH)
#define DR_SIZE_ADDRESS (scn->target_ce_def->d_DR_SIZE_ADDRESS)
#define CE_CMD_REGISTER (scn->target_ce_def->d_CE_CMD_REGISTER)
#define CE_MSI_ADDRESS (scn->target_ce_def->d_CE_MSI_ADDRESS)
#define CE_MSI_ADDRESS_HIGH (scn->target_ce_def->d_CE_MSI_ADDRESS_HIGH)
#define CE_MSI_DATA (scn->target_ce_def->d_CE_MSI_DATA)
#define CE_MSI_ENABLE_BIT (scn->target_ce_def->d_CE_MSI_ENABLE_BIT)
#define MISC_IE_ADDRESS (scn->target_ce_def->d_MISC_IE_ADDRESS)
#define MISC_IS_AXI_ERR_MASK (scn->target_ce_def->d_MISC_IS_AXI_ERR_MASK)
#define MISC_IS_DST_ADDR_ERR_MASK \
(scn->target_ce_def->d_MISC_IS_DST_ADDR_ERR_MASK)
#define MISC_IS_SRC_LEN_ERR_MASK \
(scn->target_ce_def->d_MISC_IS_SRC_LEN_ERR_MASK)
#define MISC_IS_DST_MAX_LEN_VIO_MASK \
(scn->target_ce_def->d_MISC_IS_DST_MAX_LEN_VIO_MASK)
#define MISC_IS_DST_RING_OVERFLOW_MASK \
(scn->target_ce_def->d_MISC_IS_DST_RING_OVERFLOW_MASK)
#define MISC_IS_SRC_RING_OVERFLOW_MASK \
(scn->target_ce_def->d_MISC_IS_SRC_RING_OVERFLOW_MASK)
#define SRC_WATERMARK_LOW_LSB (scn->target_ce_def->d_SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_HIGH_LSB (scn->target_ce_def->d_SRC_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_LOW_LSB (scn->target_ce_def->d_DST_WATERMARK_LOW_LSB)
#define DST_WATERMARK_HIGH_LSB (scn->target_ce_def->d_DST_WATERMARK_HIGH_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
(scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK)
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
(scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_CTRL1_DMAX_LENGTH_LSB (scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_IDX_UPD_EN (scn->target_ce_def->d_CE_CTRL1_IDX_UPD_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK \
(scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK \
(scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \
(scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \
(scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB)
#define WLAN_DEBUG_INPUT_SEL_OFFSET \
(scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_OFFSET)
#define WLAN_DEBUG_INPUT_SEL_SRC_MSB \
(scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MSB)
#define WLAN_DEBUG_INPUT_SEL_SRC_LSB \
(scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_LSB)
#define WLAN_DEBUG_INPUT_SEL_SRC_MASK \
(scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MASK)
#define WLAN_DEBUG_CONTROL_OFFSET (scn->targetdef->d_WLAN_DEBUG_CONTROL_OFFSET)
#define WLAN_DEBUG_CONTROL_ENABLE_MSB \
(scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MSB)
#define WLAN_DEBUG_CONTROL_ENABLE_LSB \
(scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_LSB)
#define WLAN_DEBUG_CONTROL_ENABLE_MASK \
(scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MASK)
#define WLAN_DEBUG_OUT_OFFSET (scn->targetdef->d_WLAN_DEBUG_OUT_OFFSET)
#define WLAN_DEBUG_OUT_DATA_MSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MSB)
#define WLAN_DEBUG_OUT_DATA_LSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_LSB)
#define WLAN_DEBUG_OUT_DATA_MASK (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MASK)
#define AMBA_DEBUG_BUS_OFFSET (scn->targetdef->d_AMBA_DEBUG_BUS_OFFSET)
#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB \
(scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB)
#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB \
(scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB)
#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK \
(scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK)
#define AMBA_DEBUG_BUS_SEL_MSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MSB)
#define AMBA_DEBUG_BUS_SEL_LSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_LSB)
#define AMBA_DEBUG_BUS_SEL_MASK (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MASK)
#define CE_WRAPPER_DEBUG_OFFSET (scn->target_ce_def->d_CE_WRAPPER_DEBUG_OFFSET)
#define CE_WRAPPER_DEBUG_SEL_MSB (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MSB)
#define CE_WRAPPER_DEBUG_SEL_LSB (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_LSB)
#define CE_WRAPPER_DEBUG_SEL_MASK (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MASK)
#define CE_DEBUG_OFFSET (scn->target_ce_def->d_CE_DEBUG_OFFSET)
#define CE_DEBUG_SEL_MSB (scn->target_ce_def->d_CE_DEBUG_SEL_MSB)
#define CE_DEBUG_SEL_LSB (scn->target_ce_def->d_CE_DEBUG_SEL_LSB)
#define CE_DEBUG_SEL_MASK (scn->target_ce_def->d_CE_DEBUG_SEL_MASK)
#define HOST_IE_ADDRESS (scn->target_ce_def->d_HOST_IE_ADDRESS)
#define HOST_IS_ADDRESS (scn->target_ce_def->d_HOST_IS_ADDRESS)
#define SRC_WATERMARK_LOW_SET(x) \
(((x) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_HIGH_SET(x) \
(((x) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_LOW_SET(x) \
(((x) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_HIGH_SET(x) \
(((x) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
(((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
(((x) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
(((x) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) \
(((x) & WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> \
WLAN_DEBUG_INPUT_SEL_SRC_LSB)
#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) \
(((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & \
WLAN_DEBUG_INPUT_SEL_SRC_MASK)
#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) \
(((x) & WLAN_DEBUG_CONTROL_ENABLE_MASK) >> \
WLAN_DEBUG_CONTROL_ENABLE_LSB)
#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) \
(((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & \
WLAN_DEBUG_CONTROL_ENABLE_MASK)
#define WLAN_DEBUG_OUT_DATA_GET(x) \
(((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB)
#define WLAN_DEBUG_OUT_DATA_SET(x) \
(((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK)
#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_GET(x) \
(((x) & AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) >> \
AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB)
#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(x) \
(((x) << AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) & \
AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK)
#define AMBA_DEBUG_BUS_SEL_GET(x) \
(((x) & AMBA_DEBUG_BUS_SEL_MASK) >> AMBA_DEBUG_BUS_SEL_LSB)
#define AMBA_DEBUG_BUS_SEL_SET(x) \
(((x) << AMBA_DEBUG_BUS_SEL_LSB) & AMBA_DEBUG_BUS_SEL_MASK)
#define CE_WRAPPER_DEBUG_SEL_GET(x) \
(((x) & CE_WRAPPER_DEBUG_SEL_MASK) >> CE_WRAPPER_DEBUG_SEL_LSB)
#define CE_WRAPPER_DEBUG_SEL_SET(x) \
(((x) << CE_WRAPPER_DEBUG_SEL_LSB) & CE_WRAPPER_DEBUG_SEL_MASK)
#define CE_DEBUG_SEL_GET(x) (((x) & CE_DEBUG_SEL_MASK) >> CE_DEBUG_SEL_LSB)
#define CE_DEBUG_SEL_SET(x) (((x) << CE_DEBUG_SEL_LSB) & CE_DEBUG_SEL_MASK)
uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct ol_softc *scn,
uint32_t CE_ctrl_addr);
uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct ol_softc *scn,
uint32_t CE_ctrl_addr);
#define BITS0_TO_31(val) ((uint32_t)((uint64_t)(paddr_rri_on_ddr)\
& (uint64_t)(0xFFFFFFFF)))
#define BITS32_TO_35(val) ((uint32_t)(((uint64_t)(paddr_rri_on_ddr)\
& (uint64_t)(0xF00000000))>>32))
#define VADDR_FOR_CE(scn, CE_ctrl_addr)\
((uint32_t *)((uint64_t)(scn->vaddr_rri_on_ddr) + \
COPY_ENGINE_ID(CE_ctrl_addr)*sizeof(uint32_t)))
#define SRRI_FROM_DDR_ADDR(addr) ((*(addr)) & 0xFFFF)
#define DRRI_FROM_DDR_ADDR(addr) (((*(addr))>>16) & 0xFFFF)
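/*
 * When the read indices are mirrored to DDR, each CE gets one 32-bit
 * word in the vaddr_rri_on_ddr array: the source-ring read index sits
 * in bits [15:0] and the destination-ring read index in bits [31:16],
 * which is what SRRI_FROM_DDR_ADDR()/DRRI_FROM_DDR_ADDR() extract.
 */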
#ifdef ADRASTEA_RRI_ON_DDR
#ifdef SHADOW_REG_DEBUG
#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\
DEBUG_CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr)
#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\
DEBUG_CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)
#else
#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\
SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr))
#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\
DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr))
#endif
unsigned int hif_get_src_ring_read_index(struct ol_softc *scn,
uint32_t CE_ctrl_addr);
unsigned int hif_get_dst_ring_read_index(struct ol_softc *scn,
uint32_t CE_ctrl_addr);
#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr)\
hif_get_src_ring_read_index(scn, CE_ctrl_addr)
#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\
hif_get_dst_ring_read_index(scn, CE_ctrl_addr)
#else
#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS)
#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\
A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS)
/**
* If RRI on DDR is not enabled, the "get index from DDR" macros fall
* back to reading the register value, and force wake must be used for
* non-interrupt processing.
*/
#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\
A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS)
#endif
#define CE_SRC_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS, (addr))
#define CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH, (addr))
#define CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH)
#define CE_SRC_RING_SZ_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_SIZE_ADDRESS, (n))
#define CE_SRC_RING_DMAX_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \
(A_TARGET_READ(scn, (CE_ctrl_addr) + \
CE_CTRL1_ADDRESS) & ~CE_CTRL1_DMAX_LENGTH_MASK) | \
CE_CTRL1_DMAX_LENGTH_SET(n))
#define CE_IDX_UPD_EN_SET(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \
(A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \
| CE_CTRL1_IDX_UPD_EN))
#define CE_CMD_REGISTER_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CMD_REGISTER)
#define CE_CMD_REGISTER_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CMD_REGISTER, n)
#define CE_MSI_ADDR_LOW_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS, (addr))
#define CE_MSI_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS_HIGH, (addr))
#define CE_MSI_DATA_SET(scn, CE_ctrl_addr, data) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_DATA, (data))
#define CE_CTRL_REGISTER1_SET(scn, CE_ctrl_addr, val) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, val)
#define CE_CTRL_REGISTER1_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS)
#define CE_SRC_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + CE_CTRL1_ADDRESS) \
& ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n))
#define CE_DEST_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr)+CE_CTRL1_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + CE_CTRL1_ADDRESS) \
& ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n))
#define CE_DEST_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS, (addr))
#define CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH, (addr))
#define CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH)
#define CE_DEST_RING_SZ_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_SIZE_ADDRESS, (n))
#define CE_SRC_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \
& ~SRC_WATERMARK_HIGH_MASK) | \
SRC_WATERMARK_HIGH_SET(n))
#define CE_SRC_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \
& ~SRC_WATERMARK_LOW_MASK) | \
SRC_WATERMARK_LOW_SET(n))
#define CE_DEST_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \
& ~DST_WATERMARK_HIGH_MASK) | \
DST_WATERMARK_HIGH_SET(n))
#define CE_DEST_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \
(A_TARGET_READ(scn, \
(CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \
& ~DST_WATERMARK_LOW_MASK) | \
DST_WATERMARK_LOW_SET(n))
#define CE_COPY_COMPLETE_INTR_ENABLE(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \
A_TARGET_READ(scn, \
(CE_ctrl_addr) + HOST_IE_ADDRESS) | \
HOST_IE_COPY_COMPLETE_MASK)
#define CE_COPY_COMPLETE_INTR_DISABLE(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \
A_TARGET_READ(scn, \
(CE_ctrl_addr) + HOST_IE_ADDRESS) \
& ~HOST_IE_COPY_COMPLETE_MASK)
#define CE_BASE_ADDRESS(CE_id) \
(CE0_BASE_ADDRESS + ((CE1_BASE_ADDRESS - \
CE0_BASE_ADDRESS) * (CE_id)))
#define CE_WATERMARK_INTR_ENABLE(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \
A_TARGET_READ(scn, \
(CE_ctrl_addr) + HOST_IE_ADDRESS) | \
CE_WATERMARK_MASK)
#define CE_WATERMARK_INTR_DISABLE(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \
A_TARGET_READ(scn, \
(CE_ctrl_addr) + HOST_IE_ADDRESS) \
& ~CE_WATERMARK_MASK)
#define CE_ERROR_INTR_ENABLE(scn, CE_ctrl_addr) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + MISC_IE_ADDRESS, \
A_TARGET_READ(scn, \
(CE_ctrl_addr) + MISC_IE_ADDRESS) | CE_ERROR_MASK)
#define CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + MISC_IS_ADDRESS)
#define CE_ENGINE_INT_STATUS_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS)
#define CE_ENGINE_INT_STATUS_CLEAR(scn, CE_ctrl_addr, mask) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS, (mask))
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
MISC_IS_DST_ADDR_ERR_MASK | \
MISC_IS_SRC_LEN_ERR_MASK | \
MISC_IS_DST_MAX_LEN_VIO_MASK | \
MISC_IS_DST_RING_OVERFLOW_MASK | \
MISC_IS_SRC_RING_OVERFLOW_MASK)
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct CE_src_desc *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct CE_dest_desc *)baddr)[idx]))
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) \
(((idx) + 1) & (nentries_mask))
#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
(((idx) + (num)) & (nentries_mask))
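/*
 * Illustrative sketch (not part of the original header): how the ring
 * arithmetic macros above are typically combined.  For an 8-entry ring the
 * mask is 7; CE_RING_DELTA(7, 6, 2) = (2 - 6) & 7 = 4 entries pending, and
 * CE_RING_IDX_INCR(7, 7) wraps back to 0.  The helper name below is
 * hypothetical.
 */
static inline unsigned int ce_ring_entries_pending(unsigned int nentries_mask,
						   unsigned int sw_index,
						   unsigned int hw_index)
{
	/* number of descriptors between the software and hardware indices */
	return CE_RING_DELTA(nentries_mask, sw_index, hw_index);
}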
#define CE_INTERRUPT_SUMMARY(scn) \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
A_TARGET_READ(scn, CE_WRAPPER_BASE_ADDRESS + \
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
#define READ_CE_DDR_ADDRESS_FOR_RRI_LOW(scn) \
(A_TARGET_READ(scn, \
CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW))
#define READ_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn) \
(A_TARGET_READ(scn, \
CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH))
#define WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, val) \
(A_TARGET_WRITE(scn, \
CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW, \
val))
#define WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, val) \
(A_TARGET_WRITE(scn, \
CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH, \
val))
/*Macro to increment CE packet errors*/
#define OL_ATH_CE_PKT_ERROR_COUNT_INCR(_scn, _ce_ecode) \
do { if (_ce_ecode == CE_RING_DELTA_FAIL) \
(_scn->pkt_stats.ce_ring_delta_fail_count) \
+= 1; } while (0)
/* Given a Copy Engine's ID, determine the interrupt number for that
* copy engine's interrupts.
*/
#define CE_ID_TO_INUM(id) (A_INUM_CE0_COPY_COMP_BASE + (id))
#define CE_INUM_TO_ID(inum) ((inum) - A_INUM_CE0_COPY_COMP_BASE)
#define CE0_BASE_ADDRESS (scn->target_ce_def->d_CE0_BASE_ADDRESS)
#define CE1_BASE_ADDRESS (scn->target_ce_def->d_CE1_BASE_ADDRESS)
#ifdef ADRASTEA_SHADOW_REGISTERS
#define NUM_SHADOW_REGISTERS 24
u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr);
u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr);
#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, CE_ctrl_addr), n)
#define CE_SRC_RING_WRITE_IDX_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, shadow_sr_wr_ind_addr(scn, CE_ctrl_addr))
#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, shadow_dst_wr_ind_addr(scn, CE_ctrl_addr), n)
#define CE_DEST_RING_WRITE_IDX_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, shadow_dst_wr_ind_addr(scn, CE_ctrl_addr))
#else
#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS, (n))
#define CE_SRC_RING_WRITE_IDX_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS)
#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \
A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS, (n))
#define CE_DEST_RING_WRITE_IDX_GET(scn, CE_ctrl_addr) \
A_TARGET_READ(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS)
#endif
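/*
 * Illustrative sketch (not from the original header): minimal programming of
 * a source ring using the accessors above.  The function and parameter names
 * are hypothetical; the real ring setup lives in ce_main.c.
 */
static inline void ce_src_ring_setup_example(struct ol_softc *scn,
					     uint32_t ctrl_addr,
					     uint32_t base_addr,
					     unsigned int nentries,
					     unsigned int dmax)
{
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr, base_addr);
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, dmax);
	/* start with the software write index at 0 */
	CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, 0);
}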
#endif /* __CE_REG_H__ */

File diff suppressed because it is too large


@ -1,411 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <osdep.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "a_types.h"
#include "athdefs.h"
#include "cdf_lock.h"
#include "cdf_types.h"
#include "cdf_status.h"
#include "cds_api.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif /* HIF_PCI */
#endif
#include "hif_debug.h"
#include "hif_napi.h"
/**
* ce_irq_status() - read CE IRQ status
 * @scn: struct ol_softc
 * @ce_id: copy engine id
 * @host_status: set to the CE host interrupt status register value
 *
 * Return: none; the status is returned through @host_status
*/
static inline void ce_irq_status(struct ol_softc *scn,
int ce_id, uint32_t *host_status)
{
uint32_t offset = HOST_IS_ADDRESS + CE_BASE_ADDRESS(ce_id);
*host_status = hif_read32_mb(scn->mem + offset);
}
/**
* reschedule_ce_tasklet_work_handler() - reschedule work
* @ce_id: ce_id
*
* Return: N/A
*/
static void reschedule_ce_tasklet_work_handler(int ce_id)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
struct HIF_CE_state *hif_ce_state;
if (NULL == scn) {
HIF_ERROR("%s: tasklet scn is null", __func__);
return;
}
hif_ce_state = (struct HIF_CE_state *)scn->hif_hdl;
if (scn->hif_init_done == false) {
HIF_ERROR("%s: wlan driver is unloaded", __func__);
return;
}
tasklet_schedule(&hif_ce_state->tasklets[ce_id].intr_tq);
return;
}
/**
* struct tasklet_work
*
* @id: ce_id
* @work: work
*/
struct tasklet_work {
enum ce_id_type id;
struct work_struct work;
};
static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;
/**
* work_handler() - work_handler
* @work: struct work_struct
*
* Return: N/A
*/
static void work_handler(struct work_struct *work)
{
struct tasklet_work *tmp;
tmp = container_of(work, struct tasklet_work, work);
reschedule_ce_tasklet_work_handler(tmp->id);
}
/**
* init_tasklet_work() - init_tasklet_work
* @work: struct work_struct
* @work_handler: work_handler
*
* Return: N/A
*/
#ifdef CONFIG_CNSS
static void init_tasklet_work(struct work_struct *work,
work_func_t work_handler)
{
cnss_init_work(work, work_handler);
}
#else
static void init_tasklet_work(struct work_struct *work,
work_func_t work_handler)
{
INIT_WORK(work, work_handler);
}
#endif
/**
* init_tasklet_workers() - init_tasklet_workers
*
* Return: N/A
*/
void init_tasklet_workers(void)
{
uint32_t id;
for (id = 0; id < CE_ID_MAX; id++) {
tasklet_workers[id].id = id;
init_tasklet_work(&tasklet_workers[id].work, work_handler);
}
work_initialized = true;
}
#ifdef CONFIG_SLUB_DEBUG_ON
/**
* ce_schedule_tasklet() - schedule ce tasklet
* @tasklet_entry: struct ce_tasklet_entry
*
* Return: N/A
*/
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
else
HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
__func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
* ce_schedule_tasklet() - schedule ce tasklet
* @tasklet_entry: struct ce_tasklet_entry
*
* Return: N/A
*/
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif
/**
* ce_tasklet() - ce_tasklet
* @data: data
*
* Return: N/A
*/
static void ce_tasklet(unsigned long data)
{
struct ce_tasklet_entry *tasklet_entry =
(struct ce_tasklet_entry *)data;
struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
struct ol_softc *scn = hif_ce_state->scn;
struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];
hif_record_ce_desc_event(tasklet_entry->ce_id, HIF_CE_TASKLET_ENTRY,
NULL, NULL, 0);
if (cdf_atomic_read(&scn->link_suspended)) {
HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
__func__, tasklet_entry->ce_id);
CDF_BUG(0);
}
ce_per_engine_service(scn, tasklet_entry->ce_id);
if (CE_state->lro_flush_cb != NULL) {
CE_state->lro_flush_cb(CE_state->lro_data);
}
if (ce_check_rx_pending(scn, tasklet_entry->ce_id)) {
/*
* There are frames pending, schedule tasklet to process them.
 * Enable the interrupt only when there are no pending frames in
* any of the Copy Engine pipes.
*/
hif_record_ce_desc_event(tasklet_entry->ce_id,
HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0);
ce_schedule_tasklet(tasklet_entry);
return;
}
if (scn->target_status != OL_TRGET_STATUS_RESET)
ce_irq_enable(scn, tasklet_entry->ce_id);
hif_record_ce_desc_event(tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
NULL, NULL, 0);
cdf_atomic_dec(&scn->active_tasklet_cnt);
}
/**
* ce_tasklet_init() - ce_tasklet_init
* @hif_ce_state: hif_ce_state
* @mask: mask
*
* Return: N/A
*/
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
int i;
for (i = 0; i < CE_COUNT_MAX; i++) {
if (mask & (1 << i)) {
hif_ce_state->tasklets[i].ce_id = i;
hif_ce_state->tasklets[i].inited = true;
hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
ce_tasklet,
(unsigned long)&hif_ce_state->tasklets[i]);
}
}
}
/**
* ce_tasklet_kill() - ce_tasklet_kill
* @hif_ce_state: hif_ce_state
*
* Return: N/A
*/
void ce_tasklet_kill(struct HIF_CE_state *hif_ce_state)
{
int i;
struct ol_softc *scn = hif_ce_state->scn;
for (i = 0; i < CE_COUNT_MAX; i++)
if (hif_ce_state->tasklets[i].inited) {
tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
hif_ce_state->tasklets[i].inited = false;
}
cdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
/**
* ce_irq_handler() - ce_irq_handler
* @ce_id: ce_id
* @context: context
*
* Return: N/A
*/
static irqreturn_t ce_irq_handler(int irq, void *context)
{
struct ce_tasklet_entry *tasklet_entry = context;
struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
struct ol_softc *scn = hif_ce_state->scn;
uint32_t host_status;
int ce_id = icnss_get_ce_id(irq);
if (tasklet_entry->ce_id != ce_id) {
HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
__func__, tasklet_entry->ce_id, ce_id);
return IRQ_NONE;
}
if (unlikely(ce_id >= CE_COUNT_MAX)) {
HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
__func__, tasklet_entry->ce_id, CE_COUNT_MAX);
return IRQ_NONE;
}
#ifndef HIF_PCI
disable_irq_nosync(irq);
#endif
ce_irq_disable(scn, ce_id);
ce_irq_status(scn, ce_id, &host_status);
cdf_atomic_inc(&scn->active_tasklet_cnt);
hif_record_ce_desc_event(ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
if (hif_napi_enabled(scn, ce_id))
hif_napi_schedule(scn, ce_id);
else
tasklet_schedule(&tasklet_entry->intr_tq);
return IRQ_HANDLED;
}
/**
* const char *ce_name
*
* @ce_name: ce_name
*/
const char *ce_name[ICNSS_MAX_IRQ_REGISTRATIONS] = {
"WLAN_CE_0",
"WLAN_CE_1",
"WLAN_CE_2",
"WLAN_CE_3",
"WLAN_CE_4",
"WLAN_CE_5",
"WLAN_CE_6",
"WLAN_CE_7",
"WLAN_CE_8",
"WLAN_CE_9",
"WLAN_CE_10",
"WLAN_CE_11",
};
/**
* ce_unregister_irq() - ce_unregister_irq
* @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
*
* Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
* unregister for copy engine x.
*
* Return: CDF_STATUS
*/
CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
int id;
int ret;
if (hif_ce_state == NULL) {
HIF_WARN("%s: hif_ce_state = NULL", __func__);
return CDF_STATUS_SUCCESS;
}
for (id = 0; id < CE_COUNT_MAX; id++) {
if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
ret = icnss_ce_free_irq(id,
&hif_ce_state->tasklets[id]);
if (ret < 0)
HIF_ERROR(
"%s: icnss_unregister_irq error - ce_id = %d, ret = %d",
__func__, id, ret);
}
}
return CDF_STATUS_SUCCESS;
}
/**
* ce_register_irq() - ce_register_irq
* @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
*
* Registers copy engine irqs matching mask. If a 1 is set at bit x,
* Register for copy engine x.
*
* Return: CDF_STATUS
*/
CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
int id;
int ret;
unsigned long irqflags = IRQF_TRIGGER_RISING;
uint32_t done_mask = 0;
for (id = 0; id < CE_COUNT_MAX; id++) {
if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
ret = icnss_ce_request_irq(id, ce_irq_handler,
irqflags, ce_name[id],
&hif_ce_state->tasklets[id]);
if (ret) {
HIF_ERROR(
"%s: cannot register CE %d irq handler, ret = %d",
__func__, id, ret);
ce_unregister_irq(hif_ce_state, done_mask);
return CDF_STATUS_E_FAULT;
} else {
done_mask |= 1 << id;
}
}
}
#ifndef HIF_PCI
/* move to hif_configure_irq */
ce_enable_irq_in_group_reg(hif_ce_state->scn, done_mask);
#endif
return CDF_STATUS_SUCCESS;
}
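/*
 * Illustrative usage sketch (not part of the original file): registering
 * IRQs for copy engines 0-2 and releasing them again.  The bitmask follows
 * the convention documented above: bit x selects copy engine x.  The
 * function name is hypothetical.
 */
static inline CDF_STATUS ce_irq_setup_example(struct HIF_CE_state *hif_ce_state)
{
	uint32_t mask = (1 << 0) | (1 << 1) | (1 << 2);
	CDF_STATUS status;

	status = ce_register_irq(hif_ce_state, mask);
	if (status != CDF_STATUS_SUCCESS)
		return status;

	/* ... later, during teardown ... */
	return ce_unregister_irq(hif_ce_state, mask);
}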


@ -1,36 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CE_TASKLET_H__
#define __CE_TASKLET_H__
#include "ce_main.h"
void init_tasklet_workers(void);
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask);
void ce_tasklet_kill(struct HIF_CE_state *hif_ce_state);
CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
#endif /* __CE_TASKLET_H__ */


@ -1,42 +0,0 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __HIF_DEBUG_H__
#define __HIF_DEBUG_H__
#include "cdf_trace.h"
#define HIF_ERROR(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR, ## args)
#define HIF_WARN(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_WARN, ## args)
#define HIF_INFO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO, ## args)
#define HIF_INFO_HI(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_HIGH, ## args)
#define HIF_INFO_MED(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_MED, ## args)
#define HIF_INFO_LO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_LOW, ## args)
#define HIF_TRACE(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR, ## args)
#define HIF_DBG(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_DEBUG, ## args)
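/*
 * Usage sketch (illustrative only): the macros above take printf-style
 * arguments and route them through CDF_TRACE at the matching severity, e.g.
 *
 *	HIF_ERROR("%s: CE %d init failed, status %d", __func__, ce_id, ret);
 *	HIF_INFO("%s: target version 0x%x", __func__, version);
 */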
#endif /* __HIF_DEBUG_H__ */


@ -1,93 +0,0 @@
/*
* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef HIF_HW_VERSION_H
#define HIF_HW_VERSION_H
#define AR6320_REV1_VERSION 0x5000000
#define AR6320_REV1_1_VERSION 0x5000001
#define AR6320_REV1_3_VERSION 0x5000003
#define AR6320_REV2_1_VERSION 0x5010000
#define AR6320_REV3_VERSION 0x5020000
#define AR6320_REV3_2_VERSION 0x5030000
struct qwlan_hw {
u32 id;
u32 subid;
const char *name;
};
static const struct qwlan_hw qwlan_hw_list[] = {
{
.id = AR6320_REV1_VERSION,
.subid = 0,
.name = "QCA6174_REV1",
},
{
.id = AR6320_REV1_1_VERSION,
.subid = 0x1,
.name = "QCA6174_REV1_1",
},
{
.id = AR6320_REV1_3_VERSION,
.subid = 0x2,
.name = "QCA6174_REV1_3",
},
{
.id = AR6320_REV2_1_VERSION,
.subid = 0x4,
.name = "QCA6174_REV2_1",
},
{
.id = AR6320_REV2_1_VERSION,
.subid = 0x5,
.name = "QCA6174_REV2_2",
},
{
.id = AR6320_REV3_VERSION,
.subid = 0x6,
.name = "QCA6174_REV2.3",
},
{
.id = AR6320_REV3_VERSION,
.subid = 0x8,
.name = "QCA6174_REV3",
},
{
.id = AR6320_REV3_VERSION,
.subid = 0x9,
.name = "QCA6174_REV3_1",
},
{
.id = AR6320_REV3_2_VERSION,
.subid = 0xA,
.name = "AR6320_REV3_2_VERSION",
}
};
#endif /* HIF_HW_VERSION_H */


@ -1,39 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __HIF_IO32_H__
#define __HIF_IO32_H__
#include <linux/io.h>
#include "ol_if_athvar.h"
#include "hif.h"
#ifdef HIF_PCI
#include "hif_io32_pci.h"
#else
#include "hif_io32_snoc.h"
#endif /* HIF_PCI */
#endif /* __HIF_IO32_H__ */


@ -1,881 +0,0 @@
/*
* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include <htc_services.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#include "ce_api.h"
#include "ce_tasklet.h"
#include "cdf_trace.h"
#include "cdf_status.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "mp_dev.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include "cds_concurrency.h"
#define AGC_DUMP 1
#define CHANINFO_DUMP 2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
void hif_dump(struct ol_softc *scn, uint8_t cmd_id, bool start)
{
switch (cmd_id) {
case AGC_DUMP:
if (start)
priv_start_agc(scn);
else
priv_dump_agc(scn);
break;
case CHANINFO_DUMP:
if (start)
priv_start_cap_chaninfo(scn);
else
priv_dump_chaninfo(scn);
break;
case BB_WATCHDOG_DUMP:
priv_dump_bbwatchdog(scn);
break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
case PCIE_ACCESS_DUMP:
hif_target_dump_access_log();
break;
#endif
default:
HIF_ERROR("%s: Invalid htc dump command", __func__);
break;
}
}
/**
* hif_shut_down_device() - hif_shut_down_device
*
 * This function shuts down the device
*
* @scn: ol_softc
*
* Return: void
*/
void hif_shut_down_device(struct ol_softc *scn)
{
if (scn && scn->hif_hdl) {
struct HIF_CE_state *hif_state =
(struct HIF_CE_state *)scn->hif_hdl;
hif_stop(scn);
cdf_mem_free(hif_state);
scn->hif_hdl = NULL;
}
}
/**
* hif_cancel_deferred_target_sleep() - cancel deferred target sleep
*
 * This function cancels the deferred target sleep
*
* @scn: ol_softc
*
* Return: void
*/
void hif_cancel_deferred_target_sleep(struct ol_softc *scn)
{
hif_pci_cancel_deferred_target_sleep(scn);
}
/**
* hif_get_target_id(): hif_get_target_id
*
* Return the virtual memory base address to the caller
*
* @scn: ol_softc
*
* Return: A_target_id_t
*/
A_target_id_t hif_get_target_id(struct ol_softc *scn)
{
return scn->mem;
}
/**
* hif_set_target_sleep(): hif_set_target_sleep
* @scn: scn
* @sleep_ok: sleep_ok
* @wait_for_it: wait
*
* Return: void
*/
void hif_set_target_sleep(struct ol_softc *scn,
bool sleep_ok, bool wait_for_it)
{
hif_target_sleep_state_adjust(scn,
sleep_ok, wait_for_it);
}
/**
* hif_target_forced_awake(): hif_target_forced_awake
* @scn: scn
*
* Return: bool
*/
bool hif_target_forced_awake(struct ol_softc *scn)
{
A_target_id_t addr = scn->mem;
bool awake;
bool forced_awake;
awake = hif_targ_is_awake(scn, addr);
forced_awake =
!!(hif_read32_mb
(addr + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS) & PCIE_SOC_WAKE_V_MASK);
return awake && forced_awake;
}
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
struct hif_msg_callbacks *msg_callbacks =
&hif_state->msg_callbacks_current;
if (!msg_callbacks->fwEventHandler)
return;
msg_callbacks->fwEventHandler(msg_callbacks->Context,
CDF_STATUS_E_FAILURE);
}
/**
* hif_fw_interrupt_handler(): FW interrupt handler
*
 * This function is the FW interrupt handler
*
* @irq: irq number
* @arg: the user pointer
*
* Return: bool
*/
#ifndef QCA_WIFI_3_0
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
struct ol_softc *scn = arg;
struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
uint32_t fw_indicator_address, fw_indicator;
A_TARGET_ACCESS_BEGIN_RET(scn);
fw_indicator_address = hif_state->fw_indicator_address;
/* For sudden unplug this will return ~0 */
fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
/* ACK: clear Target-side pending event */
A_TARGET_WRITE(scn, fw_indicator_address,
fw_indicator & ~FW_IND_EVENT_PENDING);
A_TARGET_ACCESS_END_RET(scn);
if (hif_state->started) {
hif_fw_event_handler(hif_state);
} else {
/*
* Probable Target failure before we're prepared
* to handle it. Generally unexpected.
*/
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("%s: Early firmware event indicated\n",
__func__));
}
} else {
A_TARGET_ACCESS_END_RET(scn);
}
return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
return ATH_ISR_SCHED;
}
#endif /* #ifdef QCA_WIFI_3_0 */
/**
* hif_get_targetdef(): hif_get_targetdef
* @scn: scn
*
* Return: void *
*/
void *hif_get_targetdef(struct ol_softc *scn)
{
return scn->targetdef;
}
/**
* hif_vote_link_down(): unvote for link up
*
* Call hif_vote_link_down to release a previous request made using
* hif_vote_link_up. A hif_vote_link_down call should only be made
* after a corresponding hif_vote_link_up, otherwise you could be
* negating a vote from another source. When no votes are present
* hif will not guarantee the linkstate after hif_bus_suspend.
*
 * SYNCHRONIZE WITH hif_vote_link_up by only calling from the MC thread
 * and during initialization/deinitialization sequences.
*
* Return: n/a
*/
void hif_vote_link_down(void)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
CDF_BUG(scn);
scn->linkstate_vote--;
if (scn->linkstate_vote == 0)
hif_bus_prevent_linkdown(scn, false);
}
/**
* hif_vote_link_up(): vote to prevent bus from suspending
*
* Makes hif guarantee that fw can message the host normally
 * during suspend.
*
 * SYNCHRONIZE WITH hif_vote_link_down by only calling from the MC thread
 * and during initialization/deinitialization sequences.
*
* Return: n/a
*/
void hif_vote_link_up(void)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
CDF_BUG(scn);
scn->linkstate_vote++;
if (scn->linkstate_vote == 1)
hif_bus_prevent_linkdown(scn, true);
}
/**
* hif_can_suspend_link(): query if hif is permitted to suspend the link
*
 * Hif will ensure that the link won't be suspended if the upper layers
* don't want it to.
*
* SYNCHRONIZATION: MC thread is stopped before bus suspend thus
 * we don't need extra locking to ensure votes don't change while
* we are in the process of suspending or resuming.
*
 * Return: false if hif will guarantee link up during suspend.
*/
bool hif_can_suspend_link(void)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
CDF_BUG(scn);
return scn->linkstate_vote == 0;
}
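/*
 * Illustrative pairing sketch (not part of the original file): a caller that
 * needs the bus link kept up across an operation votes before and unvotes
 * after, keeping the calls strictly balanced.  The function name is
 * hypothetical.
 */
static inline void hif_do_work_with_link_up_example(void)
{
	hif_vote_link_up();
	/* ... issue messages that require the link to stay up ... */
	hif_vote_link_down();
}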
/**
* hif_hia_item_address(): hif_hia_item_address
* @target_type: target_type
* @item_offset: item_offset
*
* Return: n/a
*/
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
switch (target_type) {
case TARGET_TYPE_AR6002:
return AR6002_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_AR6003:
return AR6003_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_AR6004:
return AR6004_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_AR6006:
return AR6006_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_AR9888:
return AR9888_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_AR6320:
case TARGET_TYPE_AR6320V2:
return AR6320_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_QCA6180:
return QCA6180_HOST_INTEREST_ADDRESS + item_offset;
case TARGET_TYPE_ADRASTEA:
/* ADRASTEA doesn't have a host interest address */
ASSERT(0);
return 0;
default:
ASSERT(0);
return 0;
}
}
/**
* hif_max_num_receives_reached() - check max receive is reached
* @count: unsigned int.
*
* Output check status as bool
*
* Return: bool
*/
bool hif_max_num_receives_reached(unsigned int count)
{
if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
return count > 120;
else
return count > MAX_NUM_OF_RECEIVES;
}
/**
* init_buffer_count() - initial buffer count
* @maxSize: cdf_size_t
*
* routine to modify the initial buffer count to be allocated on an os
* platform basis. Platform owner will need to modify this as needed
*
* Return: cdf_size_t
*/
cdf_size_t init_buffer_count(cdf_size_t maxSize)
{
return maxSize;
}
/**
* hif_init_cdf_ctx(): hif_init_cdf_ctx
* @hif_ctx: hif_ctx
*
* Return: int
*/
int hif_init_cdf_ctx(void *hif_ctx)
{
cdf_device_t cdf_ctx;
struct ol_softc *scn = (struct ol_softc *)hif_ctx;
cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
if (!cdf_ctx) {
HIF_ERROR("%s: CDF is NULL", __func__);
return -ENOMEM;
}
cdf_ctx->drv = &scn->aps_osdev;
cdf_ctx->drv_hdl = scn->aps_osdev.bdev;
cdf_ctx->dev = scn->aps_osdev.device;
scn->cdf_dev = cdf_ctx;
return 0;
}
/**
* hif_deinit_cdf_ctx(): hif_deinit_cdf_ctx
* @hif_ctx: hif_ctx
*
* Return: void
*/
void hif_deinit_cdf_ctx(void *hif_ctx)
{
struct ol_softc *scn = (struct ol_softc *)hif_ctx;
if (scn == NULL || !scn->cdf_dev)
return;
scn->cdf_dev = NULL;
}
/**
* hif_save_htc_htt_config_endpoint():
* hif_save_htc_htt_config_endpoint
* @htc_endpoint: htc_endpoint
*
* Return: void
*/
void hif_save_htc_htt_config_endpoint(int htc_endpoint)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
if (!scn) {
HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!",
__func__);
return;
}
scn->htc_endpoint = htc_endpoint;
}
/**
* hif_get_hw_name(): get a human readable name for the hardware
*
 * Return: human readable name for the underlying wifi hardware.
*/
const char *hif_get_hw_name(struct ol_softc *scn)
{
int i;
for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
if (scn->target_version == qwlan_hw_list[i].id &&
scn->target_revision == qwlan_hw_list[i].subid) {
return qwlan_hw_list[i].name;
}
}
return "Unknown Device";
}
/**
* hif_get_hw_info(): hif_get_hw_info
* @scn: scn
* @version: version
 * @revision: revision
 * @target_name: pointer set to the human readable hardware name
*
* Return: n/a
*/
void hif_get_hw_info(void *scn, u32 *version, u32 *revision,
const char **target_name)
{
*version = ((struct ol_softc *)scn)->target_version;
*revision = ((struct ol_softc *)scn)->target_revision;
*target_name = hif_get_hw_name((struct ol_softc *)scn);
}
/**
* hif_set_fw_info(): set the target_fw_version
* @scn: scn
* @target_fw_version: target_fw_version
*
* Return: n/a
*/
void hif_set_fw_info(void *scn, uint32_t target_fw_version)
{
((struct ol_softc *)scn)->target_fw_version = target_fw_version;
}
/**
* hif_open(): hif_open
*
* Return: scn
*/
CDF_STATUS hif_open(enum ath_hal_bus_type bus_type)
{
struct ol_softc *scn;
v_CONTEXT_t cds_context;
CDF_STATUS status = CDF_STATUS_SUCCESS;
cds_context = cds_get_global_context();
status = cds_alloc_context(cds_context, CDF_MODULE_ID_HIF,
(void **)&scn, sizeof(*scn));
if (status != CDF_STATUS_SUCCESS) {
HIF_ERROR("%s: cannot alloc ol_sc", __func__);
return status;
}
cdf_mem_zero(scn, sizeof(*scn));
scn->enableuartprint = 0;
scn->enablefwlog = 0;
scn->max_no_of_peers = 1;
scn->pkt_log_init = false;
cdf_atomic_init(&scn->wow_done);
cdf_atomic_init(&scn->active_tasklet_cnt);
cdf_atomic_init(&scn->link_suspended);
cdf_atomic_init(&scn->tasklet_from_intr);
init_waitqueue_head(&scn->aps_osdev.event_queue);
scn->linkstate_vote = 0;
status = hif_bus_open(scn, bus_type);
if (status != CDF_STATUS_SUCCESS) {
HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
__func__, status, bus_type);
cds_free_context(cds_context, CDF_MODULE_ID_HIF, scn);
}
return status;
}
/**
* hif_close(): hif_close
* @hif_ctx: hif_ctx
*
* Return: n/a
*/
void hif_close(void *hif_ctx)
{
struct ol_softc *scn = hif_ctx;
if (scn == NULL) {
HIF_ERROR("%s: ol_softc is NULL", __func__);
return;
}
if (scn->athdiag_procfs_inited) {
athdiag_procfs_remove();
scn->athdiag_procfs_inited = false;
}
if (scn->hif_hdl) {
cdf_mem_free(scn->hif_hdl);
scn->hif_hdl = NULL;
}
hif_bus_close(scn);
cds_free_context(cds_get_global_context(),
CDF_MODULE_ID_HIF, hif_ctx);
}
/**
* hif_enable(): hif_enable
* @hif_ctx: hif_ctx
* @dev: dev
* @bdev: bus dev
* @bid: bus ID
* @bus_type: bus type
* @type: enable type
*
* Return: CDF_STATUS
*/
CDF_STATUS hif_enable(void *hif_ctx, struct device *dev,
void *bdev, const hif_bus_id *bid,
enum ath_hal_bus_type bus_type,
enum hif_enable_type type)
{
CDF_STATUS status;
struct ol_softc *scn = hif_ctx;
if (scn == NULL) {
HIF_ERROR("%s: hif_ctx = NULL", __func__);
return CDF_STATUS_E_NULL_VALUE;
}
status = hif_enable_bus(scn, dev, bdev, bid, type);
if (status != CDF_STATUS_SUCCESS) {
HIF_ERROR("%s: hif_enable_bus error = %d",
__func__, status);
return status;
}
if (ADRASTEA_BU)
hif_vote_link_up();
if (hif_config_ce(scn)) {
HIF_ERROR("%s: Target probe failed.", __func__);
hif_disable_bus(scn->aps_osdev.bdev);
status = CDF_STATUS_E_FAILURE;
return status;
}
/*
* Flag to avoid potential unallocated memory access from MSI
* interrupt handler which could get scheduled as soon as MSI
* is enabled, i.e to take care of the race due to the order
* in where MSI is enabled before the memory, that will be
* in interrupt handlers, is allocated.
*/
#ifdef HIF_PCI
status = hif_configure_irq(scn->hif_sc);
if (status < 0) {
HIF_ERROR("%s: ERROR - configure_IRQ_and_CE failed, status = %d",
__func__, status);
return CDF_STATUS_E_FAILURE;
}
#endif
scn->hif_init_done = true;
HIF_TRACE("%s: X OK", __func__);
return CDF_STATUS_SUCCESS;
}
/**
* hif_pktlogmod_exit(): hif_pktlogmod_exit
* @scn: scn
*
* Return: n/a
*/
#ifndef REMOVE_PKT_LOG
void hif_pktlogmod_exit(void *hif_ctx)
{
struct ol_softc *scn = hif_ctx;
if (scn && cds_get_conparam() != CDF_GLOBAL_FTM_MODE &&
!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && scn->pkt_log_init) {
pktlogmod_exit(scn);
scn->pkt_log_init = false;
}
}
#else
void hif_pktlogmod_exit(void *hif_ctx)
{
}
#endif
/**
* hif_wlan_disable(): call the platform driver to disable wlan
*
* This function passes the con_mode to platform driver to disable
* wlan.
*
* Return: void
*/
void hif_wlan_disable(void)
{
enum icnss_driver_mode mode;
uint32_t con_mode = cds_get_conparam();
if (CDF_GLOBAL_FTM_MODE == con_mode)
mode = ICNSS_FTM;
else if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
mode = ICNSS_EPPING;
else
mode = ICNSS_MISSION;
icnss_wlan_disable(mode);
}
void hif_disable(void *hif_ctx, enum hif_disable_type type)
{
struct ol_softc *scn = hif_ctx;
if (!scn)
return;
hif_nointrs(scn);
if (scn->hif_init_done == false)
hif_shut_down_device(scn);
else
hif_stop(scn);
if (ADRASTEA_BU)
hif_vote_link_down();
if (scn->aps_osdev.bdev)
hif_disable_bus(scn->aps_osdev.bdev);
hif_wlan_disable();
scn->notice_send = false;
HIF_INFO("%s: X", __func__);
}
/**
* hif_crash_shutdown_dump_bus_register() - dump bus registers
* @hif_ctx: hif_ctx
*
* Return: n/a
*/
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
&& defined(HIF_PCI) && defined(DEBUG)
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
struct ol_softc *scn = hif_ctx;
if (hif_check_soc_status(scn))
return;
if (hif_dump_registers(scn))
HIF_ERROR("Failed to dump bus registers!");
}
/**
* hif_crash_shutdown(): hif_crash_shutdown
*
* This function is called by the platform driver to dump CE registers
*
* @hif_ctx: hif_ctx
*
* Return: n/a
*/
void hif_crash_shutdown(void *hif_ctx)
{
struct ol_softc *scn = hif_ctx;
struct HIF_CE_state *hif_state;
if (!scn)
return;
hif_state = (struct HIF_CE_state *)scn->hif_hdl;
if (!hif_state)
return;
if (OL_TRGET_STATUS_RESET == scn->target_status) {
HIF_INFO_MED("%s: Target is already asserted, ignore!",
__func__);
return;
}
if (cds_is_load_or_unload_in_progress()) {
HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
return;
}
hif_crash_shutdown_dump_bus_register(hif_ctx);
if (ol_copy_ramdump(scn))
goto out;
HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);
out:
return;
}
#else
void hif_crash_shutdown(void *hif_ctx)
{
HIF_INFO_MED("%s: Collecting target RAM dump disabled",
__func__);
return;
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
#ifdef QCA_WIFI_3_0
/**
* hif_check_fw_reg(): hif_check_fw_reg
* @scn: scn
*
* Return: int
*/
int hif_check_fw_reg(struct ol_softc *scn)
{
return 0;
}
#endif
#ifdef IPA_OFFLOAD
/**
* hif_read_phy_mem_base(): hif_read_phy_mem_base
* @scn: scn
* @phy_mem_base: physical mem base
*
* Return: n/a
*/
void hif_read_phy_mem_base(struct ol_softc *scn, cdf_dma_addr_t *phy_mem_base)
{
*phy_mem_base = scn->mem_pa;
}
#endif /* IPA_OFFLOAD */
/**
* hif_get_device_type(): hif_get_device_type
* @device_id: device_id
* @revision_id: revision_id
* @hif_type: returned hif_type
* @target_type: returned target_type
*
* Return: int
*/
int hif_get_device_type(uint32_t device_id,
uint32_t revision_id,
uint32_t *hif_type, uint32_t *target_type)
{
int ret = 0;
switch (device_id) {
#ifdef QCA_WIFI_3_0_ADRASTEA
case ADRASTEA_DEVICE_ID:
case ADRASTEA_DEVICE_ID_P2_E12:
*hif_type = HIF_TYPE_ADRASTEA;
*target_type = TARGET_TYPE_ADRASTEA;
break;
#else
case QCA6180_DEVICE_ID:
*hif_type = HIF_TYPE_QCA6180;
*target_type = TARGET_TYPE_QCA6180;
break;
#endif
case AR9888_DEVICE_ID:
*hif_type = HIF_TYPE_AR9888;
*target_type = TARGET_TYPE_AR9888;
break;
case AR6320_DEVICE_ID:
switch (revision_id) {
case AR6320_FW_1_1:
case AR6320_FW_1_3:
*hif_type = HIF_TYPE_AR6320;
*target_type = TARGET_TYPE_AR6320;
break;
case AR6320_FW_2_0:
case AR6320_FW_3_0:
case AR6320_FW_3_2:
*hif_type = HIF_TYPE_AR6320V2;
*target_type = TARGET_TYPE_AR6320V2;
break;
default:
HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
__func__, device_id, revision_id);
ret = -ENODEV;
goto end;
}
break;
default:
HIF_ERROR("%s: Unsupported device ID!", __func__);
ret = -ENODEV;
break;
}
end:
return ret;
}


@ -1,136 +0,0 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
* NB: Inappropriate references to "HTC" are used in this (and other)
* HIF implementations. HTC is typically the calling layer, but it
* theoretically could be some alternative.
*/
/*
* This holds all state needed to process a pending send/recv interrupt.
* The information is saved here as soon as the interrupt occurs (thus
* allowing the underlying CE to re-use the ring descriptor). The
* information here is eventually processed by a completion processing
* thread.
*/
#ifndef __HIF_MAIN_H__
#define __HIF_MAIN_H__
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "cdf_lock.h"
#include "cepci.h"
#include "hif.h"
#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50
#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60
/*
* This macro implementation is exposed for efficiency only.
* The implementation may change and callers should
* consider the targid to be a completely opaque handle.
*/
#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid)))
A_target_id_t hif_get_target_id(struct ol_softc *scn);
bool hif_target_forced_awake(struct ol_softc *scn);
#ifdef QCA_WIFI_3_0
#define DISABLE_L1SS_STATES 1
#endif
#ifdef CONFIG_SLUB_DEBUG_ON
#define MAX_NUM_OF_RECEIVES 100 /* Maximum number of Rx buf to process before*
* break out in SLUB debug builds */
#elif defined(FEATURE_NAPI)
#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES
#else /* no SLUBS, no NAPI */
/* Maximum number of Rx buf to process before break out */
#define MAX_NUM_OF_RECEIVES 1000
#endif /* SLUB_DEBUG_ON / FEATURE_NAPI */
#ifdef QCA_WIFI_3_0_ADRASTEA
#define ADRASTEA_BU 1
#else
#define ADRASTEA_BU 0
#endif
#ifdef QCA_WIFI_3_0
#define HAS_FW_INDICATOR 0
#else
#define HAS_FW_INDICATOR 1
#endif
#define AR9888_DEVICE_ID (0x003c)
#define AR6320_DEVICE_ID (0x003e)
#define AR6320_FW_1_1 (0x11)
#define AR6320_FW_1_3 (0x13)
#define AR6320_FW_2_0 (0x20)
#define AR6320_FW_3_0 (0x30)
#define AR6320_FW_3_2 (0x32)
#define ADRASTEA_DEVICE_ID (0xabcd)
#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021)
#if (defined(QVIT))
#define QCA6180_DEVICE_ID (0xabcd)
#else
#define QCA6180_DEVICE_ID (0x041)
#endif
A_target_id_t hif_get_target_id(struct ol_softc *scn);
void hif_dump_pipe_debug_count(struct ol_softc *scn);
bool hif_max_num_receives_reached(unsigned int count);
int hif_config_ce(hif_handle_t hif_hdl);
int athdiag_procfs_init(void *scn);
void athdiag_procfs_remove(void);
/* routine to modify the initial buffer count to be allocated on an os
* platform basis. Platform owner will need to modify this as needed
*/
cdf_size_t init_buffer_count(cdf_size_t maxSize);
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
int hif_get_target_type(struct ol_softc *ol_sc, struct device *dev,
void *bdev, const hif_bus_id *bid, uint32_t *hif_type,
uint32_t *target_type);
int hif_get_device_type(uint32_t device_id,
uint32_t revision_id,
uint32_t *hif_type, uint32_t *target_type);
/*These functions are exposed to HDD*/
int hif_init_cdf_ctx(void *ol_sc);
void hif_deinit_cdf_ctx(void *ol_sc);
bool hif_targ_is_awake(struct ol_softc *scn, void *__iomem *mem);
void hif_nointrs(struct ol_softc *scn);
void hif_bus_close(struct ol_softc *ol_sc);
CDF_STATUS hif_bus_open(struct ol_softc *ol_sc,
enum ath_hal_bus_type bus_type);
CDF_STATUS hif_enable_bus(struct ol_softc *ol_sc, struct device *dev,
void *bdev, const hif_bus_id *bid, enum hif_enable_type type);
void hif_disable_bus(void *bdev);
void hif_bus_prevent_linkdown(struct ol_softc *scn, bool flag);
#endif /* __HIF_MAIN_H__ */


@ -1,464 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: hif_napi.c
*
* HIF NAPI interface implementation
*/
#include <string.h> /* memset */
#include <cds_api.h>
#include <hif_napi.h>
#include <hif_debug.h>
#include <hif_io32.h>
#include <ce_api.h>
#include <ce_internal.h>
enum napi_decision_vector {
HIF_NAPI_NOEVENT = 0,
HIF_NAPI_INITED = 1,
HIF_NAPI_CONF_UP = 2
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
/**
* hif_napi_create() - creates the NAPI structures for a given CE
* @hif : pointer to hif context
* @pipe_id: the CE id on which the instance will be created
* @poll : poll function to be used for this NAPI instance
* @budget : budget to be registered with the NAPI instance
 * @scale : scale factor on the weight (to scale the budget to 1000)
*
* Description:
* Creates NAPI instances. This function is called
* unconditionally during initialization. It creates
* napi structures through the proper HTC/HIF calls.
* The structures are disabled on creation.
* Note that for each NAPI instance a separate dummy netdev is used
*
* Return:
* < 0: error
* = 0: <should never happen>
* > 0: id of the created object (for multi-NAPI, number of objects created)
*/
int hif_napi_create(struct ol_softc *hif,
uint8_t pipe_id,
int (*poll)(struct napi_struct *, int),
int budget,
int scale)
{
struct qca_napi_data *napid;
struct qca_napi_info *napii;
NAPI_DEBUG("-->(pipe=%d, budget=%d, scale=%d)\n",
pipe_id, budget, scale);
NAPI_DEBUG("hif->napi_data.state = 0x%08x\n",
hif->napi_data.state);
NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x\n",
hif->napi_data.ce_map);
napid = &(hif->napi_data);
if (0 == (napid->state & HIF_NAPI_INITED)) {
memset(napid, 0, sizeof(struct qca_napi_data));
mutex_init(&(napid->mutex));
init_dummy_netdev(&(napid->netdev));
napid->state |= HIF_NAPI_INITED;
HIF_INFO("%s: NAPI structures initialized\n", __func__);
NAPI_DEBUG("NAPI structures initialized\n");
}
napii = &(napid->napis[pipe_id]);
memset(napii, 0, sizeof(struct qca_napi_info));
napii->scale = scale;
napii->id = NAPI_PIPE2ID(pipe_id);
NAPI_DEBUG("adding napi=%p to netdev=%p (poll=%p, bdgt=%d)\n",
&(napii->napi), &(napid->netdev), poll, budget);
netif_napi_add(&(napid->netdev), &(napii->napi), poll, budget);
NAPI_DEBUG("after napi_add\n");
NAPI_DEBUG("napi=0x%p, netdev=0x%p\n",
&(napii->napi), &(napid->netdev));
NAPI_DEBUG("napi.dev_list.prev=0x%p, next=0x%p\n",
napii->napi.dev_list.prev, napii->napi.dev_list.next);
NAPI_DEBUG("dev.napi_list.prev=0x%p, next=0x%p\n",
napid->netdev.napi_list.prev, napid->netdev.napi_list.next);
/* It is OK to change the state variable below without protection
* as there should be no-one around yet
*/
napid->ce_map |= (0x01 << pipe_id);
HIF_INFO("%s: NAPI id %d created for pipe %d\n", __func__,
napii->id, pipe_id);
NAPI_DEBUG("NAPI id %d created for pipe %d\n", napii->id, pipe_id);
NAPI_DEBUG("<--napi_id=%d]\n", napii->id);
return napii->id;
}
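/*
 * Illustrative call (not from the original file): instances are typically
 * created with the common budget and scale factors defined in hif.h, e.g.
 *
 *	id = hif_napi_create(scn, CE_id, hdd_napi_poll,
 *			     QCA_NAPI_BUDGET, QCA_NAPI_DEF_SCALE);
 *
 * where hdd_napi_poll is a hypothetical poll callback provided by HDD.
 */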
/**
*
* hif_napi_destroy() - destroys the NAPI structures for a given instance
* @hif : pointer to hif context
* @ce_id : the CE id whose napi instance will be destroyed
* @force : if set, will destroy even if entry is active (de-activates)
*
* Description:
* Destroy a given NAPI instance. This function is called
* unconditionally during cleanup.
 * Refuses to destroy an entry if it is still enabled (unless force=1)
* Marks the whole napi_data invalid if all instances are destroyed.
*
* Return:
* -EINVAL: specific entry has not been created
* -EPERM : specific entry is still active
 * < 0 : error
 * = 0 : success
*/
int hif_napi_destroy(struct ol_softc *hif,
uint8_t id,
int force)
{
uint8_t ce = NAPI_ID2PIPE(id);
int rc = 0;
NAPI_DEBUG("-->(id=%d, force=%d)\n", id, force);
if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
HIF_ERROR("%s: NAPI not initialized or entry %d not created\n",
__func__, id);
rc = -EINVAL;
} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
HIF_ERROR("%s: NAPI instance %d (pipe %d) not created\n",
__func__, id, ce);
rc = -EINVAL;
} else {
struct qca_napi_data *napid;
struct qca_napi_info *napii;
napid = &(hif->napi_data);
napii = &(napid->napis[ce]);
if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
if (force) {
napi_disable(&(napii->napi));
HIF_INFO("%s: NAPI entry %d force disabled\n",
__func__, id);
NAPI_DEBUG("NAPI %d force disabled\n", id);
} else {
HIF_ERROR("%s: Cannot destroy active NAPI %d\n",
__func__, id);
rc = -EPERM;
}
}
if (0 == rc) {
NAPI_DEBUG("before napi_del\n");
NAPI_DEBUG("napi.dlist.prv=0x%p, next=0x%p\n",
napii->napi.dev_list.prev,
napii->napi.dev_list.next);
NAPI_DEBUG("dev.napi_l.prv=0x%p, next=0x%p\n",
napid->netdev.napi_list.prev,
napid->netdev.napi_list.next);
netif_napi_del(&(napii->napi));
napid->ce_map &= ~(0x01 << ce);
napii->scale = 0;
HIF_INFO("%s: NAPI %d destroyed\n", __func__, id);
/* if there are no active instances and
* if they are all destroyed,
* set the whole structure to uninitialized state
*/
if (napid->ce_map == 0) {
/* hif->napi_data.state = 0; */
memset(napid,
0, sizeof(struct qca_napi_data));
HIF_INFO("%s: no NAPI instances. Zapped.\n",
__func__);
}
}
}
return rc;
}
/**
*
* hif_napi_get_all() - returns the address of the whole HIF NAPI structure
* @hif: pointer to hif context
*
* Description:
* Returns the address of the whole structure
*
* Return:
* <addr>: address of the whole HIF NAPI structure
*/
inline struct qca_napi_data *hif_napi_get_all(struct ol_softc *hif)
{
return &(hif->napi_data);
}
/**
*
* hif_napi_event() - Decision-maker to enable/disable NAPI.
* @hif : pointer to hif context
 * @event: event that has been detected
* @data: more data regarding the event
*
* Description:
* This function decides whether or not NAPI should be enabled.
* NAPI will be enabled, if all the following is satisfied.
* 1- has been enabled administratively:
* the .ini file has the enabled setting and it has not been disabled
 * by a vendor command override later
*
* Return:
* < 0: some error
* = 0: NAPI is now disabled
* = 1: NAPI is now enabled
*/
int hif_napi_event(struct ol_softc *hif, enum qca_napi_event event, void *data)
{
int rc;
uint32_t prev_state;
int i;
struct napi_struct *napi;
NAPI_DEBUG("-->(event=%d, aux=%p)\n", event, data);
mutex_lock(&(hif->napi_data.mutex));
prev_state = hif->napi_data.state;
switch (event) {
case NAPI_EVT_INI_FILE:
case NAPI_EVT_CMD_STATE: {
int on = (data != ((void *)0));
HIF_INFO("%s: received evnt: CONF %s; v = %d (state=0x%0x)\n",
__func__,
(event == NAPI_EVT_INI_FILE)?".ini file":"cmd",
on, prev_state);
if (on)
if (prev_state & HIF_NAPI_CONF_UP) {
HIF_INFO("%s: duplicate NAPI conf ON msg\n",
__func__);
} else {
HIF_INFO("%s: setting configuration to ON\n",
__func__);
hif->napi_data.state |= HIF_NAPI_CONF_UP;
}
else /* off request */
if (prev_state & HIF_NAPI_CONF_UP) {
HIF_INFO("%s: setting configuration to OFF\n",
__func__);
hif->napi_data.state &= ~HIF_NAPI_CONF_UP;
} else {
HIF_INFO("%s: duplicate NAPI conf OFF msg\n",
__func__);
}
break;
}
/* case NAPI_INIT_FILE/CMD_STATE */
default: {
HIF_ERROR("%s: unknown event: %d (data=0x%0lx)\n",
__func__, event, (unsigned long) data);
break;
} /* default */
}; /* switch */
mutex_unlock(&(hif->napi_data.mutex));
if (prev_state != hif->napi_data.state) {
if (hif->napi_data.state == ENABLE_NAPI_MASK) {
rc = 1;
for (i = 0; i < CE_COUNT_MAX; i++)
if ((hif->napi_data.ce_map & (0x01 << i))) {
napi = &(hif->napi_data.napis[i].napi);
NAPI_DEBUG("enabling NAPI %d\n", i);
napi_enable(napi);
}
} else {
rc = 0;
for (i = 0; i < CE_COUNT_MAX; i++)
if (hif->napi_data.ce_map & (0x01 << i)) {
napi = &(hif->napi_data.napis[i].napi);
NAPI_DEBUG("disabling NAPI %d\n", i);
napi_disable(napi);
}
}
} else {
HIF_INFO("%s: no change in hif napi state (still %d)\n",
__func__, prev_state);
rc = (hif->napi_data.state == ENABLE_NAPI_MASK);
}
NAPI_DEBUG("<--[rc=%d]\n", rc);
return rc;
}
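/*
 * Illustrative call sites (not from the original file): the .ini parser and
 * the vendor-command handler both feed this decision function, e.g.
 *
 *	hif_napi_event(hif, NAPI_EVT_INI_FILE, (void *)(uintptr_t)enable);
 *	hif_napi_event(hif, NAPI_EVT_CMD_STATE, (void *)(uintptr_t)enable);
 */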
/**
* hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
* @hif: hif context
* @ce : CE instance (or -1, to check if any CEs are enabled)
*
* Return: bool
*/
int hif_napi_enabled(struct ol_softc *hif, int ce)
{
int rc;
if (-1 == ce)
rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
else
rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
(hif->napi_data.ce_map & (0x01 << ce)));
return rc;
};
/**
* hif_napi_enable_irq() - enables bus interrupts after napi_complete
*
* @hif: hif context
* @id : id of NAPI instance calling this (used to determine the CE)
*
* Return: void
*/
inline void hif_napi_enable_irq(struct ol_softc *hif, int id)
{
ce_irq_enable(hif, NAPI_ID2PIPE(id));
}
/**
* hif_napi_schedule() - schedules napi, updates stats
* @scn: hif context
* @ce_id: index of napi instance
*
* Return: void
*/
int hif_napi_schedule(struct ol_softc *scn, int ce_id)
{
int cpu = smp_processor_id();
scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
NAPI_DEBUG("scheduling napi %d (ce:%d)\n",
scn->napi_data.napis[ce_id].id, ce_id);
napi_schedule(&(scn->napi_data.napis[ce_id].napi));
return true;
}
/**
* hif_napi_poll() - NAPI poll routine
* @napi : pointer to NAPI struct as kernel holds it
* @budget:
*
* This is the body of the poll function.
* The poll function is called by kernel. So, there is a wrapper
* function in HDD, which in turn calls this function.
* Two main reasons why the whole thing is not implemented in HDD:
* a) references to things like ce_service that HDD is not aware of
* b) proximity to the implementation of ce_tasklet, which the body
* of this function should be very close to.
*
* NOTE TO THE MAINTAINER:
* Consider this function and ce_tasklet very tightly coupled pairs.
* Any changes to ce_tasklet or this function may likely need to be
* reflected in the counterpart.
*
* Returns:
* int: the amount of work done in this poll ( <= budget)
*/
int hif_napi_poll(struct napi_struct *napi, int budget)
{
int rc = 0; /* default: no work done, also takes care of error */
int normalized, bucket;
int cpu = smp_processor_id();
struct ol_softc *hif;
struct qca_napi_info *napi_info;
struct CE_state *ce_state;
NAPI_DEBUG("%s -->(.., budget=%d)\n", budget);
napi_info = (struct qca_napi_info *)
container_of(napi, struct qca_napi_info, napi);
napi_info->stats[cpu].napi_polls++;
hif = (struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
if (unlikely(NULL == hif))
CDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
else {
rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
HIF_INFO_HI("%s: ce_per_engine_service processed %d msgs",
__func__, rc);
}
napi_info->stats[cpu].napi_workdone += rc;
normalized = (rc / napi_info->scale);
if (NULL != hif) {
ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
if (ce_state->lro_flush_cb != NULL) {
ce_state->lro_flush_cb(ce_state->lro_data);
}
}
/* do not return 0, if there was some work done,
* even if it is below the scale
*/
if (rc)
normalized++;
bucket = (normalized / QCA_NAPI_DEF_SCALE);
napi_info->stats[cpu].napi_budget_uses[bucket]++;
/* if ce_per_engine_service reports 0, then poll should be terminated */
if (0 == rc)
NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI\n",
__func__, __LINE__);
if (rc <= HIF_NAPI_MAX_RECEIVES) {
napi_info->stats[cpu].napi_completes++;
/* enable interrupts */
napi_complete(napi);
if (NULL != hif) {
hif_napi_enable_irq(hif, napi_info->id);
/* support suspend/resume */
cdf_atomic_dec(&(hif->active_tasklet_cnt));
}
NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts\n",
__func__, __LINE__);
}
NAPI_DEBUG("%s <--[normalized=%d]\n", _func__, normalized);
return normalized;
}
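/*
 * Illustrative sketch (not part of the original file): the HDD-side wrapper
 * referred to in the comment above would look roughly like this. The names
 * hdd_napi_poll() and hdd_napi_example_create() are assumptions made for the
 * example; only hif_napi_poll() above is taken from this file.
 */
#if 0	/* example only; never compiled */
static int hdd_napi_poll(struct napi_struct *napi, int budget)
{
	/* HDD forwards the kernel's poll callback straight to the HIF layer */
	return hif_napi_poll(napi, budget);
}

static void hdd_napi_example_create(struct net_device *netdev,
				    struct napi_struct *napi)
{
	/* registered once per CE instance when NAPI is created */
	netif_napi_add(netdev, napi, hdd_napi_poll, QCA_NAPI_BUDGET);
}
#endif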

View File

@ -1,369 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifdef HIF_PCI
#include "icnss_stub.h"
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#include "hif_debug.h"
#include "cds_api.h"
#include "cdf_status.h"
#include "qwlan_version.h"
#include <net/cnss.h>
static int icnss_get_irq_num(int ce_id);
/**
* struct icnss_stub_entry
*
* @irq_handler: irq_handler
* @data: data
* @name: name
* @ce_id: ce_id
*/
struct icnss_stub_entry {
irqreturn_t (*irq_handler)(int, void *);
void *data;
const char *name;
int ce_id;
};
/**
* struct icnss_stub_context
*
* @stub: icnss_stub_entry
* @regged_irq: regged_irq
*/
struct icnss_stub_context {
struct icnss_stub_entry stub[ICNSS_MAX_IRQ_REGISTRATIONS];
uint32_t regged_irq;
};
static struct icnss_stub_context cnss_stub;
#ifndef QCA_WIFI_3_0_ADRASTEA
/**
* icnss_wlan_enable() - icnss_wlan_enable
* @config: ce configuration information
* @mode: driver_mode
* @host_version: version string to send to the fw
*
* Return: int
*/
int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
enum icnss_driver_mode mode, const char *host_version)
{
return 0;
}
/**
* icnss_wlan_disable() - icnss_wlan_disable
* @mode: driver_mode
*
* Return: int
*/
int icnss_wlan_disable(enum icnss_driver_mode mode)
{
return 0;
}
/**
* icnss_set_fw_debug_mode() - icnss_set_fw_debug_mode
* @mode: fw debug mode, 0 for QXDM, 1 for WMI
*
* Return: int
*/
int icnss_set_fw_debug_mode(bool mode)
{
return 0;
}
#else
/**
* icnss_wlan_enable(): call the platform driver to enable wlan
* @config: ce configuration information
* @mode: driver_mode
* @host_version: version string to send to the fw
*
* This function passes the con_mode and CE configuration to
* platform driver to enable wlan.
 * cnss_wlan_enable has been hacked to do a qmi handshake with the fw;
 * this is not needed for Rome.
*
* Return: 0 on success, error number otherwise.
*/
int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
enum icnss_driver_mode mode, const char *host_version)
{
struct cnss_wlan_enable_cfg cfg;
enum cnss_driver_mode cnss_mode;
cfg.num_ce_tgt_cfg = config->num_ce_tgt_cfg;
cfg.ce_tgt_cfg = (struct cnss_ce_tgt_pipe_cfg *)
config->ce_tgt_cfg;
cfg.num_ce_svc_pipe_cfg = config->num_ce_svc_pipe_cfg;
cfg.ce_svc_cfg = (struct cnss_ce_svc_pipe_cfg *)
config->ce_svc_cfg;
cfg.num_shadow_reg_cfg = config->num_shadow_reg_cfg;
cfg.shadow_reg_cfg = (struct cnss_shadow_reg_cfg *)
config->shadow_reg_cfg;
switch (mode) {
case ICNSS_FTM:
cnss_mode = CNSS_FTM;
break;
case ICNSS_EPPING:
cnss_mode = CNSS_EPPING;
break;
default:
cnss_mode = CNSS_MISSION;
break;
}
return cnss_wlan_enable(&cfg, cnss_mode, host_version);
}
/**
 * icnss_wlan_disable(): call the platform driver to disable wlan
 * @con_mode: driver mode
 *
 * This function passes the con_mode to the platform driver to disable wlan.
 * cnss_wlan_disable has been hacked to do a qmi handshake with the fw;
 * this is not needed for Rome.
 *
 * Return: 0 on success
*/
int icnss_wlan_disable(enum icnss_driver_mode con_mode)
{
enum cnss_driver_mode mode;
switch (con_mode) {
case ICNSS_FTM:
mode = CNSS_FTM;
break;
case ICNSS_EPPING:
mode = CNSS_EPPING;
break;
default:
mode = CNSS_MISSION;
break;
}
cnss_wlan_disable(mode);
return 0;
}
/**
* icnss_set_fw_debug_mode() - call the platform driver to set fw
* debug mode
* @mode: fw debug mode, 0 for QXDM, 1 for WMI
*
* This function passes the fw debug mode to platform driver.
* cnss_set_fw_debug_mode has been hacked to do a qmi handshake with fw.
* This is not needed for rome.
*
* Return: int
*/
int icnss_set_fw_debug_mode(bool mode)
{
return cnss_set_fw_debug_mode(mode);
}
#endif
/**
* icnss_ce_request_irq() - register an irq handler
* @ce_id: ce_id
* @handler: handler
* @flags: flags to pass to the kernel api
* @name: name
* @context: context to pass to the irq handler
*
* Return: integer status
*/
int icnss_ce_request_irq(int ce_id,
irqreturn_t (*handler)(int, void *),
unsigned long flags, const char *name,
void *context)
{
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
HIF_ERROR("%s: invalid ce_id = %d", __func__, ce_id);
return -EINVAL;
}
cnss_stub.stub[ce_id].irq_handler = handler;
cnss_stub.stub[ce_id].ce_id = ce_id;
cnss_stub.stub[ce_id].data = context;
cnss_stub.stub[ce_id].name = name;
cnss_stub.regged_irq |= (1 << ce_id);
return 0;
}
/**
* icnss_ce_free_irq() - icnss_unregister_irq
* @ce_id: the ce_id that the irq belongs to
 * @context: context with which the irq was requested.
* Return: integer status
*/
int icnss_ce_free_irq(int ce_id, void *context)
{
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
HIF_ERROR("%s: invalid ce_id = %d", __func__, ce_id);
return -EINVAL;
}
if (cnss_stub.stub[ce_id].data != context) {
HIF_ERROR("%s: context match failure for ce_id %d",
__func__, ce_id);
return -EINVAL;
}
if (cnss_stub.regged_irq & (1 << ce_id)) {
cnss_stub.stub[ce_id].irq_handler = NULL;
cnss_stub.stub[ce_id].ce_id = 0;
cnss_stub.stub[ce_id].data = 0;
cnss_stub.stub[ce_id].name = NULL;
cnss_stub.regged_irq &= ~(1 << ce_id);
}
return 0;
}
/**
* icnss_dispatch_one_ce_irq() - icnss_dispatch_one_ce_irq
* @ce_id: ce_id
*
* Return: irqreturn_t
*/
static irqreturn_t icnss_dispatch_one_ce_irq(int ce_id)
{
irqreturn_t ret = IRQ_NONE;
if (cnss_stub.stub[ce_id].irq_handler)
ret = cnss_stub.stub[ce_id].irq_handler(
icnss_get_irq_num(ce_id),
(void *)cnss_stub.stub[ce_id].data);
else
HIF_ERROR(
"%sd: error - ce_id = %d, no IRQ handler",
__func__, ce_id);
return ret;
}
/**
* icnss_dispatch_ce_irq() - icnss_dispatch_ce_irq
* @scn: scn
*
* Return: N/A
*/
void icnss_dispatch_ce_irq(struct ol_softc *scn)
{
uint32_t intr_summary;
int id;
irqreturn_t ret;
if (scn->hif_init_done != true)
return;
A_TARGET_ACCESS_BEGIN(scn);
intr_summary = CE_INTERRUPT_SUMMARY(scn);
if (intr_summary == 0) {
if ((scn->target_status != OL_TRGET_STATUS_RESET) &&
(!cdf_atomic_read(&scn->link_suspended))) {
hif_write32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |
PCIE_INTR_ENABLE_ADDRESS),
HOST_GROUP0_MASK);
hif_read32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |
PCIE_INTR_ENABLE_ADDRESS));
}
A_TARGET_ACCESS_END(scn);
return;
} else {
A_TARGET_ACCESS_END(scn);
}
scn->ce_irq_summary = intr_summary;
for (id = 0; intr_summary && (id < scn->ce_count); id++) {
if (intr_summary & (1 << id)) {
intr_summary &= ~(1 << id);
ret = icnss_dispatch_one_ce_irq(id);
}
}
}
/**
 * icnss_get_soc_info() - get soc info
 * @info: struct icnss_soc_info to be filled in
 *
 * This function queries the SoC information from the platform
 * driver.
*
* Return: 0 for success
*/
int icnss_get_soc_info(struct icnss_soc_info *info)
{
struct ol_softc *scn = cds_get_context(CDF_MODULE_ID_HIF);
if (!scn) {
HIF_ERROR("%s: SCN = NULL", __func__);
return -EINVAL;
}
info->v_addr = scn->mem;
info->p_addr = scn->mem_pa;
info->version = 0;
return 0;
}
/**
 * icnss_get_irq_num() - generate a number to represent an irq number
 * @ce_id: copy engine id
 *
 * Return: a fake irq number (ce_id + 100), or -EINVAL for an invalid ce_id
 */
static int icnss_get_irq_num(int ce_id)
{
if (ce_id < CE_COUNT_MAX && ce_id >= 0)
return ce_id + 100;
pr_err("icnss: No irq registered for CE id %d\n", ce_id);
return -EINVAL;
}
int icnss_get_ce_id(int irq)
{
int ce_id = irq - 100;
if (ce_id < CE_COUNT_MAX && ce_id >= 0)
return ce_id;
pr_err("icnss: No matching CE id for irq %d\n", irq);
return -EINVAL;
}
#endif /* HIF_PCI */

View File

@ -1,135 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifdef HIF_PCI
#ifndef _ICNSS_WLAN_H_
#define _ICNSS_WLAN_H_
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#define ICNSS_MAX_IRQ_REGISTRATIONS 12
/**
 * struct ce_tgt_pipe_cfg - target-side copy engine pipe configuration
 *
 * @pipe_num: pipe number
 * @pipe_dir: pipe direction
 * @nentries: number of ring entries
 * @nbytes_max: maximum bytes per transfer
 * @flags: pipe flags
 * @reserved: reserved
*/
struct ce_tgt_pipe_cfg {
uint32_t pipe_num;
uint32_t pipe_dir;
uint32_t nentries;
uint32_t nbytes_max;
uint32_t flags;
uint32_t reserved;
};
/**
 * struct ce_svc_pipe_cfg - service to copy engine pipe mapping
 *
 * @service_id: HTC service id
 * @pipedir: pipe direction
 * @pipenum: pipe number
*/
struct ce_svc_pipe_cfg {
uint32_t service_id;
uint32_t pipedir;
uint32_t pipenum;
};
/**
* struct icnss_shadow_reg_cfg
*
* @ce_id: Copy engine id
* @reg_offset: Register offset
*/
struct icnss_shadow_reg_cfg {
u16 ce_id;
u16 reg_offset;
};
/**
 * struct icnss_wlan_enable_cfg - configuration passed to the platform driver
 *
 * @num_ce_tgt_cfg: number of target CE pipe configuration entries
 * @ce_tgt_cfg: target CE pipe configuration table
 * @num_ce_svc_pipe_cfg: number of service-to-pipe mapping entries
 * @ce_svc_cfg: service-to-pipe mapping table
 * @num_shadow_reg_cfg: number of shadow register configuration entries
 * @shadow_reg_cfg: shadow register configuration table
*/
struct icnss_wlan_enable_cfg {
uint32_t num_ce_tgt_cfg;
struct ce_tgt_pipe_cfg *ce_tgt_cfg;
uint32_t num_ce_svc_pipe_cfg;
struct ce_svc_pipe_cfg *ce_svc_cfg;
u32 num_shadow_reg_cfg;
struct icnss_shadow_reg_cfg *shadow_reg_cfg;
};
/**
 * enum icnss_driver_mode - driver mode passed to the platform driver
 *
 * @ICNSS_MISSION: mission (normal) mode
 * @ICNSS_FTM: factory test mode
 * @ICNSS_EPPING: endpoint ping (epping) mode
*/
enum icnss_driver_mode {
ICNSS_MISSION,
ICNSS_FTM,
ICNSS_EPPING,
};
/**
* struct icnss_soc_info
*
* @v_addr: virtual address
* @p_addr: physical address
 * @version: version
*/
struct icnss_soc_info {
void __iomem *v_addr;
phys_addr_t p_addr;
uint32_t version;
};
int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
enum icnss_driver_mode mode, const char *host_version);
int icnss_wlan_disable(enum icnss_driver_mode mode);
int icnss_set_fw_debug_mode(bool mode);
int icnss_ce_request_irq(int ce_id,
irqreturn_t (*handler)(int ce_id, void *arg),
unsigned long flags, const char *name, void *context);
int icnss_ce_free_irq(int ce_id, void *context);
void icnss_enable_irq(unsigned int ce_id);
void icnss_disable_irq(unsigned int ce_id);
int icnss_get_soc_info(struct icnss_soc_info *info);
int icnss_get_ce_id(int irq);
#endif /* _ICNSS_WLAN_H_ */
#endif /* HIF_PCI */

View File

@ -1,327 +0,0 @@
/*
* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "hif_io32.h"
#include "hif_debug.h"
/*chaninfo*/
#define CHANINFOMEM_S2_READ_MASK 0x00000008
#define CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK 0x00000001
#define CHANINFO_CTRL_CHANINFOMEM_BW_MASK 0x00000030
#define MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK 0x00000007
/*agc*/
#define GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK 0x00040000
#define GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK 0x00080000
#define GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK 0x00100000
#define GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK 0x00200000
#define AGC_HISTORY_DUMP_MASK (\
GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK| \
GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK| \
GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK| \
GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK \
)
#define BB_chaninfo_ctrl 0x1a370
#define BB_multichain_enable 0x1a2a0
#define BB_chn_tables_intf_addr 0x19894
#define BB_chn1_tables_intf_addr 0x1a894
#define BB_chn_tables_intf_data 0x19898
#define BB_chn1_tables_intf_data 0x1a898
#define BB_gains_min_offsets 0x19e08
#define BB_chaninfo_tab_b0 0x03200
#define BB_chaninfo_tab_b1 0x03300
#define BB_watchdog_status 0x1a7c0
#define BB_watchdog_ctrl_1 0x1a7c4
#define BB_watchdog_ctrl_2 0x1a7c8
#define BB_watchdog_status_B 0x1a7e0
#define PHY_BB_CHN_TABLES_INTF_ADDR 0x19894
#define PHY_BB_CHN_TABLES_INTF_DATA 0x19898
#define PHY_BB_CHN1_TABLES_INTF_ADDR 0x1a894
#define PHY_BB_CHN1_TABLES_INTF_DATA 0x1a898
struct priv_ctrl_ctx {
uint32_t chaninfo_ctrl_orig;
uint32_t gain_min_offsets_orig;
uint32_t anyreg_start;
uint32_t anyreg_len;
};
static struct priv_ctrl_ctx g_priv_dump_ctx;
static INLINE void set_target_reg_bits(void __iomem *mem, uint32_t reg,
uint32_t bitmask, uint32_t val)
{
uint32_t value = hif_read32_mb(mem + (reg));
uint32_t shift = 0;
value &= ~(bitmask);
while (!((bitmask >> shift) & 0x01))
shift++;
value |= (((val) << shift) & (bitmask));
hif_write32_mb(mem + (reg), value);
}
static INLINE uint32_t get_target_reg_bits(void __iomem *mem,
uint32_t reg, uint32_t bitmask)
{
uint32_t value = hif_read32_mb(mem + (reg));
uint32_t shift = 0;
while (!((bitmask >> shift) & 0x01))
shift++;
return (value >> shift) & bitmask;
}
void priv_start_cap_chaninfo(struct ol_softc *scn)
{
set_target_reg_bits(scn->mem, BB_chaninfo_ctrl,
CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK, 1);
}
void priv_start_agc(struct ol_softc *scn)
{
g_priv_dump_ctx.gain_min_offsets_orig =
hif_read32_mb(scn->mem + BB_gains_min_offsets);
set_target_reg_bits(scn->mem, BB_gains_min_offsets,
AGC_HISTORY_DUMP_MASK,
0x0f);
}
void priv_stop_agc(struct ol_softc *scn)
{
set_target_reg_bits(scn->mem, BB_gains_min_offsets,
AGC_HISTORY_DUMP_MASK,
0);
}
void priv_dump_chaninfo(struct ol_softc *scn)
{
uint32_t bw, val;
uint32_t len, i, tmp;
uint32_t chain_mask;
uint32_t chain0, chain1;
chain_mask =
get_target_reg_bits(scn->mem, BB_multichain_enable,
MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK);
chain0 = chain_mask & 1;
chain1 = chain_mask & 2;
HIF_TRACE("%s: E", __func__);
bw = get_target_reg_bits(scn->mem, BB_chaninfo_ctrl,
CHANINFO_CTRL_CHANINFOMEM_BW_MASK);
if (bw == 0)
len = 53;
else if (bw == 1)
len = 57;
else if (bw == 2)
len = 59 * 2 - 1;
else
len = 60 * 2 + 61 * 2;
/*
* each tone is 16 bit valid, write to 32bit buffer each.
* bw==0(legacy20): 53 tones.
* bw==1(ht/vht20): 57 tones.
* bw==2(ht/vht40): 59+58 tones.
* bw==3(vht80): 60*2+61*2 tones.
*/
if (chain0) {
hif_write32_mb(scn->mem + BB_chn_tables_intf_addr,
0x80003200);
}
if (chain1) {
hif_write32_mb(scn->mem + BB_chn1_tables_intf_addr,
0x80003200);
}
set_target_reg_bits(scn->mem, BB_chaninfo_ctrl,
CHANINFOMEM_S2_READ_MASK, 0);
if (chain0) {
if (bw < 2) {
len = (bw == 0) ? 53 : 57;
for (i = 0; i < len; i++) {
val =
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data) &
0x0000ffff;
cdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
}
} else {
len = (bw == 2) ? 59 : 60;
for (i = 0; i < len; i++) {
tmp =
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data);
cdf_print("0x%x\t", ((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
}
if (bw > 2) {
/* bw == 3 for vht80 */
hif_write32_mb(scn->mem +
BB_chn_tables_intf_addr,
0x80003300);
len = 61;
for (i = 0; i < len; i++) {
tmp =
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data);
cdf_print("0x%x\t",
((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
}
}
}
}
if (chain1) {
if (bw < 2) {
len = (bw == 0) ? 53 : 57;
for (i = 0; i < len; i++) {
val =
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data) &
0x0000ffff;
cdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
}
} else {
len = (bw == 2) ? 59 : 60;
for (i = 0; i < len; i++) {
tmp =
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data);
cdf_print("0x%x\n", (tmp >> 16) & 0x0000ffff);
cdf_print("0x%x\n", tmp & 0x0000ffff);
if (i % 2 == 0)
cdf_print("\n");
}
if (bw > 2) {
/* bw == 3 for vht80 */
hif_write32_mb(scn->mem +
BB_chn1_tables_intf_addr,
0x80003300);
len = 61;
for (i = 0; i < len; i++) {
tmp =
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data);
cdf_print("0x%x\t",
((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
}
}
}
}
HIF_TRACE("%s: X", __func__);
}
void priv_dump_agc(struct ol_softc *scn)
{
int i, len = 30; /* check this value for Rome and Peregrine */
uint32_t chain0, chain1, chain_mask, val;
A_TARGET_ACCESS_BEGIN(scn);
chain_mask =
get_target_reg_bits(scn->mem, BB_multichain_enable,
MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK);
chain0 = chain_mask & 1;
chain1 = chain_mask & 2;
len = len << 1; /* each agc item is 64bit, total*2 */
priv_stop_agc(scn);
set_target_reg_bits(scn->mem, BB_chaninfo_ctrl,
CHANINFOMEM_S2_READ_MASK, 0);
HIF_TRACE("%s: AGC history buffer dump: E", __func__);
if (chain0) {
for (i = 0; i < len; i++) {
hif_write32_mb(scn->mem +
PHY_BB_CHN_TABLES_INTF_ADDR,
BB_chaninfo_tab_b0 + i * 4);
val = hif_read32_mb(scn->mem +
PHY_BB_CHN_TABLES_INTF_DATA);
cdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
}
}
if (chain1) {
for (i = 0; i < len; i++) {
hif_write32_mb(scn->mem +
PHY_BB_CHN1_TABLES_INTF_ADDR,
BB_chaninfo_tab_b0 + i * 4);
val = hif_read32_mb(scn->mem +
PHY_BB_CHN1_TABLES_INTF_DATA);
cdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
}
}
HIF_TRACE("%s: AGC history buffer dump X", __func__);
/* restore original value */
hif_write32_mb(scn->mem + BB_gains_min_offsets,
g_priv_dump_ctx.gain_min_offsets_orig);
A_TARGET_ACCESS_END(scn);
return;
}
void priv_dump_bbwatchdog(struct ol_softc *scn)
{
uint32_t val;
HIF_TRACE("%s: BB watchdog dump E", __func__);
val = hif_read32_mb(scn->mem + BB_watchdog_status);
cdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_1);
cdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_2);
cdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_status_B);
cdf_print("0x%x", val);
HIF_TRACE("%s: BB watchdog dump X", __func__);
}

View File

@ -1,36 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __MP_DEV_H__
#define __MP_DEV_H__
void priv_start_agc(struct ol_softc *scn);
void priv_dump_agc(struct ol_softc *scn);
void priv_start_cap_chaninfo(struct ol_softc *scn);
void priv_dump_chaninfo(struct ol_softc *scn);
void priv_dump_bbwatchdog(struct ol_softc *scn);
void hif_shut_down_device(struct ol_softc *scn);
#endif /* __MP_DEV_H__ */

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CNSS_STUB_H__
#define __CNSS_STUB_H__
#ifndef CONFIG_CNSS
inline void cnss_wlan_pci_link_down(void) {}
inline int cnss_pcie_shadow_control(struct pci_dev *dev, bool enable)
{
return 0;
}
#endif
#endif /* __CNSS_STUB_H__ */

View File

@ -1,312 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __HIF_IO32_PCI_H__
#define __HIF_IO32_PCI_H__
#ifdef HIF_PCI
#include "hif.h"
#include "regtable.h"
#include "ce_reg.h"
#include "cdf_atomic.h"
#include "if_pci.h"
/*
* For maximum performance and no power management, set this to 1.
* For power management at the cost of performance, set this to 0.
*/
#define CONFIG_ATH_PCIE_MAX_PERF 0
/*
* For keeping the target awake till the driver is
* loaded, set this to 1
*/
#define CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD 1
/*
* When CONFIG_ATH_PCIE_MAX_PERF is 0:
* To use LIKELY hints, set this to 1 (slightly better performance, more power)
* To ignore "LIKELY" hints, set this to 0 (slightly worse performance,
* less power)
*/
#if defined(CONFIG_ATH_PCIE_MAX_PERF)
#define CONFIG_ATH_PCIE_ACCESS_LIKELY 0
#else
#define CONFIG_ATH_PCIE_ACCESS_LIKELY 1
#endif
/*
* PCI-E L1 ASPPM sub-states
* To enable clock gating in L1 state, set this to 1.
* (less power, slightly more wakeup latency)
* To disable clock gating in L1 state, set this to 0. (slighly more power)
*/
#define CONFIG_PCIE_ENABLE_L1_CLOCK_GATE 1
/*
* PCIE_ACCESS_LOG_NUM specifies the number of
* read/write records to store
*/
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_LOG_NUM 500
#endif
/* 64-bit MSI support */
#define CONFIG_PCIE_64BIT_MSI 0
/* BAR0 ready checking for AR6320v2 */
#define PCIE_BAR0_READY_CHECKING 0
/* AXI gating when L1, L2 to reduce power consumption */
#define CONFIG_PCIE_ENABLE_AXI_CLK_GATE 0
#define hif_read32_mb(addr) ioread32((void __iomem *)addr)
#define hif_write32_mb(addr, value) \
iowrite32((u32)(value), (void __iomem *)(addr))
extern int hif_target_sleep_state_adjust(struct ol_softc *scn,
bool sleep_ok,
bool wait_for_it);
#if CONFIG_ATH_PCIE_MAX_PERF
#define A_TARGET_ACCESS_BEGIN(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_END(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_OK(scn) 1
#define A_TARGET_ACCESS_LIKELY(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_UNLIKELY(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_READ(scn, offset) \
hif_read32_mb(scn->mem + (offset))
void war_pci_write32(char *addr, u32 offset, u32 value);
#define A_TARGET_WRITE(scn, offset, value) \
war_pci_write32(scn->mem, (offset), (value))
#define A_TARGET_ACCESS_BEGIN_RET(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_END_RET(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_END_RET_EXT(scn, val) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#define A_TARGET_ACCESS_END_RET_PTR(scn) \
do {struct ol_softc *unused = scn; \
unused = unused; } while (0)
#else /* CONFIG_ATH_PCIE_MAX_PERF */
void war_pci_write32(char *addr, u32 offset, u32 value);
#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_BEGIN(scn) < 0) \
val = -1; \
} while (0)
#define A_TARGET_ACCESS_BEGIN_RET(scn) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_BEGIN(scn) < 0) \
return ATH_ISR_NOSCHED; \
} while (0)
#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_BEGIN(scn) < 0) \
return NULL; \
} while (0)
#define A_TARGET_ACCESS_BEGIN(scn) \
do { \
if (Q_TARGET_ACCESS_BEGIN(scn) < 0) \
return; \
} while (0)
#define Q_TARGET_ACCESS_BEGIN(scn) \
hif_target_sleep_state_adjust(scn, false, true)
#define A_TARGET_ACCESS_END_RET(scn) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_END(scn) < 0) \
return ATH_ISR_NOSCHED; \
} while (0)
#define A_TARGET_ACCESS_END_RET_EXT(scn, val) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_END(scn) < 0) \
val = -1; \
} while (0)
#define A_TARGET_ACCESS_END_RET_PTR(scn) \
do { \
if (!WLAN_IS_EPPING_ENABLED(cds_get_conparam()) && \
Q_TARGET_ACCESS_END(scn) < 0) \
return NULL; \
} while (0)
#define A_TARGET_ACCESS_END(scn) \
do { \
if (Q_TARGET_ACCESS_END(scn) < 0) \
return; \
} while (0)
#define Q_TARGET_ACCESS_END(scn) \
hif_target_sleep_state_adjust(scn, true, false)
#define A_TARGET_ACCESS_OK(scn) hif_target_forced_awake(scn)
#if CONFIG_ATH_PCIE_ACCESS_LIKELY
#define A_TARGET_ACCESS_LIKELY(scn) \
hif_target_sleep_state_adjust(scn, false, false)
#define A_TARGET_ACCESS_UNLIKELY(scn) \
hif_target_sleep_state_adjust(scn, true, false)
#else /* CONFIG_ATH_PCIE_ACCESS_LIKELY */
#define A_TARGET_ACCESS_LIKELY(scn) \
do { \
unsigned long unused = (unsigned long)(scn); \
unused = unused; \
} while (0)
#define A_TARGET_ACCESS_UNLIKELY(scn) \
do { \
unsigned long unused = (unsigned long)(scn); \
unused = unused; \
} while (0)
#endif /* CONFIG_ATH_PCIE_ACCESS_LIKELY */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
extern uint32_t hif_target_read_checked(struct ol_softc *scn,
uint32_t offset);
extern void hif_target_write_checked(struct ol_softc *scn, uint32_t offset,
uint32_t value);
#define A_TARGET_READ(scn, offset) \
hif_target_read_checked(scn, (offset))
#define A_TARGET_WRITE(scn, offset, value) \
hif_target_write_checked(scn, (offset), (value))
#else /* CONFIG_ATH_PCIE_ACCESS_DEBUG */
#define A_TARGET_READ(scn, offset) \
hif_read32_mb(scn->mem + (offset))
#define A_TARGET_WRITE(scn, offset, value) \
war_pci_write32(scn->mem, (offset), (value))
#endif
#endif /* CONFIG_ATH_PCIE_MAX_PERF */
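/*
 * Illustrative sketch (not part of the original file): the access macros
 * above are meant to bracket target register accesses so the target is kept
 * awake (or, with CONFIG_ATH_PCIE_MAX_PERF, left always-on) while it is being
 * touched. The helper name and the use of FW_INDICATOR_ADDRESS from
 * regtable.h are assumptions made for the example.
 */
#if 0	/* example only; never compiled */
static void example_read_fw_indicator(struct ol_softc *scn)
{
	uint32_t val;

	A_TARGET_ACCESS_BEGIN(scn);	/* wake the target if power managed */
	val = A_TARGET_READ(scn, FW_INDICATOR_ADDRESS);
	A_TARGET_ACCESS_END(scn);	/* let the target sleep again */
	(void)val;
}
#endif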
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
/**
* ce_irq_enable() - ce_irq_enable
* @scn: ol_softc
* @ce_id: ce_id
*
* Return: void
*/
static inline void ce_irq_enable(struct ol_softc *scn, int ce_id)
{
uint32_t tmp = 1 << ce_id;
struct hif_pci_softc *sc = scn->hif_sc;
cdf_spin_lock_irqsave(&scn->irq_lock);
scn->ce_irq_summary &= ~tmp;
if (scn->ce_irq_summary == 0) {
/* Enable Legacy PCI line interrupts */
if (LEGACY_INTERRUPTS(sc) &&
(scn->target_status != OL_TRGET_STATUS_RESET) &&
(!cdf_atomic_read(&scn->link_suspended))) {
hif_write32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |
PCIE_INTR_ENABLE_ADDRESS),
HOST_GROUP0_MASK);
hif_read32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |
PCIE_INTR_ENABLE_ADDRESS));
}
}
if (scn->hif_init_done == true)
A_TARGET_ACCESS_END(scn);
cdf_spin_unlock_irqrestore(&scn->irq_lock);
/* check for missed firmware crash */
hif_fw_interrupt_handler(0, scn);
}
/**
* ce_irq_disable() - ce_irq_disable
* @scn: ol_softc
* @ce_id: ce_id
*
* Return: void
*/
static inline void ce_irq_disable(struct ol_softc *scn, int ce_id)
{
/* For Rome only need to wake up target */
A_TARGET_ACCESS_BEGIN(scn);
}
/**
* soc_wake_reset() - soc_wake_reset
* @scn: ol_softc
*
* Return: void
*/
static inline void soc_wake_reset(struct ol_softc *scn)
{
hif_write32_mb(scn->mem +
PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_RESET);
}
#endif /* HIF_PCI */
#endif /* __HIF_IO32_PCI_H__ */

File diff suppressed because it is too large

View File

@ -1,234 +0,0 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __ATH_PCI_H__
#define __ATH_PCI_H__
#include <linux/version.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#define ATH_DBG_DEFAULT 0
#include <osdep.h>
#include <ol_if_athvar.h>
#include <athdefs.h>
#include "osapi_linux.h"
#include "hif.h"
#include "cepci.h"
struct CE_state;
struct ol_softc;
/* An address (e.g. of a buffer) in Copy Engine space. */
#define HIF_MAX_TASKLET_NUM 11
struct hif_tasklet_entry {
uint8_t id; /* 0 - 9: maps to CE, 10: fw */
void *hif_handler; /* struct hif_pci_softc */
};
/**
* enum hif_pm_runtime_state - Driver States for Runtime Power Management
 * @HIF_PM_RUNTIME_STATE_NONE: runtime pm is off
 * @HIF_PM_RUNTIME_STATE_ON: runtime pm is active and link is active
 * @HIF_PM_RUNTIME_STATE_INPROGRESS: a runtime suspend or resume is in progress
 * @HIF_PM_RUNTIME_STATE_SUSPENDED: the driver is runtime suspended
*/
enum hif_pm_runtime_state {
HIF_PM_RUNTIME_STATE_NONE,
HIF_PM_RUNTIME_STATE_ON,
HIF_PM_RUNTIME_STATE_INPROGRESS,
HIF_PM_RUNTIME_STATE_SUSPENDED,
};
#ifdef FEATURE_RUNTIME_PM
/**
* struct hif_pm_runtime_lock - data structure for preventing runtime suspend
* @list - global list of runtime locks
* @active - true if this lock is preventing suspend
* @name - character string for tracking this lock
*/
struct hif_pm_runtime_lock {
struct list_head list;
bool active;
uint32_t timeout;
const char *name;
};
/* Debugging stats for Runtime PM */
struct hif_pci_pm_stats {
u32 suspended;
u32 suspend_err;
u32 resumed;
u32 runtime_get;
u32 runtime_put;
u32 request_resume;
u32 allow_suspend;
u32 prevent_suspend;
u32 prevent_suspend_timeout;
u32 allow_suspend_timeout;
u32 runtime_get_err;
void *last_resume_caller;
unsigned long suspend_jiffies;
};
#endif
struct hif_pci_softc {
void __iomem *mem; /* PCI address. */
/* For efficiency, should be first in struct */
struct device *dev;
struct pci_dev *pdev;
struct ol_softc *ol_sc;
int num_msi_intrs; /* number of MSI interrupts granted */
/* 0 --> using legacy PCI line interrupts */
struct tasklet_struct intr_tq; /* tasklet */
int irq;
int irq_event;
int cacheline_sz;
u16 devid;
cdf_dma_addr_t soc_pcie_bar0;
struct hif_tasklet_entry tasklet_entries[HIF_MAX_TASKLET_NUM];
bool pci_enabled;
#ifdef FEATURE_RUNTIME_PM
atomic_t pm_state;
uint32_t prevent_suspend_cnt;
struct hif_pci_pm_stats pm_stats;
struct work_struct pm_work;
spinlock_t runtime_lock;
struct timer_list runtime_timer;
struct list_head prevent_suspend_list;
unsigned long runtime_timer_expires;
struct hif_pm_runtime_lock *prevent_linkdown_lock;
#ifdef WLAN_OPEN_SOURCE
struct dentry *pm_dentry;
#endif
#endif
};
bool hif_pci_targ_is_present(struct ol_softc *scn, void *__iomem *mem);
void icnss_dispatch_ce_irq(struct ol_softc *scn);
int hif_configure_irq(struct hif_pci_softc *sc);
void hif_pci_cancel_deferred_target_sleep(struct ol_softc *scn);
/*
* A firmware interrupt to the Host is indicated by the
* low bit of SCRATCH_3_ADDRESS being set.
*/
#define FW_EVENT_PENDING_REG_ADDRESS SCRATCH_3_ADDRESS
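/*
 * Illustrative sketch (not part of the original file): a pending firmware
 * event is detected by testing the low bit of the scratch register named
 * above. Reading it directly off scn->mem here is a simplification; the real
 * handler may add a register base offset before the read.
 */
#if 0	/* example only; never compiled */
static bool example_fw_event_pending(struct ol_softc *scn)
{
	return hif_read32_mb(scn->mem + FW_EVENT_PENDING_REG_ADDRESS) & 0x1;
}
#endif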
/*
* Typically, MSI Interrupts are used with PCIe. To force use of legacy
* "ABCD" PCI line interrupts rather than MSI, define
* FORCE_LEGACY_PCI_INTERRUPTS.
 * Even when NOT forced, the driver may attempt to use legacy PCI interrupts
 * if MSI allocation fails.
*/
#define LEGACY_INTERRUPTS(sc) ((sc)->num_msi_intrs == 0)
/*
* There may be some pending tx frames during platform suspend.
 * Suspend operation should be delayed until those tx frames are
 * transferred from the host to the target. This macro specifies how
 * long the suspend thread has to sleep before checking the pending tx
 * frame count.
*/
#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */
#define HIF_CE_DRAIN_WAIT_DELAY 10 /* ms */
/*
* Wait time (in unit of OL_ATH_TX_DRAIN_WAIT_DELAY) for pending
* tx frame completion before suspend. Refer: hif_pci_suspend()
*/
#ifndef QCA_WIFI_3_0_EMU
#define OL_ATH_TX_DRAIN_WAIT_CNT 10
#else
#define OL_ATH_TX_DRAIN_WAIT_CNT 60
#endif
#define HIF_CE_DRAIN_WAIT_CNT 20
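/*
 * Illustrative sketch (not part of the original file): the drain wait in
 * hif_pci_suspend() is expected to poll the pending-tx count, sleeping
 * OL_ATH_TX_DRAIN_WAIT_DELAY ms between checks and giving up after
 * OL_ATH_TX_DRAIN_WAIT_CNT iterations. example_tx_pending() stands in for
 * the real txrx query.
 */
#if 0	/* example only; never compiled */
static int example_wait_for_tx_drain(void)
{
	int count = 0;

	while (example_tx_pending()) {
		if (++count > OL_ATH_TX_DRAIN_WAIT_CNT)
			return -EBUSY;	/* frames still pending; abort suspend */
		msleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
	}
	return 0;
}
#endif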
#ifdef FEATURE_RUNTIME_PM
#include <linux/pm_runtime.h>
#ifdef WLAN_OPEN_SOURCE
static inline int hif_pm_request_resume(struct device *dev)
{
return pm_request_resume(dev);
}
static inline int __hif_pm_runtime_get(struct device *dev)
{
return pm_runtime_get(dev);
}
static inline int hif_pm_runtime_put_auto(struct device *dev)
{
return pm_runtime_put_autosuspend(dev);
}
static inline void hif_pm_runtime_mark_last_busy(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
}
static inline int hif_pm_runtime_resume(struct device *dev)
{
return pm_runtime_resume(dev);
}
#else
static inline int hif_pm_request_resume(struct device *dev)
{
return cnss_pm_runtime_request(dev, CNSS_PM_REQUEST_RESUME);
}
static inline int __hif_pm_runtime_get(struct device *dev)
{
return cnss_pm_runtime_request(dev, CNSS_PM_RUNTIME_GET);
}
static inline int hif_pm_runtime_put_auto(struct device *dev)
{
return cnss_pm_runtime_request(dev, CNSS_PM_RUNTIME_PUT_AUTO);
}
static inline void hif_pm_runtime_mark_last_busy(struct device *dev)
{
cnss_pm_runtime_request(dev, CNSS_PM_RUNTIME_MARK_LAST_BUSY);
}
static inline int hif_pm_runtime_resume(struct device *dev)
{
return cnss_pm_runtime_request(dev, CNSS_PM_RUNTIME_RESUME);
}
#endif /* WLAN_OPEN_SOURCE */
#else
static inline void hif_pm_runtime_mark_last_busy(struct device *dev) { }
#endif /* FEATURE_RUNTIME_PM */
#endif /* __ATH_PCI_H__ */

View File

@ -1,110 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __IF_PCI_INTERNAL_H__
#define __IF_PCI_INTERNAL_H__
#ifdef CONFIG_CNSS
#define HIF_REGISTER_DRIVER(wlan_drv_id) \
cnss_wlan_register_driver(wlan_drv_id)
#define HIF_UNREGISTER_DRIVER(wlan_drv_id) \
cnss_wlan_unregister_driver(wlan_drv_id)
#else
#define HIF_REGISTER_DRIVER(wlan_drv_id) \
pci_register_driver(wlan_drv_id)
#define HIF_UNREGISTER_DRIVER(wlan_drv_id) \
pci_unregister_driver(wlan_drv_id)
#endif
#ifdef DISABLE_L1SS_STATES
#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr) \
{ \
uint32_t lcr_val; \
pci_read_config_dword(pdev, addr, &lcr_val); \
pci_write_config_dword(pdev, addr, (lcr_val & ~0x0000000f)); \
}
#else
#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr)
#endif
#if defined(CONFIG_CNSS) && !defined(QCA_WIFI_3_0)
#define GET_VIRT_RAMDUMP_MEM(ol_sc) \
{ \
ol_sc->ramdump_base = cnss_get_virt_ramdump_mem(&ol_sc->ramdump_size); \
if (ol_sc->ramdump_base == NULL || !ol_sc->ramdump_size) \
HIF_ERROR("%s: Failed to get RAM dump memory addr or size!", \
__func__); \
}
#else
#define GET_VIRT_RAMDUMP_MEM(ol_sc)
#endif
#ifdef QCA_WIFI_3_0
#define PCI_CLR_CAUSE0_REGISTER(sc) \
{ \
uint32_t tmp_cause0; \
tmp_cause0 = hif_read32_mb(sc->mem + PCIE_INTR_CAUSE_ADDRESS); \
hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS, \
PCIE_INTR_FIRMWARE_MASK | tmp_cause0); \
hif_read32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS); \
hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS, 0); \
hif_read32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS); \
}
#define HIF_PCI_TARG_IS_AWAKE(scn, mem) \
{ \
return 1; \
}
#else
#define PCI_CLR_CAUSE0_REGISTER(sc)
#define HIF_PCI_TARG_IS_AWAKE(scn, mem) \
{ \
uint32_t val; \
if (scn->recovery) \
return false; \
val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS \
+ RTC_STATE_ADDRESS); \
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; \
}
#endif
#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
#define PKT_LOG_MOD_INIT(ol_sc) \
{ \
ol_txrx_pdev_handle pdev_txrx_handle; \
pdev_txrx_handle = cds_get_context(CDF_MODULE_ID_TXRX); \
if (cds_get_conparam() != CDF_GLOBAL_FTM_MODE && \
!WLAN_IS_EPPING_ENABLED(cds_get_conparam())) { \
ol_pl_sethandle(&pdev_txrx_handle->pl_dev, ol_sc); \
if (pktlogmod_init(ol_sc)) \
HIF_ERROR("%s: pktlogmod_init failed", __func__); \
} \
}
#else
#define PKT_LOG_MOD_INIT(ol_sc)
#endif
#endif /* __IF_PCI_INTERNAL_H__ */

File diff suppressed because it is too large

View File

@ -1,91 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "bmi_msg.h"
#include "targaddrs.h"
#include "cepci.h"
#include "regtable.h"
#include "ar9888def.h"
#include "ar6320def.h"
#include "ar6320v2def.h"
#include "qca6180def.h"
#include "ol_if_athvar.h"
#include "hif.h"
#include "adrastea_reg_def.h"
void target_register_tbl_attach(struct ol_softc *scn, u32 target_type)
{
switch (target_type) {
case TARGET_TYPE_AR9888:
scn->targetdef = &ar9888_targetdef;
scn->target_ce_def = &ar9888_ce_targetdef;
break;
case TARGET_TYPE_AR6320:
scn->targetdef = &ar6320_targetdef;
scn->target_ce_def = &ar6320_ce_targetdef;
break;
case TARGET_TYPE_AR6320V2:
scn->targetdef = &ar6320v2_targetdef;
scn->target_ce_def = &ar6320v2_ce_targetdef;
break;
case TARGET_TYPE_QCA6180:
scn->targetdef = &qca6180_targetdef;
scn->target_ce_def = &qca6180_ce_targetdef;
break;
case TARGET_TYPE_ADRASTEA:
scn->targetdef = &adrastea_targetdef;
scn->target_ce_def = &adrastea_ce_targetdef;
break;
default:
break;
}
}
void hif_register_tbl_attach(struct ol_softc *scn, u32 hif_type)
{
switch (hif_type) {
case HIF_TYPE_AR9888:
scn->hostdef = &ar9888_hostdef;
break;
case HIF_TYPE_AR6320:
scn->hostdef = &ar6320_hostdef;
break;
case HIF_TYPE_AR6320V2:
scn->hostdef = &ar6320v2_hostdef;
break;
case HIF_TYPE_QCA6180:
scn->hostdef = &qca6180_hostdef;
scn->host_shadow_regs = &qca6180_host_shadow_regs;
break;
case HIF_TYPE_ADRASTEA:
scn->hostdef = &adrastea_hostdef;
scn->host_shadow_regs = &adrastea_host_shadow_regs;
break;
default:
break;
}
}

View File

@ -1,236 +0,0 @@
/*
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: hif_io32_snoc.h
*
* snoc specific implementations and configurations
*/
#ifndef __HIF_IO32_SNOC_H__
#define __HIF_IO32_SNOC_H__
#ifdef HIF_PCI
#error snoc and pci cannot be supported in parallel at this time
#endif
#include "hif.h"
#include "regtable.h"
#include "ce_reg.h"
#include "cdf_atomic.h"
#include <soc/qcom/icnss.h>
#include "hif_main.h"
#include "hif_debug.h"
/**
 * The following features are not supported on the snoc bus.
 * They are forced to 0 here; consider moving the corresponding code into
 * pci-specific files.
*/
#define CONFIG_ATH_PCIE_MAX_PERF 0
#define CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD 0
#define CONFIG_ATH_PCIE_ACCESS_LIKELY 0
#define CONFIG_PCIE_ENABLE_L1_CLOCK_GATE 0
#define A_TARGET_ACCESS_LIKELY(scn)
#define A_TARGET_ACCESS_UNLIKELY(scn)
#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn)
#define A_TARGET_ACCESS_END_RET_PTR(scn)
#define A_TARGET_ACCESS_BEGIN(scn)
#define A_TARGET_ACCESS_END(scn)
#define A_TARGET_ACCESS_BEGIN_RET(scn)
#define A_TARGET_ACCESS_END_RET(scn)
#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val)
#define A_TARGET_ACCESS_END_RET_EXT(scn, val)
#define Q_TARGET_ACCESS_BEGIN(scn) 0
#define Q_TARGET_ACCESS_END(scn) 0
static inline void hif_pci_cancel_deferred_target_sleep(struct ol_softc *scn)
{
return;
}
static inline void hif_target_sleep_state_adjust(struct ol_softc *scn,
bool sleep_ok, bool wait_for_it)
{
return;
}
/**
* soc_wake_reset() - soc_wake_reset
* @scn: ol_softc
*
* Return: void
*/
static inline void soc_wake_reset(struct ol_softc *scn)
{
}
/**
* hif_write32_mb - SNOC write 32
* @addr: physical address
* @value: value
*
* Return: N/A
*/
static inline void hif_write32_mb(void __iomem *addr, uint32_t value)
{
wmb(); /* write memory barrier */
writel_relaxed((value), (addr));
wmb(); /* write memory barrier */
}
/**
* hif_read32_mb - SNOC read 32
* @addr: physical address
*
* Return: N/A
*/
static inline uint32_t hif_read32_mb(void __iomem *addr)
{
uint32_t tmp;
rmb(); /* read memory barrier */
tmp = readl_relaxed(addr);
rmb(); /* read memory barrier */
return tmp;
}
#define A_TARGET_READ(scn, offset) \
hif_read32_mb(scn->mem + (offset))
#define A_TARGET_WRITE(scn, offset, value) \
hif_write32_mb((scn->mem + offset), (value))
#define ADRASTEA_CE_INTR_ENABLES 0x002F00A8
#define ADRASTEA_CE_INTR_ENABLES_SET "COMING IN REGISTER SET36"
#define ADRASTEA_CE_INTR_ENABLES_CLEAR "COMING IN REGISTER SET36"
#define ADRASTEA_CE_INTR_STATUS 0x002F00AC
static inline void ce_enable_irq_in_individual_register(struct ol_softc *scn,
int ce_id)
{
uint32_t offset;
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
hif_write32_mb(scn->mem + offset, 1);
hif_read32_mb(scn->mem + offset);
}
static inline void ce_disable_irq_in_individual_register(struct ol_softc *scn,
int ce_id)
{
uint32_t offset;
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
hif_write32_mb(scn->mem + offset, 0);
hif_read32_mb(scn->mem + offset);
}
static inline void ce_read_irq_group_status(struct ol_softc *scn)
{
uint32_t group_status = 0;
group_status = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_STATUS);
}
static inline void ce_clear_irq_group_status(struct ol_softc *scn, int mask)
{
uint32_t group_status = 0;
group_status = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_STATUS);
hif_write32_mb(scn->mem +
ADRASTEA_CE_INTR_STATUS, mask);
group_status = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_STATUS);
}
/* this will need to be changed when we move to reg set 36
* because we will have set & clear registers provided
*/
static inline void ce_enable_irq_in_group_reg(struct ol_softc *scn,
int mask)
{
int new_mask = 0;
new_mask = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES);
new_mask |= mask;
hif_write32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES, new_mask);
mask = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES);
}
/* this will need to be changed when we move to reg set 36
* because we will have set & clear registers provided
*/
static inline void ce_disable_irq_in_group_reg(struct ol_softc *scn,
int mask)
{
int new_mask = 0;
new_mask = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES);
new_mask &= ~mask;
hif_write32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES, new_mask);
mask = hif_read32_mb(scn->mem +
ADRASTEA_CE_INTR_ENABLES);
}
/**
* ce_irq_enable() - enable copy engine IRQ
* @scn: struct ol_softc
* @ce_id: ce_id
*
* Return: N/A
*/
static inline void ce_irq_enable(struct ol_softc *scn,
int ce_id)
{
icnss_enable_irq(ce_id);
ce_enable_irq_in_individual_register(scn, ce_id);
ce_enable_irq_in_group_reg(scn, 1<<ce_id);
}
/**
* ce_irq_disable() - disable copy engine IRQ
* @scn: struct ol_softc
* @ce_id: ce_id
*
* Return: N/A
*/
static inline void ce_irq_disable(struct ol_softc *scn, int ce_id)
{
ce_disable_irq_in_group_reg(scn, 1<<ce_id);
ce_clear_irq_group_status(scn, 1<<ce_id);
ce_disable_irq_in_individual_register(scn, ce_id);
}
#endif

View File

@ -1,315 +0,0 @@
/*
* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: if_snoc.c
*
 * C file for snoc-specific implementations.
*/
#include "hif.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_tasklet.h"
/**
* hif_bus_prevent_linkdown(): prevent linkdown
*
* Dummy function for busses and platforms that do not support
* link down. This may need to be replaced with a wakelock.
*
* This is duplicated here because CONFIG_CNSS can be defined
* even though it is not used for the snoc bus.
*/
void hif_bus_prevent_linkdown(struct ol_softc *scn, bool flag)
{
HIF_ERROR("wlan: %s pcie power collapse ignored",
(flag ? "disable" : "enable"));
}
/**
* hif_targ_is_awake(): check if target is awake
*
* This function returns true if the target is awake
*
* @scn: struct ol_softc
* @mem: mapped mem base
*
* Return: bool
*/
bool hif_targ_is_awake(struct ol_softc *scn, void *__iomem *mem)
{
return true;
}
/**
* hif_reset_soc(): reset soc
*
* this function resets soc
*
* @hif_ctx: HIF context
*
* Return: void
*/
/* Function to reset SoC */
void hif_reset_soc(void *hif_ctx)
{
}
/**
* hif_disable_isr(): disable isr
*
* This function disables isr and kills tasklets
*
* @hif_ctx: struct ol_softc
*
* Return: void
*/
void hif_disable_isr(void *hif_ctx)
{
struct ol_softc *scn = (struct ol_softc *)hif_ctx;
hif_nointrs(scn);
ce_tasklet_kill(scn->hif_hdl);
cdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
/**
* hif_dump_snoc_registers(): dump CE debug registers
*
* This function dumps CE debug registers
*
* @scn: struct ol_softc
*
* Return: void
*/
static void hif_dump_snoc_registers(struct ol_softc *scn)
{
return;
}
/**
* hif_dump_registers(): dump bus debug registers
*
* This function dumps hif bus debug registers
*
* @scn: struct ol_softc
*
* Return: 0 for success or error code
*/
int hif_dump_registers(struct ol_softc *scn)
{
int status;
status = hif_dump_ce_registers(scn);
if (status)
return status;
hif_dump_snoc_registers(scn);
return 0;
}
/**
* hif_bus_suspend() - suspend the bus
*
 * This function suspends the bus; snoc needs no bus-level suspend handling,
 * so it does nothing.
*
* Return: 0 for success and non-zero for failure
*/
int hif_bus_suspend(void)
{
return 0;
}
/**
* hif_bus_resume() - hif resume API
*
 * This function resumes the bus; snoc needs no bus-level resume handling,
 * so it does nothing.
*
* Return: 0 for success and non-zero for failure
*/
int hif_bus_resume(void)
{
return 0;
}
/**
* hif_enable_power_gating(): enable HW power gating
*
* Return: n/a
*/
void hif_enable_power_gating(void *hif_ctx)
{
}
/**
* hif_disable_aspm(): hif_disable_aspm
*
* Return: n/a
*/
void hif_disable_aspm(void)
{
}
/**
* hif_bus_close(): hif_bus_close
*
* Return: n/a
*/
void hif_bus_close(struct ol_softc *scn)
{
}
/**
* hif_bus_open(): hif_bus_open
* @scn: scn
* @bus_type: bus type
*
* Return: n/a
*/
CDF_STATUS hif_bus_open(struct ol_softc *scn, enum ath_hal_bus_type bus_type)
{
return CDF_STATUS_SUCCESS;
}
/**
* hif_get_target_type(): Get the target type
*
* This function is used to query the target type.
*
* @ol_sc: ol_softc struct pointer
* @dev: device pointer
* @bdev: bus dev pointer
* @bid: bus id pointer
* @hif_type: HIF type such as HIF_TYPE_QCA6180
* @target_type: target type such as TARGET_TYPE_QCA6180
*
* Return: 0 for success
*/
int hif_get_target_type(struct ol_softc *ol_sc, struct device *dev,
void *bdev, const hif_bus_id *bid, uint32_t *hif_type,
uint32_t *target_type)
{
/* TODO: need to use CNSS's HW version. Hard code for now */
#ifdef QCA_WIFI_3_0_ADRASTEA
*hif_type = HIF_TYPE_ADRASTEA;
*target_type = TARGET_TYPE_ADRASTEA;
#else
*hif_type = 0;
*target_type = 0;
#endif
return 0;
}
/**
* hif_enable_bus(): hif_enable_bus
* @dev: dev
* @bdev: bus dev
* @bid: bus id
* @type: bus type
*
* Return: CDF_STATUS
*/
CDF_STATUS hif_enable_bus(struct ol_softc *ol_sc,
struct device *dev, void *bdev,
const hif_bus_id *bid,
enum hif_enable_type type)
{
int ret;
int hif_type;
int target_type;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
HIF_ERROR("%s: failed to set dma mask error = %d",
__func__, ret);
return ret;
}
if (!ol_sc) {
HIF_ERROR("%s: hif_ctx is NULL", __func__);
return CDF_STATUS_E_NOMEM;
}
ol_sc->aps_osdev.device = dev;
ol_sc->aps_osdev.bc.bc_handle = (void *)ol_sc->mem;
ol_sc->aps_osdev.bc.bc_bustype = type;
ret = hif_get_target_type(ol_sc, dev, bdev, bid,
&hif_type, &target_type);
if (ret < 0) {
HIF_ERROR("%s: invalid device id/revision_id", __func__);
return CDF_STATUS_E_FAILURE;
}
hif_register_tbl_attach(ol_sc, hif_type);
target_register_tbl_attach(ol_sc, target_type);
HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
__func__, hif_type, target_type);
ret = hif_init_cdf_ctx(ol_sc);
if (ret != 0) {
HIF_ERROR("%s: cannot init CDF", __func__);
return CDF_STATUS_E_FAILURE;
}
return CDF_STATUS_SUCCESS;
}
/**
* hif_disable_bus(): hif_disable_bus
*
* This function disables the bus
*
* @bdev: bus dev
*
* Return: none
*/
void hif_disable_bus(void *bdev)
{
}
/**
* hif_nointrs(): disable IRQ
*
* This function stops interrupt(s)
*
* @scn: struct ol_softc
*
* Return: none
*/
void hif_nointrs(struct ol_softc *scn)
{
if (scn->request_irq_done) {
ce_unregister_irq(scn->hif_hdl, 0xfff);
scn->request_irq_done = false;
}
}

View File

@ -1,208 +0,0 @@
/*
* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/* ============================================================================== */
/* Double-link list definitions (adapted from Atheros SDIO stack) */
/* */
/* Author(s): ="Atheros" */
/* ============================================================================== */
#ifndef __DL_LIST_H___
#define __DL_LIST_H___
#define A_CONTAINING_STRUCT(address, struct_type, field_name) \
((struct_type *)((char *)(address) - (char *)(&((struct_type *)0)->field_name)))
/* list functions */
/* pointers for the list */
typedef struct _DL_LIST {
struct _DL_LIST *pPrev;
struct _DL_LIST *pNext;
} DL_LIST, *PDL_LIST;
/*
* DL_LIST_INIT , initialize doubly linked list
*/
#define DL_LIST_INIT(pList) \
{(pList)->pPrev = pList; (pList)->pNext = pList; }
/* faster macro to init list and add a single item */
#define DL_LIST_INIT_AND_ADD(pList,pItem) \
{ (pList)->pPrev = (pItem); \
(pList)->pNext = (pItem); \
(pItem)->pNext = (pList); \
(pItem)->pPrev = (pList); \
}
#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && ((pList)->pNext == (pList)))
#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext
#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev
/*
 * ITERATE_OVER_LIST - pStart is the list, pTemp is a temp list member
 * NOTE: do not use this macro if items are deleted from the list inside the
 * iteration loop
 */
#define ITERATE_OVER_LIST(pStart, pTemp) \
for((pTemp) =(pStart)->pNext; pTemp != (pStart); (pTemp) = (pTemp)->pNext)
static __inline bool dl_list_is_entry_in_list(const DL_LIST *pList,
const DL_LIST *pEntry)
{
const DL_LIST *pTmp;
if (pList == pEntry)
return true;
ITERATE_OVER_LIST(pList, pTmp) {
if (pTmp == pEntry) {
return true;
}
}
return false;
}
/* safe iterate macro that allows the item to be removed from the list
* the iteration continues to the next item in the list
*/
#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart,pItem,st,offset) \
{ \
PDL_LIST pTemp; \
pTemp = (pStart)->pNext; \
while (pTemp != (pStart)) { \
(pItem) = A_CONTAINING_STRUCT(pTemp,st,offset); \
pTemp = pTemp->pNext; \
#define ITERATE_IS_VALID(pStart) dl_list_is_entry_in_list(pStart, pTemp)
#define ITERATE_RESET(pStart) pTemp=(pStart)->pNext
#define ITERATE_END }}
/*
* dl_list_insert_tail - insert pAdd to the end of the list
*/
static __inline PDL_LIST dl_list_insert_tail(PDL_LIST pList, PDL_LIST pAdd)
{
/* insert at tail */
pAdd->pPrev = pList->pPrev;
pAdd->pNext = pList;
if (pList->pPrev) {
pList->pPrev->pNext = pAdd;
}
pList->pPrev = pAdd;
return pAdd;
}
/*
* dl_list_insert_head - insert pAdd into the head of the list
*/
static __inline PDL_LIST dl_list_insert_head(PDL_LIST pList, PDL_LIST pAdd)
{
/* insert at head */
pAdd->pPrev = pList;
pAdd->pNext = pList->pNext;
pList->pNext->pPrev = pAdd;
pList->pNext = pAdd;
return pAdd;
}
#define DL_ListAdd(pList,pItem) dl_list_insert_head((pList),(pItem))
/*
* dl_list_remove - remove pDel from list
*/
static __inline PDL_LIST dl_list_remove(PDL_LIST pDel)
{
if (pDel->pNext != NULL) {
pDel->pNext->pPrev = pDel->pPrev;
}
if (pDel->pPrev != NULL) {
pDel->pPrev->pNext = pDel->pNext;
}
/* point back to itself just to be safe, in case remove is called again */
pDel->pNext = pDel;
pDel->pPrev = pDel;
return pDel;
}
/*
* dl_list_remove_item_from_head - get a list item from the head
*/
static __inline PDL_LIST dl_list_remove_item_from_head(PDL_LIST pList)
{
PDL_LIST pItem = NULL;
if (pList->pNext != pList) {
pItem = pList->pNext;
/* remove the first item from head */
dl_list_remove(pItem);
}
return pItem;
}
static __inline PDL_LIST dl_list_remove_item_from_tail(PDL_LIST pList)
{
PDL_LIST pItem = NULL;
if (pList->pPrev != pList) {
pItem = pList->pPrev;
/* remove the item from tail */
dl_list_remove(pItem);
}
return pItem;
}
/* transfer src list items to the tail of the destination list */
static __inline void dl_list_transfer_items_to_tail(PDL_LIST pDest, PDL_LIST pSrc)
{
/* only concatenate if src is not empty */
if (!DL_LIST_IS_EMPTY(pSrc)) {
/* cut out circular list in src and re-attach to end of dest */
pSrc->pPrev->pNext = pDest;
pSrc->pNext->pPrev = pDest->pPrev;
pDest->pPrev->pNext = pSrc->pNext;
pDest->pPrev = pSrc->pPrev;
/* terminate src list, it is now empty */
pSrc->pPrev = pSrc;
pSrc->pNext = pSrc;
}
}
/* transfer src list items to the head of the destination list */
static __inline void dl_list_transfer_items_to_head(PDL_LIST pDest, PDL_LIST pSrc)
{
/* only concatenate if src is not empty */
if (!DL_LIST_IS_EMPTY(pSrc)) {
/* cut out circular list in src and re-attach to start of dest */
pSrc->pNext->pPrev = pDest;
pDest->pNext->pPrev = pSrc->pPrev;
pSrc->pPrev->pNext = pDest->pNext;
pDest->pNext = pSrc->pNext;
/* terminate src list, it is now empty */
pSrc->pPrev = pSrc;
pSrc->pNext = pSrc;
}
}
#endif /* __DL_LIST_H___ */
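A brief usage sketch of the list primitives above; the example_item structure and its values are hypothetical, but the macros and helpers are the ones defined in this header.

struct example_item {
	DL_LIST ListLink;
	int value;
};

static void example_dl_list_usage(void)
{
	DL_LIST queue;
	struct example_item a = { .value = 0 }, b = { .value = 1 };
	struct example_item *pItem;

	DL_LIST_INIT(&queue);
	dl_list_insert_tail(&queue, &a.ListLink);
	dl_list_insert_tail(&queue, &b.ListLink);

	/* removal of the current item is safe inside this iterator */
	ITERATE_OVER_LIST_ALLOW_REMOVE(&queue, pItem,
				       struct example_item, ListLink) {
		if (pItem->value == 0)
			dl_list_remove(&pItem->ListLink);
	} ITERATE_END;
}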

View File

@ -1,881 +0,0 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "ol_if_athvar.h"
#include "htc_debug.h"
#include "htc_internal.h"
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <cdf_types.h> /* cdf_print */
#include <hif.h>
#include "epping_main.h"
#include "hif_io32.h"
#include "cds_concurrency.h"
#include <cds_api.h>
#ifdef DEBUG
static ATH_DEBUG_MASK_DESCRIPTION g_htc_debug_description[] = {
{ATH_DEBUG_SEND, "Send"},
{ATH_DEBUG_RECV, "Recv"},
{ATH_DEBUG_SYNC, "Sync"},
{ATH_DEBUG_DUMP, "Dump Data (RX or TX)"},
{ATH_DEBUG_SETUP, "Setup"},
};
ATH_DEBUG_INSTANTIATE_MODULE_VAR(htc,
"htc",
"Host Target Communications",
ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO |
ATH_DEBUG_SETUP,
ATH_DEBUG_DESCRIPTION_COUNT
(g_htc_debug_description),
g_htc_debug_description);
#endif
extern unsigned int htc_credit_flow;
static void reset_endpoint_states(HTC_TARGET *target);
static void destroy_htc_tx_ctrl_packet(HTC_PACKET *pPacket)
{
cdf_nbuf_t netbuf;
netbuf = (cdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("free ctrl netbuf :0x%p \n", netbuf));
if (netbuf != NULL) {
cdf_nbuf_free(netbuf);
}
cdf_mem_free(pPacket);
}
static HTC_PACKET *build_htc_tx_ctrl_packet(cdf_device_t osdev)
{
HTC_PACKET *pPacket = NULL;
cdf_nbuf_t netbuf;
do {
pPacket = (HTC_PACKET *) cdf_mem_malloc(sizeof(HTC_PACKET));
if (NULL == pPacket) {
break;
}
A_MEMZERO(pPacket, sizeof(HTC_PACKET));
netbuf =
cdf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, 20, 4, true);
if (NULL == netbuf) {
cdf_mem_free(pPacket);
pPacket = NULL;
cdf_print("%s: nbuf alloc failed\n", __func__);
break;
}
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("alloc ctrl netbuf :0x%p \n", netbuf));
SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf);
} while (false);
return pPacket;
}
void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket)
{
#ifdef TODO_FIXME
LOCK_HTC(target);
HTC_PACKET_ENQUEUE(&target->ControlBufferTXFreeList, pPacket);
UNLOCK_HTC(target);
/* TODO_FIXME netbufs cannot be RESET! */
#else
destroy_htc_tx_ctrl_packet(pPacket);
#endif
}
HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target)
{
#ifdef TODO_FIXME
HTC_PACKET *pPacket;
LOCK_HTC(target);
pPacket = htc_packet_dequeue(&target->ControlBufferTXFreeList);
UNLOCK_HTC(target);
return pPacket;
#else
return build_htc_tx_ctrl_packet(target->osdev);
#endif
}
/* Set the target failure handling callback */
void htc_set_target_failure_callback(HTC_HANDLE HTCHandle,
HTC_TARGET_FAILURE Callback)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
target->HTCInitInfo.TargetFailure = Callback;
}
void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
hif_dump(target->hif_dev, CmdId, start);
}
/* cleanup the HTC instance */
static void htc_cleanup(HTC_TARGET *target)
{
HTC_PACKET *pPacket;
/* cdf_nbuf_t netbuf; */
if (target->hif_dev != NULL) {
hif_detach_htc(target->hif_dev);
target->hif_dev = NULL;
}
while (true) {
pPacket = allocate_htc_packet_container(target);
if (NULL == pPacket) {
break;
}
cdf_mem_free(pPacket);
}
pPacket = target->pBundleFreeList;
while (pPacket) {
HTC_PACKET *pPacketTmp = (HTC_PACKET *) pPacket->ListLink.pNext;
cdf_mem_free(pPacket);
pPacket = pPacketTmp;
}
#ifdef TODO_FIXME
while (true) {
pPacket = htc_alloc_control_tx_packet(target);
if (NULL == pPacket) {
break;
}
netbuf = (cdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
if (netbuf != NULL) {
cdf_nbuf_free(netbuf);
}
cdf_mem_free(pPacket);
}
#endif
cdf_spinlock_destroy(&target->HTCLock);
cdf_spinlock_destroy(&target->HTCRxLock);
cdf_spinlock_destroy(&target->HTCTxLock);
cdf_spinlock_destroy(&target->HTCCreditLock);
/* free our instance */
cdf_mem_free(target);
}
/* registered target arrival callback from the HIF layer */
HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
{
struct hif_msg_callbacks htcCallbacks;
HTC_ENDPOINT *pEndpoint = NULL;
HTC_TARGET *target = NULL;
int i;
if (ol_sc == NULL) {
HTC_ERROR("%s: ol_sc = NULL", __func__);
return NULL;
}
HTC_TRACE("+htc_create .. HIF :%p", ol_sc);
A_REGISTER_MODULE_DEBUG_INFO(htc);
target = (HTC_TARGET *) cdf_mem_malloc(sizeof(HTC_TARGET));
if (target == NULL) {
HTC_ERROR("%s: Unable to allocate memory", __func__);
return NULL;
}
A_MEMZERO(target, sizeof(HTC_TARGET));
htc_runtime_pm_init(target);
cdf_spinlock_init(&target->HTCLock);
cdf_spinlock_init(&target->HTCRxLock);
cdf_spinlock_init(&target->HTCTxLock);
cdf_spinlock_init(&target->HTCCreditLock);
do {
A_MEMCPY(&target->HTCInitInfo, pInfo, sizeof(HTC_INIT_INFO));
target->host_handle = pInfo->pContext;
target->osdev = osdev;
reset_endpoint_states(target);
INIT_HTC_PACKET_QUEUE(&target->ControlBufferTXFreeList);
for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
HTC_PACKET *pPacket =
(HTC_PACKET *) cdf_mem_malloc(sizeof(HTC_PACKET));
if (pPacket != NULL) {
A_MEMZERO(pPacket, sizeof(HTC_PACKET));
free_htc_packet_container(target, pPacket);
}
}
#ifdef TODO_FIXME
for (i = 0; i < NUM_CONTROL_TX_BUFFERS; i++) {
pPacket = build_htc_tx_ctrl_packet();
if (NULL == pPacket) {
break;
}
htc_free_control_tx_packet(target, pPacket);
}
#endif
/* setup HIF layer callbacks */
cdf_mem_zero(&htcCallbacks, sizeof(struct hif_msg_callbacks));
htcCallbacks.Context = target;
htcCallbacks.rxCompletionHandler = htc_rx_completion_handler;
htcCallbacks.txCompletionHandler = htc_tx_completion_handler;
htcCallbacks.txResourceAvailHandler = htc_tx_resource_avail_handler;
htcCallbacks.fwEventHandler = htc_fw_event_handler;
target->hif_dev = ol_sc;
/* Get HIF default pipe for HTC message exchange */
pEndpoint = &target->endpoint[ENDPOINT_0];
hif_post_init(target->hif_dev, target, &htcCallbacks);
hif_get_default_pipe(target->hif_dev, &pEndpoint->UL_PipeID,
&pEndpoint->DL_PipeID);
} while (false);
htc_recv_init(target);
HTC_TRACE("-htc_create: (0x%p)", target);
return (HTC_HANDLE) target;
}
void htc_destroy(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("+htc_destroy .. Destroying :0x%p\n", target));
hif_stop(htc_get_hif_device(HTCHandle));
if (target)
htc_cleanup(target);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_destroy\n"));
}
/* get the low level HIF device for the caller; the caller may wish to do low
 * level HIF requests */
void *htc_get_hif_device(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
return target->hif_dev;
}
void htc_control_tx_complete(void *Context, HTC_PACKET *pPacket)
{
HTC_TARGET *target = (HTC_TARGET *) Context;
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("+-htc_control_tx_complete 0x%p (l:%d) \n", pPacket,
pPacket->ActualLength));
htc_free_control_tx_packet(target, pPacket);
}
/* TODO, this is just a temporary max packet size */
#define MAX_MESSAGE_SIZE 1536
/**
* htc_setup_target_buffer_assignments() - setup target buffer assignments
* @target: HTC Target Pointer
*
* Return: A_STATUS
*/
A_STATUS htc_setup_target_buffer_assignments(HTC_TARGET *target)
{
HTC_SERVICE_TX_CREDIT_ALLOCATION *pEntry;
A_STATUS status;
int credits;
int creditsPerMaxMsg;
creditsPerMaxMsg = MAX_MESSAGE_SIZE / target->TargetCreditSize;
if (MAX_MESSAGE_SIZE % target->TargetCreditSize) {
creditsPerMaxMsg++;
}
/* TODO, this should be configured by the caller! */
credits = target->TotalTransmitCredits;
pEntry = &target->ServiceTxAllocTable[0];
/*
 * Allocate all credits/HTC buffers to WMI.
 * No buffers are used/required for data; data always
 * remains on the host.
 */
status = A_OK;
pEntry++;
pEntry->service_id = WMI_CONTROL_SVC;
pEntry->CreditAllocation = credits;
if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
/* Endpoint ping is a testing tool that sits directly on top of
 * HTC on both the target and host sides.
 * On the target side, the endpoint ping FW has no WLAN stack;
 * the FW mboxping app sits directly on HTC and simply drops
 * or loops back TX packets. For RX perf, the FW mboxping app
 * generates packets and passes them to HTC to send to the host.
 * There are no WMI message exchanges between host and target
 * in the endpoint ping case.
 * On the host side, the endpoint ping driver is an Ethernet
 * driver that sits directly on HTC. Only HIF, HTC, CDF and ADF
 * are used by the endpoint ping driver; there is no WiFi stack
 * on the host side either. For the TX perf use case, the user
 * space mboxping app sends raw packets to the endpoint ping
 * driver, which forwards them directly to HTC for transmission
 * to stress the bus. For RX perf, HTC passes the received
 * packets to the endpoint ping driver, which passes them to
 * user space through the Ethernet interface.
 * For credit allocation, in the SDIO bus case only the BE
 * service is used for TX/RX perf testing, so all credits are
 * given to the BE service. In the PCIe and USB bus cases,
 * endpoint ping uses both the BE and BK services to stress the
 * bus, so the total credits are split equally between them.
 */
pEntry->service_id = WMI_DATA_BE_SVC;
pEntry->CreditAllocation = (credits >> 1);
pEntry++;
pEntry->service_id = WMI_DATA_BK_SVC;
pEntry->CreditAllocation = (credits >> 1);
}
if (A_SUCCESS(status)) {
int i;
for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (target->ServiceTxAllocTable[i].service_id != 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
("HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
i,
target->ServiceTxAllocTable[i].
service_id,
target->ServiceTxAllocTable[i].
CreditAllocation));
}
}
}
return status;
}
A_UINT8 htc_get_credit_allocation(HTC_TARGET *target, A_UINT16 service_id)
{
A_UINT8 allocation = 0;
int i;
for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (target->ServiceTxAllocTable[i].service_id == service_id) {
allocation =
target->ServiceTxAllocTable[i].CreditAllocation;
}
}
if (0 == allocation) {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
("HTC Service TX : 0x%2.2X : allocation is zero!\n",
service_id));
}
return allocation;
}
A_STATUS htc_wait_target(HTC_HANDLE HTCHandle)
{
A_STATUS status = A_OK;
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
HTC_READY_EX_MSG *pReadyMsg;
HTC_SERVICE_CONNECT_REQ connect;
HTC_SERVICE_CONNECT_RESP resp;
HTC_READY_MSG *rdy_msg;
A_UINT16 htc_rdy_msg_id;
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("htc_wait_target - Enter (target:0x%p) \n", HTCHandle));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("+HWT\n"));
do {
status = hif_start(target->hif_dev);
if (A_FAILED(status)) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("hif_start failed\n"));
break;
}
status = htc_wait_recv_ctrl_message(target);
if (A_FAILED(status)) {
break;
}
if (target->CtrlResponseLength < (sizeof(HTC_READY_EX_MSG))) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("Invalid HTC Ready Msg Len:%d! \n",
target->CtrlResponseLength));
status = A_ECOMM;
break;
}
pReadyMsg = (HTC_READY_EX_MSG *) target->CtrlResponseBuffer;
rdy_msg = &pReadyMsg->Version2_0_Info;
htc_rdy_msg_id =
HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, MESSAGEID);
if (htc_rdy_msg_id != HTC_MSG_READY_ID) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("Invalid HTC Ready Msg : 0x%X ! \n",
htc_rdy_msg_id));
status = A_ECOMM;
break;
}
target->TotalTransmitCredits =
HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, CREDITCOUNT);
target->TargetCreditSize =
(int)HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, CREDITSIZE);
target->MaxMsgsPerHTCBundle =
(A_UINT8) pReadyMsg->MaxMsgsPerHTCBundle;
/* for old fw this value is set to 0. But the minimum value should be 1,
* i.e., no bundling */
if (target->MaxMsgsPerHTCBundle < 1)
target->MaxMsgsPerHTCBundle = 1;
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
("Target Ready! : transmit resources : %d size:%d, MaxMsgsPerHTCBundle = %d\n",
target->TotalTransmitCredits,
target->TargetCreditSize,
target->MaxMsgsPerHTCBundle));
if ((0 == target->TotalTransmitCredits)
|| (0 == target->TargetCreditSize)) {
status = A_ECOMM;
break;
}
/* done processing */
target->CtrlResponseProcessing = false;
htc_setup_target_buffer_assignments(target);
/* setup our pseudo HTC control endpoint connection */
A_MEMZERO(&connect, sizeof(connect));
A_MEMZERO(&resp, sizeof(resp));
connect.EpCallbacks.pContext = target;
connect.EpCallbacks.EpTxComplete = htc_control_tx_complete;
connect.EpCallbacks.EpRecv = htc_control_rx_complete;
connect.MaxSendQueueDepth = NUM_CONTROL_TX_BUFFERS;
connect.service_id = HTC_CTRL_RSVD_SVC;
/* connect fake service */
status = htc_connect_service((HTC_HANDLE) target,
&connect, &resp);
} while (false);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_wait_target - Exit (%d)\n", status));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("-HWT\n"));
return status;
}
/* start HTC, this is called after all services are connected */
static A_STATUS htc_config_target_hif_pipe(HTC_TARGET *target)
{
return A_OK;
}
static void reset_endpoint_states(HTC_TARGET *target)
{
HTC_ENDPOINT *pEndpoint;
int i;
for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
pEndpoint = &target->endpoint[i];
pEndpoint->service_id = 0;
pEndpoint->MaxMsgLength = 0;
pEndpoint->MaxTxQueueDepth = 0;
pEndpoint->Id = i;
INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue);
INIT_HTC_PACKET_QUEUE(&pEndpoint->TxLookupQueue);
INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBufferHoldQueue);
pEndpoint->target = target;
/* pEndpoint->TxCreditFlowEnabled = (A_BOOL)htc_credit_flow; */
pEndpoint->TxCreditFlowEnabled = (A_BOOL) 1;
cdf_atomic_init(&pEndpoint->TxProcessCount);
}
}
A_STATUS htc_start(HTC_HANDLE HTCHandle)
{
cdf_nbuf_t netbuf;
A_STATUS status = A_OK;
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
HTC_SETUP_COMPLETE_EX_MSG *pSetupComp;
HTC_PACKET *pSendPacket;
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Enter\n"));
do {
htc_config_target_hif_pipe(target);
/* allocate a buffer to send */
pSendPacket = htc_alloc_control_tx_packet(target);
if (NULL == pSendPacket) {
AR_DEBUG_ASSERT(false);
cdf_print("%s: allocControlTxPacket failed\n",
__func__);
status = A_NO_MEMORY;
break;
}
netbuf =
(cdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket);
/* assemble setup complete message */
cdf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
pSetupComp =
(HTC_SETUP_COMPLETE_EX_MSG *) cdf_nbuf_data(netbuf);
A_MEMZERO(pSetupComp, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
HTC_SET_FIELD(pSetupComp, HTC_SETUP_COMPLETE_EX_MSG,
MESSAGEID, HTC_MSG_SETUP_COMPLETE_EX_ID);
if (!htc_credit_flow) {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
("HTC will not use TX credit flow control\n"));
pSetupComp->SetupFlags |=
HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW;
} else {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
("HTC using TX credit flow control\n"));
}
#ifdef HIF_SDIO
#if ENABLE_BUNDLE_RX
if (HTC_ENABLE_BUNDLE(target))
pSetupComp->SetupFlags |=
HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV;
#endif /* ENABLE_BUNDLE_RX */
#endif /* HIF_SDIO */
SET_HTC_PACKET_INFO_TX(pSendPacket,
NULL,
(A_UINT8 *) pSetupComp,
sizeof(HTC_SETUP_COMPLETE_EX_MSG),
ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
status = htc_send_pkt((HTC_HANDLE) target, pSendPacket);
if (A_FAILED(status)) {
break;
}
} while (false);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Exit\n"));
return status;
}
/* flush all queued buffers for the surprise-remove case */
void htc_flush_surprise_remove(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
int i;
HTC_ENDPOINT *pEndpoint;
#ifdef RX_SG_SUPPORT
cdf_nbuf_t netbuf;
cdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;
#endif
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_flush_surprise_remove \n"));
/* cleanup endpoints */
for (i = 0; i < ENDPOINT_MAX; i++) {
pEndpoint = &target->endpoint[i];
htc_flush_rx_hold_queue(target, pEndpoint);
htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL);
}
hif_flush_surprise_remove(target->hif_dev);
#ifdef RX_SG_SUPPORT
LOCK_HTC_RX(target);
while ((netbuf = cdf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
cdf_nbuf_free(netbuf);
}
RESET_RX_SG_CONFIG(target);
UNLOCK_HTC_RX(target);
#endif
reset_endpoint_states(target);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_flush_surprise_remove \n"));
}
/* stop HTC communications, i.e. stop interrupt reception, and flush all queued buffers */
void htc_stop(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
int i;
HTC_ENDPOINT *pEndpoint;
#ifdef RX_SG_SUPPORT
cdf_nbuf_t netbuf;
cdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;
#endif
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_stop \n"));
/* cleanup endpoints */
for (i = 0; i < ENDPOINT_MAX; i++) {
pEndpoint = &target->endpoint[i];
htc_flush_rx_hold_queue(target, pEndpoint);
htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL);
if (pEndpoint->ul_is_polled) {
cdf_softirq_timer_cancel(&pEndpoint->ul_poll_timer);
cdf_softirq_timer_free(&pEndpoint->ul_poll_timer);
}
}
/* Note: htc_flush_endpoint_tx for all endpoints should be called before
* hif_stop - otherwise htc_tx_completion_handler called from
* hif_send_buffer_cleanup_on_pipe for residual tx frames in HIF layer,
* might queue the packet again to HIF Layer - which could cause tx
* buffer leak
*/
hif_stop(target->hif_dev);
#ifdef RX_SG_SUPPORT
LOCK_HTC_RX(target);
while ((netbuf = cdf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
cdf_nbuf_free(netbuf);
}
RESET_RX_SG_CONFIG(target);
UNLOCK_HTC_RX(target);
#endif
reset_endpoint_states(target);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_stop\n"));
}
/**
 * htc_runtime_pm_init(): runtime pm related initialization
 * @target: HTC target handle
 *
 * Initializes the work item used to kick the HTC queues on resume.
 */
void htc_runtime_pm_init(HTC_TARGET *target)
{
cdf_create_work(&target->queue_kicker, htc_kick_queues, target);
}
/**
 * htc_runtime_suspend(): ensure HTC is ready to suspend
 *
 * HTC is ready to suspend if there are no pending packets
 * in the txrx queues.
 *
 * Return: 0 on success or -EBUSY if there are queued packets.
 */
int htc_runtime_suspend(void)
{
ol_txrx_pdev_handle txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
if (txrx_pdev == NULL) {
HTC_ERROR("%s: txrx context null", __func__);
return CDF_STATUS_E_FAULT;
}
if (ol_txrx_get_tx_pending(txrx_pdev))
return -EBUSY;
else
return 0;
}
/**
 * htc_runtime_resume(): resume HTC
 *
 * The HTC message queue needs to be kicked off after
 * a runtime resume, otherwise messages would get stuck.
 *
 * Return: 0 for success
 */
int htc_runtime_resume(void)
{
HTC_HANDLE htc_ctx = cds_get_context(CDF_MODULE_ID_HTC);
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_ctx);
if (target == NULL)
return 0;
cdf_schedule_work(&target->queue_kicker);
return 0;
}
void htc_dump_credit_states(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
HTC_ENDPOINT *pEndpoint;
int i;
for (i = 0; i < ENDPOINT_MAX; i++) {
pEndpoint = &target->endpoint[i];
if (0 == pEndpoint->service_id)
continue;
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
("--- EP : %d service_id: 0x%X --------------\n",
pEndpoint->Id, pEndpoint->service_id));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
(" TxCredits : %d\n",
pEndpoint->TxCredits));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
(" TxCreditSize : %d\n",
pEndpoint->TxCreditSize));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
(" TxCreditsPerMaxMsg : %d\n",
pEndpoint->TxCreditsPerMaxMsg));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
(" TxQueueDepth : %d\n",
HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)));
AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
("----------------------------------------------------\n"));
}
}
A_BOOL htc_get_endpoint_statistics(HTC_HANDLE HTCHandle,
HTC_ENDPOINT_ID Endpoint,
HTC_ENDPOINT_STAT_ACTION Action,
HTC_ENDPOINT_STATS *pStats)
{
#ifdef HTC_EP_STAT_PROFILING
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
A_BOOL clearStats = false;
A_BOOL sample = false;
switch (Action) {
case HTC_EP_STAT_SAMPLE:
sample = true;
break;
case HTC_EP_STAT_SAMPLE_AND_CLEAR:
sample = true;
clearStats = true;
break;
case HTC_EP_STAT_CLEAR:
clearStats = true;
break;
default:
break;
}
A_ASSERT(Endpoint < ENDPOINT_MAX);
/* lock out TX and RX while we sample and/or clear */
LOCK_HTC_TX(target);
LOCK_HTC_RX(target);
if (sample) {
A_ASSERT(pStats != NULL);
/* return the stats to the caller */
A_MEMCPY(pStats, &target->endpoint[Endpoint].endpoint_stats,
sizeof(HTC_ENDPOINT_STATS));
}
if (clearStats) {
/* reset stats */
A_MEMZERO(&target->endpoint[Endpoint].endpoint_stats,
sizeof(HTC_ENDPOINT_STATS));
}
UNLOCK_HTC_RX(target);
UNLOCK_HTC_TX(target);
return true;
#else
return false;
#endif
}
void *htc_get_targetdef(HTC_HANDLE htc_handle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle);
return hif_get_targetdef(target->hif_dev);
}
/**
* htc_set_target_to_sleep() - set target to sleep
* @context: ol_softc context
*
* Return: none
*/
void htc_set_target_to_sleep(void *context)
{
struct ol_softc *scn = (struct ol_softc *)context;
hif_set_target_sleep(scn, true, false);
}
/**
* htc_cancel_deferred_target_sleep() - cancel deferred target sleep
* @context: ol_softc context
*
* Return: none
*/
void htc_cancel_deferred_target_sleep(void *context)
{
struct ol_softc *scn = (struct ol_softc *)context;
hif_cancel_deferred_target_sleep(scn);
}
#ifdef IPA_OFFLOAD
/**
* htc_ipa_get_ce_resource() - get uc resource on lower layer
* @htc_handle: htc context
* @ce_sr_base_paddr: copyengine source ring base physical address
* @ce_sr_ring_size: copyengine source ring size
* @ce_reg_paddr: copyengine register physical address
*
* Return: None
*/
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle);
if (target->hif_dev != NULL) {
hif_ipa_get_ce_resource(target->hif_dev,
ce_sr_base_paddr,
ce_sr_ring_size, ce_reg_paddr);
}
}
#endif /* IPA_OFFLOAD */

View File

@ -1,718 +0,0 @@
/*
* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _HTC_API_H_
#define _HTC_API_H_
#include <athdefs.h>
#include "osapi_linux.h"
#include "htc_packet.h"
#include <htc.h>
#include <htc_services.h>
#include <cdf_types.h> /* cdf_device_t */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* TODO.. for BMI */
#define ENDPOINT1 0
/* TODO -remove me, but we have to fix BMI first */
#define HTC_MAILBOX_NUM_MAX 4
/* this is the amount of header room required by users of HTC */
#define HTC_HEADER_LEN HTC_HDR_LENGTH
typedef void *HTC_HANDLE;
typedef A_UINT16 HTC_SERVICE_ID;
typedef void (*HTC_TARGET_FAILURE)(void *Instance, CDF_STATUS Status);
typedef struct _HTC_INIT_INFO {
void *pContext; /* context for target notifications */
void (*TargetFailure)(void *Instance, CDF_STATUS Status);
void (*TargetSendSuspendComplete)(void *ctx);
} HTC_INIT_INFO;
/* Struct for HTC layer packet stats*/
struct ol_ath_htc_stats {
int htc_get_pkt_q_fail_count;
int htc_pkt_q_empty_count;
int htc_send_q_empty_count;
};
/* To resume HTT Tx queue during runtime resume */
typedef void (*HTC_EP_RESUME_TX_QUEUE)(void *);
/* per service connection send completion */
typedef void (*HTC_EP_SEND_PKT_COMPLETE)(void *, HTC_PACKET *);
/* per service connection callback when a plurality of packets have been sent
* The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from the callback)
* to hold a list of completed send packets.
* If the handler cannot fully traverse the packet queue before returning, it should
* transfer the items of the queue into the caller's private queue using:
* HTC_PACKET_ENQUEUE() */
typedef void (*HTC_EP_SEND_PKT_COMP_MULTIPLE)(void *,
HTC_PACKET_QUEUE *);
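/* Illustrative sketch (not part of the original header): a multi-packet send
 * completion handler that cannot finish its work inline, so it transfers the
 * completed packets into a caller-owned queue with HTC_PACKET_ENQUEUE(), as
 * described above. The context structure is hypothetical. */
#if 0
struct example_ep_ctx {
	HTC_PACKET_QUEUE deferred_complete_queue;
};

static void example_tx_complete_multiple(void *pContext,
					 HTC_PACKET_QUEUE *pCompletedQueue)
{
	struct example_ep_ctx *ctx = (struct example_ep_ctx *)pContext;
	HTC_PACKET *pPacket;

	/* pCompletedQueue is temporary; drain it before returning */
	while ((pPacket = htc_packet_dequeue(pCompletedQueue)) != NULL)
		HTC_PACKET_ENQUEUE(&ctx->deferred_complete_queue, pPacket);
}
#endif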
/* per service connection pkt received */
typedef void (*HTC_EP_RECV_PKT)(void *, HTC_PACKET *);
/* per service connection callback when a plurality of packets are received
* The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from the callback)
* to hold a list of recv packets.
* If the handler cannot fully traverse the packet queue before returning, it should
* transfer the items of the queue into the caller's private queue using:
* HTC_PACKET_ENQUEUE() */
typedef void (*HTC_EP_RECV_PKT_MULTIPLE)(void *, HTC_PACKET_QUEUE *);
/* Optional per service connection receive buffer re-fill callback,
* On some OSes (like Linux) packets are allocated from a global pool and indicated up
* to the network stack. The driver never gets the packets back from the OS. For these OSes
* a refill callback can be used to allocate and re-queue buffers into HTC.
*
* On other OSes, the network stack can call into the driver's OS-specific "return_packet" handler and
* the driver can re-queue these buffers into HTC. In this regard a refill callback is
* unnecessary */
typedef void (*HTC_EP_RECV_REFILL)(void *, HTC_ENDPOINT_ID Endpoint);
/* Optional per service connection receive buffer allocation callback.
* On some systems packet buffers are an extremely limited resource. Rather than
* queue largest-possible-sized buffers to HTC, some systems would rather
* allocate a specific size as the packet is received. The trade off is
* slightly more processing (callback invoked for each RX packet)
* for the benefit of committing fewer buffer resources into HTC.
*
* The callback is provided the length of the pending packet to fetch. This includes the
* HTC header length plus the length of payload. The callback can return a pointer to
* the allocated HTC packet for immediate use.
*
* Alternatively a variant of this handler can be used to allocate large receive packets as needed.
* For example an application can use the refill mechanism for normal packets and the recv-alloc mechanism to
* handle the case where a large packet buffer is required. This can significantly reduce the
* amount of "committed" memory used to receive packets.
*
* */
typedef HTC_PACKET *(*HTC_EP_RECV_ALLOC)(void *,
HTC_ENDPOINT_ID Endpoint,
int Length);
typedef enum _HTC_SEND_FULL_ACTION {
HTC_SEND_FULL_KEEP = 0, /* packet that overflowed should be kept in the queue */
HTC_SEND_FULL_DROP = 1, /* packet that overflowed should be dropped */
} HTC_SEND_FULL_ACTION;
/* Optional per service connection callback when a send queue is full. This can occur if the
 * host continues queueing up TX packets faster than credits can arrive.
 * To prevent the host (on some OSes like Linux) from continuously queueing packets
 * and consuming resources, this callback is provided so that the host
 * can disable TX in the subsystem (i.e. network stack).
 * This callback is invoked for each packet that "overflows" the HTC queue. The callback can
 * determine whether the new packet that overflowed the queue can be kept (HTC_SEND_FULL_KEEP) or
 * dropped (HTC_SEND_FULL_DROP). If a packet is dropped, the EpTxComplete handler will be called
 * and the packet's status field will be set to A_NO_RESOURCE.
 * Other OSes require a "per-packet" indication for each completed TX packet; this
 * closed-loop mechanism will prevent the network stack from overrunning the NIC.
 * The packet to keep or drop is passed for inspection to the registered handler; the handler
 * must ONLY inspect the packet, it may not free or reclaim the packet. */
typedef HTC_SEND_FULL_ACTION (*HTC_EP_SEND_QUEUE_FULL)(void *,
HTC_PACKET *
pPacket);
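/* Illustrative sketch (not part of the original header): a send-full handler
 * that throttles the traffic source and decides per packet whether to keep or
 * drop it. The context structure and the example_* helpers are hypothetical. */
#if 0
static HTC_SEND_FULL_ACTION example_ep_send_full(void *pContext,
						 HTC_PACKET *pPacket)
{
	struct example_ep_ctx *ctx = (struct example_ep_ctx *)pContext;

	/* stop the source of TX traffic (e.g. pause network stack queues) */
	example_pause_tx_queues(ctx);

	/* keep packets the caller considers critical; drop the rest so the
	 * EpTxComplete handler sees them with status A_NO_RESOURCE */
	if (example_is_control_packet(pPacket))
		return HTC_SEND_FULL_KEEP;

	return HTC_SEND_FULL_DROP;
}
#endif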
typedef struct _HTC_EP_CALLBACKS {
void *pContext; /* context for each callback */
HTC_EP_SEND_PKT_COMPLETE EpTxComplete; /* tx completion callback for connected endpoint */
HTC_EP_RECV_PKT EpRecv; /* receive callback for connected endpoint */
HTC_EP_RECV_REFILL EpRecvRefill; /* OPTIONAL receive re-fill callback for connected endpoint */
HTC_EP_SEND_QUEUE_FULL EpSendFull; /* OPTIONAL send full callback */
HTC_EP_RECV_ALLOC EpRecvAlloc; /* OPTIONAL recv allocation callback */
HTC_EP_RECV_ALLOC EpRecvAllocThresh; /* OPTIONAL recv allocation callback based on a threshold */
HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; /* OPTIONAL completion handler for multiple complete
indications (EpTxComplete must be NULL) */
HTC_EP_RECV_PKT_MULTIPLE EpRecvPktMultiple; /* OPTIONAL completion handler for multiple
recv packet indications (EpRecv must be NULL) */
HTC_EP_RESUME_TX_QUEUE ep_resume_tx_queue;
int RecvAllocThreshold; /* if EpRecvAllocThresh is non-NULL, HTC will compare the
threshold value to the current recv packet length and invoke
the EpRecvAllocThresh callback to acquire a packet buffer */
int RecvRefillWaterMark; /* if a EpRecvRefill handler is provided, this value
can be used to set a trigger refill callback
when the recv queue drops below this value
if set to 0, the refill is only called when packets
are empty */
} HTC_EP_CALLBACKS;
/* service connection information */
typedef struct _HTC_SERVICE_CONNECT_REQ {
HTC_SERVICE_ID service_id; /* service ID to connect to */
A_UINT16 ConnectionFlags; /* connection flags, see htc protocol definition */
A_UINT8 *pMetaData; /* ptr to optional service-specific meta-data */
A_UINT8 MetaDataLength; /* optional meta data length */
HTC_EP_CALLBACKS EpCallbacks; /* endpoint callbacks */
int MaxSendQueueDepth; /* maximum depth of any send queue */
A_UINT32 LocalConnectionFlags; /* HTC flags for the host-side (local) connection */
unsigned int MaxSendMsgSize; /* override max message size in send direction */
} HTC_SERVICE_CONNECT_REQ;
#define HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING (1 << 0) /* enable send bundle padding for this endpoint */
/* service connection response information */
typedef struct _HTC_SERVICE_CONNECT_RESP {
A_UINT8 *pMetaData; /* caller supplied buffer to optional meta-data */
A_UINT8 BufferLength; /* length of caller supplied buffer */
A_UINT8 ActualLength; /* actual length of meta data */
HTC_ENDPOINT_ID Endpoint; /* endpoint to communicate over */
unsigned int MaxMsgLength; /* max length of all messages over this endpoint */
A_UINT8 ConnectRespCode; /* connect response code from target */
} HTC_SERVICE_CONNECT_RESP;
/* endpoint distribution structure */
typedef struct _HTC_ENDPOINT_CREDIT_DIST {
struct _HTC_ENDPOINT_CREDIT_DIST *pNext;
struct _HTC_ENDPOINT_CREDIT_DIST *pPrev;
HTC_SERVICE_ID service_id; /* Service ID (set by HTC) */
HTC_ENDPOINT_ID Endpoint; /* endpoint for this distribution struct (set by HTC) */
A_UINT32 DistFlags; /* distribution flags, distribution function can
set default activity using SET_EP_ACTIVE() macro */
int TxCreditsNorm; /* credits for normal operation, anything above this
indicates the endpoint is over-subscribed, this field
is only relevant to the credit distribution function */
int TxCreditsMin; /* floor for credit distribution, this field is
only relevant to the credit distribution function */
int TxCreditsAssigned; /* number of credits assigned to this EP, this field
is only relevant to the credit dist function */
int TxCredits; /* current credits available, this field is used by
HTC to determine whether a message can be sent or
must be queued */
int TxCreditsToDist; /* pending credits to distribute on this endpoint, this
is set by HTC when credit reports arrive.
The credit distribution functions sets this to zero
when it distributes the credits */
int TxCreditsSeek; /* this is the number of credits that the current pending TX
packet needs to transmit. This is set by HTC when
and endpoint needs credits in order to transmit */
int TxCreditSize; /* size in bytes of each credit (set by HTC) */
int TxCreditsPerMaxMsg; /* credits required for a maximum sized messages (set by HTC) */
void *pHTCReserved; /* reserved for HTC use */
int TxQueueDepth; /* current depth of TX queue , i.e. messages waiting for credits
This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
that has non-zero credits to recover
*/
} HTC_ENDPOINT_CREDIT_DIST;
#define HTC_EP_ACTIVE ((A_UINT32) (1u << 31))
/* macro to check if an endpoint has gone active, useful for credit
* distributions */
#define IS_EP_ACTIVE(epDist) ((epDist)->DistFlags & HTC_EP_ACTIVE)
#define SET_EP_ACTIVE(epDist) (epDist)->DistFlags |= HTC_EP_ACTIVE
/* credit distribution code that is passed into the distribution function,
* there are mandatory and optional codes that must be handled */
typedef enum _HTC_CREDIT_DIST_REASON {
HTC_CREDIT_DIST_SEND_COMPLETE = 0, /* credits available as a result of completed
send operations (MANDATORY) resulting in credit reports */
HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, /* a change in endpoint activity occurred (OPTIONAL) */
HTC_CREDIT_DIST_SEEK_CREDITS, /* an endpoint needs to "seek" credits (OPTIONAL) */
HTC_DUMP_CREDIT_STATE /* for debugging, dump any state information that is kept by
the distribution function */
} HTC_CREDIT_DIST_REASON;
typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context,
HTC_ENDPOINT_CREDIT_DIST *
pEPList,
HTC_CREDIT_DIST_REASON
Reason);
typedef void (*HTC_CREDIT_INIT_CALLBACK)(void *Context,
HTC_ENDPOINT_CREDIT_DIST *
pEPList, int TotalCredits);
/* endpoint statistics action */
typedef enum _HTC_ENDPOINT_STAT_ACTION {
HTC_EP_STAT_SAMPLE = 0, /* only read statistics */
HTC_EP_STAT_SAMPLE_AND_CLEAR = 1, /* sample and immediately clear statistics */
HTC_EP_STAT_CLEAR /* clear only */
} HTC_ENDPOINT_STAT_ACTION;
/* endpoint statistics */
typedef struct _HTC_ENDPOINT_STATS {
A_UINT32 TxPosted; /* number of TX packets posted to the endpoint */
A_UINT32 TxCreditLowIndications; /* number of times the host set the credit-low flag in a send message on
this endpoint */
A_UINT32 TxIssued; /* running count of total TX packets issued */
A_UINT32 TxPacketsBundled; /* running count of TX packets that were issued in bundles */
A_UINT32 TxBundles; /* running count of TX bundles that were issued */
A_UINT32 TxDropped; /* tx packets that were dropped */
A_UINT32 TxCreditRpts; /* running count of total credit reports received for this endpoint */
A_UINT32 TxCreditRptsFromRx; /* credit reports received from this endpoint's RX packets */
A_UINT32 TxCreditRptsFromOther; /* credit reports received from RX packets of other endpoints */
A_UINT32 TxCreditRptsFromEp0; /* credit reports received from endpoint 0 RX packets */
A_UINT32 TxCreditsFromRx; /* count of credits received via Rx packets on this endpoint */
A_UINT32 TxCreditsFromOther; /* count of credits received via another endpoint */
A_UINT32 TxCreditsFromEp0; /* count of credits received via another endpoint */
A_UINT32 TxCreditsConsummed; /* count of consumed credits */
A_UINT32 TxCreditsReturned; /* count of credits returned */
A_UINT32 RxReceived; /* count of RX packets received */
A_UINT32 RxLookAheads; /* count of lookahead records
found in messages received on this endpoint */
A_UINT32 RxPacketsBundled; /* count of recv packets received in a bundle */
A_UINT32 RxBundleLookAheads; /* count of number of bundled lookaheads */
A_UINT32 RxBundleIndFromHdr; /* count of the number of bundle indications from the HTC header */
A_UINT32 RxAllocThreshHit; /* count of the number of times the recv allocation threshold was hit */
A_UINT32 RxAllocThreshBytes; /* total number of bytes */
} HTC_ENDPOINT_STATS;
/* ------ Function Prototypes ------ */
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Create an instance of HTC over the underlying HIF device
@function name: htc_create
@input: HifDevice - hif device handle,
pInfo - initialization information
@output:
@return: HTC_HANDLE on success, NULL on failure
@notes:
@example:
@see also: htc_destroy
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
HTC_HANDLE htc_create(void *HifDevice,
HTC_INIT_INFO *pInfo, cdf_device_t osdev);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Get the underlying HIF device handle
@function name: htc_get_hif_device
@input: HTCHandle - handle passed into the AddInstance callback
@output:
@return: opaque HIF device handle usable in HIF API calls.
@notes:
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void *htc_get_hif_device(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Set credit distribution parameters
@function name: htc_set_credit_distribution
@input: HTCHandle - HTC handle
pCreditDistCont - caller supplied context to pass into distribution functions
CreditDistFunc - Distribution function callback
CreditDistInit - Credit Distribution initialization callback
ServicePriorityOrder - Array containing list of service IDs, lowest index is highest
priority
ListLength - number of elements in ServicePriorityOrder
@output:
@return:
@notes: The user can set a custom credit distribution function to handle special requirements
for each endpoint. A default credit distribution routine can be used by setting
CreditInitFunc to NULL. The default credit distribution is only provided for simple
"fair" credit distribution without regard to any prioritization.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_set_credit_distribution(HTC_HANDLE HTCHandle,
void *pCreditDistContext,
HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
HTC_SERVICE_ID ServicePriorityOrder[],
int ListLength);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Wait for the target to indicate the HTC layer is ready
@function name: htc_wait_target
@input: HTCHandle - HTC handle
@output:
@return:
@notes: This API blocks until the target responds with an HTC ready message.
The caller should not connect services until the target has indicated it is
ready.
@example:
@see also: htc_connect_service
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_wait_target(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Start target service communications
@function name: htc_start
@input: HTCHandle - HTC handle
@output:
@return:
@notes: This API indicates to the target that the service connection phase is complete
and the target can freely start all connected services. This API should only be
called AFTER all service connections have been made. htc_start will issue a
SETUP_COMPLETE message to the target to indicate that all service connections
have been made and the target can start communicating over the endpoints.
@example:
@see also: htc_connect_service
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_start(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Add receive packet to HTC
@function name: htc_add_receive_pkt
@input: HTCHandle - HTC handle
pPacket - HTC receive packet to add
@output:
@return: A_OK on success
@notes: user must supply HTC packets for capturing incoming HTC frames. The caller
must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
macro.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_add_receive_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Connect to an HTC service
@function name: htc_connect_service
@input: HTCHandle - HTC handle
pReq - connection details
@output: pResp - connection response
@return:
@notes: Service connections must be performed before htc_start. User provides callback handlers
for various endpoint events.
@example:
@see also: htc_start
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
HTC_SERVICE_CONNECT_REQ *pReq,
HTC_SERVICE_CONNECT_RESP *pResp);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: HTC register log dump
@function name: htc_dump
@input: HTCHandle - HTC handle
CmdId - Log command
start - start/print logs
@output:
@return:
@notes: Register logs will be started/printed.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Send an HTC packet
@function name: htc_send_pkt
@input: HTCHandle - HTC handle
pPacket - packet to send
@output:
@return: A_OK
@notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
This interface is fully asynchronous. On error, HTC SendPkt will
call the registered Endpoint callback to cleanup the packet.
@example:
@see also: htc_flush_endpoint
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_send_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Send an HTC packet containing a tx descriptor and data
@function name: htc_send_data_pkt
@input: HTCHandle - HTC handle
pPacket - packet to send
@output:
@return: A_OK
@notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
Caller must provide headroom in an initial fragment added to the
network buffer to store an HTC_FRAME_HDR.
This interface is fully asynchronous. On error, htc_send_data_pkt will
call the registered Endpoint EpDataTxComplete callback to cleanup
the packet.
@example:
@see also: htc_send_pkt
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
#ifdef ATH_11AC_TXCOMPACT
A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, cdf_nbuf_t netbuf,
int Epid, int ActualLength);
#else /*ATH_11AC_TXCOMPACT */
A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
A_UINT8 more_data);
#endif /*ATH_11AC_TXCOMPACT */
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Flush HTC service communications when the target is surprise-removed
@function name: htc_flush_surprise_remove
@input: HTCHandle - HTC handle
@output:
@return:
@notes: All receive and pending TX packets will
be flushed.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_flush_surprise_remove(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Stop HTC service communications
@function name: htc_stop
@input: HTCHandle - HTC handle
@output:
@return:
@notes: HTC communications is halted. All receive and pending TX packets will
be flushed.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_stop(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Destroy HTC service
@function name: htc_destroy
@input: HTCHandle
@output:
@return:
@notes: This cleans up all resources allocated by htc_create().
@example:
@see also: htc_create
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_destroy(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Flush pending TX packets
@function name: htc_flush_endpoint
@input: HTCHandle - HTC handle
Endpoint - Endpoint to flush
Tag - flush tag
@output:
@return:
@notes: The Tag parameter is used to selectively flush packets with matching tags.
The value of 0 forces all packets to be flushed regardless of tag.
@example:
@see also: htc_send_pkt
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_flush_endpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint,
HTC_TX_TAG Tag);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Dump credit distribution state
@function name: htc_dump_credit_states
@input: HTCHandle - HTC handle
@output:
@return:
@notes: This dumps all credit distribution information to the debugger
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_dump_credit_states(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Indicate a traffic activity change on an endpoint
@function name: htc_indicate_activity_change
@input: HTCHandle - HTC handle
Endpoint - endpoint in which activity has changed
Active - true if active, false if it has become inactive
@output:
@return:
@notes: This triggers the registered credit distribution function to
re-adjust credits for active/inactive endpoints.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_indicate_activity_change(HTC_HANDLE HTCHandle,
HTC_ENDPOINT_ID Endpoint, A_BOOL Active);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Get endpoint statistics
@function name: htc_get_endpoint_statistics
@input: HTCHandle - HTC handle
Endpoint - Endpoint identifier
Action - action to take with statistics
@output:
pStats - statistics that were sampled (can be NULL if Action is HTC_EP_STAT_CLEAR)
@return: true if statistics profiling is enabled, otherwise false.
@notes: Statistics profiling is a compile-time option and this function may return false
if HTC is not compiled with profiling.
The caller can specify the statistic "action" to take when sampling
the statistics. This includes:
HTC_EP_STAT_SAMPLE: The pStats structure is filled with the current values.
HTC_EP_STAT_SAMPLE_AND_CLEAR: The structure is filled and the current statistics
are cleared.
HTC_EP_STAT_CLEAR: the statistics are cleared; the caller can pass a NULL value for
pStats
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_BOOL htc_get_endpoint_statistics(HTC_HANDLE HTCHandle,
HTC_ENDPOINT_ID Endpoint,
HTC_ENDPOINT_STAT_ACTION Action,
HTC_ENDPOINT_STATS *pStats);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Unblock HTC message reception
@function name: htc_unblock_recv
@input: HTCHandle - HTC handle
@output:
@return:
@notes:
HTC will block the receiver if the EpRecvAlloc callback fails to provide a packet.
The caller can use this API to indicate to HTC when resources (buffers) are available
such that the receiver can be unblocked and HTC may re-attempt fetching the pending message.
This API is not required if the user uses the EpRecvRefill callback or the htc_add_receive_pkt()
API to recycle or provide receive packets to HTC.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_unblock_recv(HTC_HANDLE HTCHandle);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: send a series of HTC packets
@function name: htc_send_pkts_multiple
@input: HTCHandle - HTC handle
pPktQueue - local queue holding packets to send
@output:
@return: A_OK
@notes: Caller must initialize each packet using SET_HTC_PACKET_INFO_TX() macro.
The queue must only contain packets directed at the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the TX packets in FIFO order.
This API will remove the packets from the pkt queue and place them into the HTC Tx Queue
and bundle messages where possible.
The caller may allocate the pkt queue on the stack to hold the packets.
This interface is fully asynchronous. On error, htc_send_pkts will
call the registered Endpoint callback to cleanup the packet.
@example:
@see also: htc_flush_endpoint
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_send_pkts_multiple(HTC_HANDLE HTCHandle,
HTC_PACKET_QUEUE *pPktQueue);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Add multiple receive packets to HTC
@function name: htc_add_receive_pkt_multiple
@input: HTCHandle - HTC handle
pPktQueue - HTC receive packet queue holding packets to add
@output:
@return: A_OK on success
@notes: user must supply HTC packets for capturing incoming HTC frames. The caller
must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
macro. The queue must only contain recv packets for the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the recv packet.
This API will remove the packets from the pkt queue and place them into internal
recv packet list.
The caller may allocate the pkt queue on the stack to hold the packets.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle,
HTC_PACKET_QUEUE *pPktQueue);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Check if an endpoint is marked active
@function name: htc_is_endpoint_active
@input: HTCHandle - HTC handle
Endpoint - endpoint to check for active state
@output:
@return: returns true if Endpoint is Active
@notes:
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
A_BOOL htc_is_endpoint_active(HTC_HANDLE HTCHandle,
HTC_ENDPOINT_ID Endpoint);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Get the number of recv buffers currently queued into an HTC endpoint
@function name: htc_get_num_recv_buffers
@input: HTCHandle - HTC handle
Endpoint - endpoint to check
@output:
@return: returns number of buffers in queue
@notes:
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle,
HTC_ENDPOINT_ID Endpoint);
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Set the target failure handling callback in HTC layer
@function name: htc_set_target_failure_callback
@input: HTCHandle - HTC handle
Callback - target failure handling callback
@output:
@return:
@notes:
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
void htc_set_target_failure_callback(HTC_HANDLE HTCHandle,
HTC_TARGET_FAILURE Callback);
/* internally used functions for testing... */
void htc_enable_recv(HTC_HANDLE HTCHandle);
void htc_disable_recv(HTC_HANDLE HTCHandle);
A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
A_UINT32 TimeoutInMs,
A_BOOL *pbIsRecvPending);
/* function to fetch stats from htc layer*/
struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE
HTCHandle);
#ifdef HIF_USB
#define HTCReturnReceivePkt(target,p,osbuf) \
A_NETBUF_FREE(osbuf); \
if(p->Status == A_CLONE) { \
cdf_mem_free(p); \
}
#else
#define HTCReturnReceivePkt(target,p,osbuf) htc_add_receive_pkt(target,p)
#endif
#ifdef WLAN_FEATURE_FASTPATH
#define HTC_TX_DESC_FILL(_htc_tx_desc, _download_len, _ep_id, _seq_no) \
do { \
HTC_WRITE32((_htc_tx_desc), \
SM((_download_len), HTC_FRAME_HDR_PAYLOADLEN) | \
SM((_ep_id), HTC_FRAME_HDR_ENDPOINTID)); \
\
HTC_WRITE32((A_UINT32 *)(_htc_tx_desc) + 1, \
SM((_seq_no), HTC_FRAME_HDR_CONTROLBYTES1));\
} while (0)
#endif /* WLAN_FEATURE_FASTPATH */
#ifdef __cplusplus
}
#endif
void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, int *credit);
void htc_dump_counter_info(HTC_HANDLE HTCHandle);
void *htc_get_targetdef(HTC_HANDLE htc_handle);
void htc_set_target_to_sleep(void *context);
void htc_cancel_deferred_target_sleep(void *context);
int htc_runtime_suspend(void);
int htc_runtime_resume(void);
/* Disable ASPM : Disable PCIe low power */
void htc_disable_aspm(void);
#ifdef IPA_OFFLOAD
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
cdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
#else
#define htc_ipa_get_ce_resource(htc_handle, \
ce_sr_base_paddr, \
ce_sr_ring_size, \
ce_reg_paddr) /* NO-OP */
#endif /* IPA_OFFLOAD */
#endif /* _HTC_API_H_ */


@ -1,50 +0,0 @@
/*
* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef HTC_DEBUG_H_
#define HTC_DEBUG_H_
#define ATH_MODULE_NAME htc
#include "a_debug.h"
#include "cdf_trace.h"
/* ------- Debug related stuff ------- */
#define ATH_DEBUG_SEND ATH_DEBUG_MAKE_MODULE_MASK(0)
#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(1)
#define ATH_DEBUG_SYNC ATH_DEBUG_MAKE_MODULE_MASK(2)
#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3)
#define ATH_DEBUG_SETUP ATH_DEBUG_MAKE_MODULE_MASK(4)
#define HTC_ERROR(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_ERROR, ## args)
#define HTC_WARN(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_WARN, ## args)
#define HTC_INFO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_INFO, ## args)
#define HTC_TRACE(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_DEBUG, ## args)
#endif /*HTC_DEBUG_H_ */


@ -1,317 +0,0 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _HTC_INTERNAL_H_
#define _HTC_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#include <athdefs.h>
#include "a_types.h"
#include "osapi_linux.h"
#include <cdf_nbuf.h>
#include <cdf_types.h>
#include <cdf_lock.h>
#include <cdf_softirq_timer.h>
#include <cdf_atomic.h>
#include "hif.h"
#include <htc.h>
#include "htc_api.h"
#include "htc_packet.h"
/* HTC operational parameters */
#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
#define HTC_TARGET_DEBUG_INTR_MASK 0x01
#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
#define HTC_MIN_MSG_PER_BUNDLE 2
#if defined(HIF_USB)
#define HTC_MAX_MSG_PER_BUNDLE 9
#else
#define HTC_MAX_MSG_PER_BUNDLE 16
#endif
/*
* HTC_MAX_TX_BUNDLE_SEND_LIMIT -
* This value is in units of tx frame fragments.
* It needs to be at least as large as the maximum number of tx frames in a
* HTC download bundle times the average number of fragments in each such frame
* (In certain operating systems, such as Linux, we expect to only have
* a single fragment per frame anyway.)
*/
#define HTC_MAX_TX_BUNDLE_SEND_LIMIT 255
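/* Worked example (illustrative): with HTC_MAX_MSG_PER_BUNDLE = 16 and a
 * single fragment per frame (the typical Linux case noted above), a bundle
 * consumes at most 16 fragment slots, so a limit of 255 leaves ample margin
 * for multi-fragment frames. */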
#define HTC_PACKET_CONTAINER_ALLOCATION 32
#define NUM_CONTROL_TX_BUFFERS 2
#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CONTROL_MESSAGE_LENGTH + HTC_HDR_LENGTH)
#define HTC_CONTROL_BUFFER_ALIGN 32
#define HTC_TARGET_RESPONSE_POLL_MS 10
#if !defined(A_SIMOS_DEVHOST)
#define HTC_TARGET_MAX_RESPONSE_POLL 200 /* actual HW */
#else
#define HTC_TARGET_MAX_RESPONSE_POLL 600 /* host + target simulation */
#endif
#define HTC_SERVICE_TX_PACKET_TAG HTC_TX_PACKET_TAG_INTERNAL
#define HTC_CREDIT_HISTORY_MAX 1024
typedef enum {
HTC_REQUEST_CREDIT,
HTC_PROCESS_CREDIT_REPORT,
HTC_SUSPEND_ACK,
HTC_SUSPEND_NACK,
} htc_credit_exchange_type;
typedef struct {
htc_credit_exchange_type type;
uint64_t time;
uint32_t tx_credit;
uint32_t htc_tx_queue_depth;
} HTC_CREDIT_HISTORY;
typedef struct _HTC_ENDPOINT {
HTC_ENDPOINT_ID Id;
/* service ID this endpoint is bound to
* non-zero value means this endpoint is in use
*/
HTC_SERVICE_ID service_id;
HTC_EP_CALLBACKS EpCallBacks; /* callbacks associated with this endpoint */
HTC_PACKET_QUEUE TxQueue; /* HTC frame buffer TX queue */
int MaxTxQueueDepth; /* max depth of the TX queue before we need to
call driver's full handler */
int MaxMsgLength; /* max length of endpoint message */
uint8_t UL_PipeID;
uint8_t DL_PipeID;
int ul_is_polled; /* Need to call HIF to get tx completion callbacks? */
cdf_softirq_timer_t ul_poll_timer;
int ul_poll_timer_active;
int ul_outstanding_cnt;
int dl_is_polled; /* Need to call HIF to fetch rx? (Not currently supported.) */
#if 0 /* not currently supported */
cdf_softirq_timer_t dl_poll_timer;
#endif
HTC_PACKET_QUEUE TxLookupQueue; /* lookup queue to match netbufs to htc packets */
HTC_PACKET_QUEUE RxBufferHoldQueue; /* temporary hold queue for back compatibility */
A_UINT8 SeqNo; /* TX seq no (helpful) for debugging */
cdf_atomic_t TxProcessCount; /* serialization */
struct _HTC_TARGET *target;
int TxCredits; /* TX credits available on this endpoint */
int TxCreditSize; /* size in bytes of each credit (set by HTC) */
int TxCreditsPerMaxMsg; /* credits required per max message (precalculated) */
#ifdef HTC_EP_STAT_PROFILING
HTC_ENDPOINT_STATS endpoint_stats; /* endpoint statistics */
#endif
A_BOOL TxCreditFlowEnabled;
} HTC_ENDPOINT;
#ifdef HTC_EP_STAT_PROFILING
#define INC_HTC_EP_STAT(p, stat, count) ((p)->endpoint_stats.stat += (count))
#else
#define INC_HTC_EP_STAT(p, stat, count)
#endif
typedef struct {
A_UINT16 service_id;
A_UINT8 CreditAllocation;
} HTC_SERVICE_TX_CREDIT_ALLOCATION;
#define HTC_MAX_SERVICE_ALLOC_ENTRIES 8
/* Error codes for HTC layer packet stats */
enum ol_ath_htc_pkt_ecodes {
GET_HTC_PKT_Q_FAIL = 0, /* error - failed to get packet at head of HTC_PACKET_Q */
HTC_PKT_Q_EMPTY,
HTC_SEND_Q_EMPTY
};
/* our HTC target state */
typedef struct _HTC_TARGET {
struct ol_softc *hif_dev;
HTC_ENDPOINT endpoint[ENDPOINT_MAX];
cdf_spinlock_t HTCLock;
cdf_spinlock_t HTCRxLock;
cdf_spinlock_t HTCTxLock;
cdf_spinlock_t HTCCreditLock;
A_UINT32 HTCStateFlags;
void *host_handle;
HTC_INIT_INFO HTCInitInfo;
HTC_PACKET *pHTCPacketStructPool; /* pool of HTC packets */
HTC_PACKET_QUEUE ControlBufferTXFreeList;
A_UINT8 CtrlResponseBuffer[HTC_MAX_CONTROL_MESSAGE_LENGTH];
int CtrlResponseLength;
cdf_event_t ctrl_response_valid;
A_BOOL CtrlResponseProcessing;
int TotalTransmitCredits;
HTC_SERVICE_TX_CREDIT_ALLOCATION
ServiceTxAllocTable[HTC_MAX_SERVICE_ALLOC_ENTRIES];
int TargetCreditSize;
#ifdef RX_SG_SUPPORT
cdf_nbuf_queue_t RxSgQueue;
A_BOOL IsRxSgInprogress;
A_UINT32 CurRxSgTotalLen; /* current total length */
A_UINT32 ExpRxSgTotalLen; /* expected total length */
#endif
cdf_device_t osdev;
struct ol_ath_htc_stats htc_pkt_stats;
HTC_PACKET *pBundleFreeList;
A_UINT32 ce_send_cnt;
A_UINT32 TX_comp_cnt;
A_UINT8 MaxMsgsPerHTCBundle;
cdf_work_t queue_kicker;
} HTC_TARGET;
#define HTC_ENABLE_BUNDLE(target) (target->MaxMsgsPerHTCBundle > 1)
#ifdef RX_SG_SUPPORT
#define RESET_RX_SG_CONFIG(_target) \
do { \
(_target)->ExpRxSgTotalLen = 0; \
(_target)->CurRxSgTotalLen = 0; \
(_target)->IsRxSgInprogress = false; \
} while (0)
#endif
#define HTC_STATE_STOPPING (1 << 0)
#define HTC_STOPPING(t) ((t)->HTCStateFlags & HTC_STATE_STOPPING)
#define LOCK_HTC(t) cdf_spin_lock_bh(&(t)->HTCLock);
#define UNLOCK_HTC(t) cdf_spin_unlock_bh(&(t)->HTCLock);
#define LOCK_HTC_RX(t) cdf_spin_lock_bh(&(t)->HTCRxLock);
#define UNLOCK_HTC_RX(t) cdf_spin_unlock_bh(&(t)->HTCRxLock);
#define LOCK_HTC_TX(t) cdf_spin_lock_bh(&(t)->HTCTxLock);
#define UNLOCK_HTC_TX(t) cdf_spin_unlock_bh(&(t)->HTCTxLock);
#define LOCK_HTC_CREDIT(t) cdf_spin_lock_bh(&(t)->HTCCreditLock);
#define UNLOCK_HTC_CREDIT(t) cdf_spin_unlock_bh(&(t)->HTCCreditLock);
#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((HTC_TARGET *)(hnd))
#define IS_TX_CREDIT_FLOW_ENABLED(ep) ((ep)->TxCreditFlowEnabled)
#define HTC_POLL_CLEANUP_PERIOD_MS 10 /* milliseconds */
/* Macro to Increment the HTC_PACKET_ERRORS for Tx.*/
#define OL_ATH_HTC_PKT_ERROR_COUNT_INCR(_target,_ecode) \
do { \
if(_ecode==GET_HTC_PKT_Q_FAIL) (_target->htc_pkt_stats.htc_get_pkt_q_fail_count)+=1; \
if(_ecode==HTC_PKT_Q_EMPTY) (_target->htc_pkt_stats.htc_pkt_q_empty_count)+=1; \
if(_ecode==HTC_SEND_Q_EMPTY) (_target->htc_pkt_stats.htc_send_q_empty_count)+=1; \
} while(0);
/* internal HTC functions */
CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
uint8_t pipeID);
CDF_STATUS htc_tx_completion_handler(void *Context, cdf_nbuf_t netbuf,
unsigned int transferID, uint32_t toeplitz_hash_result);
HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target);
void free_htc_bundle_packet(HTC_TARGET *target, HTC_PACKET *pPacket);
HTC_PACKET *allocate_htc_packet_container(HTC_TARGET *target);
void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket);
void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint);
void htc_flush_endpoint_tx(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint,
HTC_TX_TAG Tag);
void htc_recv_init(HTC_TARGET *target);
A_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target);
void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket);
HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target);
A_UINT8 htc_get_credit_allocation(HTC_TARGET *target, A_UINT16 service_id);
void htc_tx_resource_avail_handler(void *context, A_UINT8 pipeID);
void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket);
void htc_process_credit_rpt(HTC_TARGET *target,
HTC_CREDIT_REPORT *pRpt,
int NumEntries, HTC_ENDPOINT_ID FromEndpoint);
void htc_fw_event_handler(void *context, CDF_STATUS status);
void htc_send_complete_check_cleanup(void *context);
void htc_runtime_pm_init(HTC_TARGET *target);
void htc_kick_queues(void *context);
void htc_credit_record(htc_credit_exchange_type type, uint32_t tx_credit,
uint32_t htc_tx_queue_depth);
static inline void htc_send_complete_poll_timer_stop(HTC_ENDPOINT *
pEndpoint) {
LOCK_HTC_TX(pEndpoint->target);
if (pEndpoint->ul_poll_timer_active) {
/* cdf_softirq_timer_cancel(&pEndpoint->ul_poll_timer); */
pEndpoint->ul_poll_timer_active = 0;
}
UNLOCK_HTC_TX(pEndpoint->target);
}
static inline void htc_send_complete_poll_timer_start(HTC_ENDPOINT *
pEndpoint) {
LOCK_HTC_TX(pEndpoint->target);
if (pEndpoint->ul_outstanding_cnt
&& !pEndpoint->ul_poll_timer_active) {
/*
cdf_softirq_timer_start(
&pEndpoint->ul_poll_timer, HTC_POLL_CLEANUP_PERIOD_MS);
*/
pEndpoint->ul_poll_timer_active = 1;
}
UNLOCK_HTC_TX(pEndpoint->target);
}
static inline void
htc_send_complete_check(HTC_ENDPOINT *pEndpoint, int force) {
/*
* Stop the polling-cleanup timer that will result in a later call to
* this function. It may get started again below, if there are still
* outstanding sends.
*/
htc_send_complete_poll_timer_stop(pEndpoint);
/*
* Check whether HIF has any prior sends that have finished but
* have not yet had the post-processing done.
*/
hif_send_complete_check(pEndpoint->target->hif_dev,
pEndpoint->UL_PipeID, force);
/*
* If there are still outstanding sends after polling, start a timer
* to check again a little later.
*/
htc_send_complete_poll_timer_start(pEndpoint);
}
#ifdef __cplusplus
}
#endif
#ifndef DEBUG_BUNDLE
#define DEBUG_BUNDLE 0
#endif
#ifdef HIF_SDIO
#ifndef ENABLE_BUNDLE_TX
#define ENABLE_BUNDLE_TX 1
#endif
#ifndef ENABLE_BUNDLE_RX
#define ENABLE_BUNDLE_RX 1
#endif
#endif /* HIF_SDIO */
#endif /* _HTC_INTERNAL_H_ */


@ -1,280 +0,0 @@
/*
* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef HTC_PACKET_H_
#define HTC_PACKET_H_
#include <osdep.h>
#include <a_types.h> /* A_UINT16, etc. */
#include "dl_list.h"
/* ------ Endpoint IDS ------ */
typedef enum {
ENDPOINT_UNUSED = -1,
ENDPOINT_0 = 0,
ENDPOINT_1 = 1,
ENDPOINT_2 = 2,
ENDPOINT_3,
ENDPOINT_4,
ENDPOINT_5,
ENDPOINT_6,
ENDPOINT_7,
ENDPOINT_8,
ENDPOINT_MAX,
} HTC_ENDPOINT_ID;
struct _HTC_PACKET;
typedef void (*HTC_PACKET_COMPLETION)(void *, struct _HTC_PACKET *);
typedef A_UINT16 HTC_TX_TAG;
typedef struct _HTC_TX_PACKET_INFO {
HTC_TX_TAG Tag; /* tag used to selective flush packets */
int CreditsUsed; /* number of credits used for this TX packet (HTC internal) */
A_UINT8 SendFlags; /* send flags (HTC internal) */
int SeqNo; /* internal seq no for debugging (HTC internal) */
A_UINT32 Flags; /* internal use */
} HTC_TX_PACKET_INFO;
/**
* HTC_TX_PACKET_TAG_XXX - #defines for tagging packets for special handling
* HTC_TX_PACKET_TAG_ALL: zero is reserved and used to flush ALL packets
* HTC_TX_PACKET_TAG_INTERNAL: internal tags start here
* HTC_TX_PACKET_TAG_USER_DEFINED: user-defined tags start here
* HTC_TX_PACKET_TAG_BUNDLED: indicate this is a bundled tx packet
* HTC_TX_PACKET_TAG_AUTO_PM: indicate a power management wmi command
*/
#define HTC_TX_PACKET_TAG_ALL 0
#define HTC_TX_PACKET_TAG_INTERNAL 1
#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_TX_PACKET_TAG_INTERNAL + 9)
#define HTC_TX_PACKET_TAG_BUNDLED (HTC_TX_PACKET_TAG_USER_DEFINED + 1)
#define HTC_TX_PACKET_TAG_AUTO_PM (HTC_TX_PACKET_TAG_USER_DEFINED + 2)
/* Tag packet for runtime put after sending */
#define HTC_TX_PACKET_TAG_RUNTIME_PUT (HTC_TX_PACKET_TAG_USER_DEFINED + 3)
#define HTC_TX_PACKET_FLAG_FIXUP_NETBUF (1 << 0)
typedef struct _HTC_RX_PACKET_INFO {
A_UINT32 ExpectedHdr; /* HTC internal use */
A_UINT32 HTCRxFlags; /* HTC internal use */
A_UINT32 IndicationFlags; /* indication flags set on each RX packet indication */
} HTC_RX_PACKET_INFO;
#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) /* more packets on this endpoint are being fetched */
/* wrapper around endpoint-specific packets */
typedef struct _HTC_PACKET {
DL_LIST ListLink; /* double link */
void *pPktContext; /* caller's per packet specific context */
A_UINT8 *pBufferStart; /* the true buffer start, the caller can
store the real buffer start here. In
receive callbacks, the HTC layer sets pBuffer
to the start of the payload past the header. This
field allows the caller to reset pBuffer when it
recycles receive packets back to HTC */
/*
* Pointer to the start of the buffer. In the transmit
* direction this points to the start of the payload. In the
* receive direction, however, the buffer when queued up
* points to the start of the HTC header but when returned
* to the caller points to the start of the payload
*/
A_UINT8 *pBuffer; /* payload start (RX/TX) */
A_UINT32 BufferLength; /* length of buffer */
A_UINT32 ActualLength; /* actual length of payload */
HTC_ENDPOINT_ID Endpoint; /* endpoint that this packet was sent/recv'd from */
A_STATUS Status; /* completion status */
union {
HTC_TX_PACKET_INFO AsTx; /* Tx Packet specific info */
HTC_RX_PACKET_INFO AsRx; /* Rx Packet specific info */
} PktInfo;
/* the following fields are for internal HTC use */
A_UINT32 netbufOrigHeadRoom;
HTC_PACKET_COMPLETION Completion; /* completion */
void *pContext; /* HTC private completion context */
void *pNetBufContext; /* optimization for network-oriented data: the HTC packet
can carry the network buffer corresponding to this HTC packet,
so lower layers may optimize the transfer knowing this is
a network buffer */
} HTC_PACKET;
#define COMPLETE_HTC_PACKET(p,status) \
{ \
(p)->Status = (status); \
(p)->Completion((p)->pContext,(p)); \
}
#define INIT_HTC_PACKET_INFO(p,b,len) \
{ \
(p)->pBufferStart = (b); \
(p)->BufferLength = (len); \
}
/* macro to set an initial RX packet for refilling HTC */
#define SET_HTC_PACKET_INFO_RX_REFILL(p,c,b,len,ep) \
{ \
(p)->pPktContext = (c); \
(p)->pBuffer = (b); \
(p)->pBufferStart = (b); \
(p)->BufferLength = (len); \
(p)->Endpoint = (ep); \
}
/* fast macro to recycle an RX packet that will be re-queued to HTC */
#define HTC_PACKET_RESET_RX(p) \
{ (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; }
/* macro to set packet parameters for TX */
#define SET_HTC_PACKET_INFO_TX(p,c,b,len,ep,tag) \
{ \
(p)->pPktContext = (c); \
(p)->pBuffer = (b); \
(p)->ActualLength = (len); \
(p)->Endpoint = (ep); \
(p)->PktInfo.AsTx.Tag = (tag); \
(p)->PktInfo.AsTx.Flags = 0; \
(p)->PktInfo.AsTx.SendFlags = 0; \
}
#define SET_HTC_PACKET_NET_BUF_CONTEXT(p,nb) \
(p)->pNetBufContext = (nb)
#define GET_HTC_PACKET_NET_BUF_CONTEXT(p) (p)->pNetBufContext
/* HTC Packet Queueing Macros */
typedef struct _HTC_PACKET_QUEUE {
DL_LIST QueueHead;
int Depth;
} HTC_PACKET_QUEUE;
/* initialize queue */
#define INIT_HTC_PACKET_QUEUE(pQ) \
{ \
DL_LIST_INIT(& (pQ)->QueueHead); \
(pQ)->Depth = 0; \
}
/* enqueue HTC packet to the tail of the queue */
#define HTC_PACKET_ENQUEUE(pQ,p) \
{ dl_list_insert_tail(& (pQ)->QueueHead,& (p)->ListLink); \
(pQ)->Depth ++; \
}
/* enqueue HTC packet to the head of the queue */
#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ,p) \
{ dl_list_insert_head(& (pQ)->QueueHead,& (p)->ListLink); \
(pQ)->Depth ++; \
}
/* test if a queue is empty */
#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0)
/* get packet at head without removing it */
static INLINE HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue)
{
if (queue->Depth == 0) {
return NULL;
}
return
A_CONTAINING_STRUCT((DL_LIST_GET_ITEM_AT_HEAD(&queue->QueueHead)),
HTC_PACKET, ListLink);
}
/* remove a packet from a queue, where-ever it is in the queue */
#define HTC_PACKET_REMOVE(pQ,p) \
{ \
dl_list_remove(& (p)->ListLink); \
(pQ)->Depth --; \
}
/* dequeue an HTC packet from the head of the queue */
static INLINE HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE *queue)
{
DL_LIST *pItem = dl_list_remove_item_from_head(&queue->QueueHead);
if (pItem != NULL) {
queue->Depth--;
return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink);
}
return NULL;
}
/* dequeue an HTC packet from the tail of the queue */
static INLINE HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue)
{
DL_LIST *pItem = dl_list_remove_item_from_tail(&queue->QueueHead);
if (pItem != NULL) {
queue->Depth--;
return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink);
}
return NULL;
}
#define HTC_PACKET_QUEUE_DEPTH(pQ) (pQ)->Depth
#define HTC_GET_ENDPOINT_FROM_PKT(p) (p)->Endpoint
#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag
/* transfer the packets from one queue to the tail of another queue */
#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest,pQSrc) \
{ \
dl_list_transfer_items_to_tail(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
(pQDest)->Depth += (pQSrc)->Depth; \
(pQSrc)->Depth = 0; \
}
/*
* Transfer the packets from one queue to the head of another queue.
* This xfer_to_head(q1,q2) is basically equivalent to xfer_to_tail(q2,q1),
* but it updates the queue descriptor object for the initial queue to refer
* to the concatenated queue.
*/
#define HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(pQDest, pQSrc) \
{ \
dl_list_transfer_items_to_head(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
(pQDest)->Depth += (pQSrc)->Depth; \
(pQSrc)->Depth = 0; \
}
/* fast version to init and add a single packet to a queue */
#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ,pP) \
{ \
DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead,&(pP)->ListLink) \
(pQ)->Depth = 1; \
}
#define HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQ, pPTemp) \
ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead,(pPTemp), HTC_PACKET, ListLink)
#define HTC_PACKET_QUEUE_ITERATE_IS_VALID(pQ) ITERATE_IS_VALID(&(pQ)->QueueHead)
#define HTC_PACKET_QUEUE_ITERATE_RESET(pQ) ITERATE_RESET(&(pQ)->QueueHead)
#define HTC_PACKET_QUEUE_ITERATE_END ITERATE_END
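/* Iteration sketch (illustrative addition, not part of the original header;
 * pQueue is a placeholder): walk a queue and mark every packet cancelled, as
 * the receive path does when a refill call fails:
 *
 *   HTC_PACKET *pPacket;
 *   HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQueue, pPacket) {
 *       pPacket->Status = A_ECANCELED;
 *   }
 *   HTC_PACKET_QUEUE_ITERATE_END;
 */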
#endif /*HTC_PACKET_H_ */


@ -1,749 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "htc_debug.h"
#include "htc_internal.h"
#include "cds_api.h"
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include "epping_main.h"
/* HTC Control message receive timeout msec */
#define HTC_CONTROL_RX_TIMEOUT 3000
#ifdef DEBUG
void debug_dump_bytes(A_UCHAR *buffer, A_UINT16 length, char *pDescription)
{
A_CHAR stream[60];
A_CHAR byteOffsetStr[10];
A_UINT32 i;
A_UINT16 offset, count, byteOffset;
A_PRINTF("<---------Dumping %d Bytes : %s ------>\n", length,
pDescription);
count = 0;
offset = 0;
byteOffset = 0;
for (i = 0; i < length; i++) {
A_SNPRINTF(stream + offset, (sizeof(stream) - offset),
"%02X ", buffer[i]);
count++;
offset += 3;
if (count == 16) {
count = 0;
offset = 0;
A_SNPRINTF(byteOffsetStr, sizeof(byteOffsetStr), "%4.4X",
byteOffset);
A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
A_MEMZERO(stream, 60);
byteOffset += 16;
}
}
if (offset != 0) {
A_SNPRINTF(byteOffsetStr, sizeof(byteOffsetStr), "%4.4X",
byteOffset);
A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
}
A_PRINTF("<------------------------------------------------->\n");
}
#else
void debug_dump_bytes(A_UCHAR *buffer, A_UINT16 length, char *pDescription)
{
}
#endif
static A_STATUS htc_process_trailer(HTC_TARGET *target,
A_UINT8 *pBuffer,
int Length, HTC_ENDPOINT_ID FromEndpoint);
static void do_recv_completion(HTC_ENDPOINT *pEndpoint,
HTC_PACKET_QUEUE *pQueueToIndicate)
{
do {
if (HTC_QUEUE_EMPTY(pQueueToIndicate)) {
/* nothing to indicate */
break;
}
if (pEndpoint->EpCallBacks.EpRecvPktMultiple != NULL) {
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
(" HTC calling ep %d, recv multiple callback (%d pkts) \n",
pEndpoint->Id,
HTC_PACKET_QUEUE_DEPTH
(pQueueToIndicate)));
/* a recv multiple handler is being used, pass the queue to the handler */
pEndpoint->EpCallBacks.EpRecvPktMultiple(pEndpoint->
EpCallBacks.
pContext,
pQueueToIndicate);
INIT_HTC_PACKET_QUEUE(pQueueToIndicate);
} else {
HTC_PACKET *pPacket;
/* using legacy EpRecv */
while (!HTC_QUEUE_EMPTY(pQueueToIndicate)) {
pPacket = htc_packet_dequeue(pQueueToIndicate);
if (pEndpoint->EpCallBacks.EpRecv == NULL) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("HTC ep %d has NULL recv callback on packet %p\n",
pEndpoint->Id,
pPacket));
continue;
}
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("HTC calling ep %d recv callback on packet %p\n",
pEndpoint->Id, pPacket));
pEndpoint->EpCallBacks.EpRecv(pEndpoint->
EpCallBacks.
pContext,
pPacket);
}
}
} while (false);
}
static void recv_packet_completion(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint,
HTC_PACKET *pPacket)
{
HTC_PACKET_QUEUE container;
INIT_HTC_PACKET_QUEUE_AND_ADD(&container, pPacket);
/* do completion */
do_recv_completion(pEndpoint, &container);
}
void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket)
{
/* TODO, can't really receive HTC control messages yet.... */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("Invalid call to htc_control_rx_complete\n"));
}
void htc_unblock_recv(HTC_HANDLE HTCHandle)
{
/* TODO find the Need in new model */
}
void htc_enable_recv(HTC_HANDLE HTCHandle)
{
/* TODO find the Need in new model */
}
void htc_disable_recv(HTC_HANDLE HTCHandle)
{
/* TODO find the Need in new model */
}
int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
HTC_ENDPOINT *pEndpoint = &target->endpoint[Endpoint];
return HTC_PACKET_QUEUE_DEPTH(&pEndpoint->RxBufferHoldQueue);
}
HTC_PACKET *allocate_htc_packet_container(HTC_TARGET *target)
{
HTC_PACKET *pPacket;
LOCK_HTC_RX(target);
if (NULL == target->pHTCPacketStructPool) {
UNLOCK_HTC_RX(target);
return NULL;
}
pPacket = target->pHTCPacketStructPool;
target->pHTCPacketStructPool = (HTC_PACKET *) pPacket->ListLink.pNext;
UNLOCK_HTC_RX(target);
pPacket->ListLink.pNext = NULL;
return pPacket;
}
void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket)
{
LOCK_HTC_RX(target);
if (NULL == target->pHTCPacketStructPool) {
target->pHTCPacketStructPool = pPacket;
pPacket->ListLink.pNext = NULL;
} else {
pPacket->ListLink.pNext =
(DL_LIST *) target->pHTCPacketStructPool;
target->pHTCPacketStructPool = pPacket;
}
UNLOCK_HTC_RX(target);
}
#ifdef RX_SG_SUPPORT
cdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target)
{
cdf_nbuf_t skb;
uint8_t *anbdata;
uint8_t *anbdata_new;
uint32_t anblen;
cdf_nbuf_t new_skb = NULL;
uint32_t sg_queue_len;
cdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;
sg_queue_len = cdf_nbuf_queue_len(rx_sg_queue);
if (sg_queue_len <= 1) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("rx_sg_to_single_netbuf: invalid sg queue len %u\n"));
goto _failed;
}
new_skb = cdf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, false);
if (new_skb == NULL) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("rx_sg_to_single_netbuf: can't allocate %u size netbuf\n",
target->ExpRxSgTotalLen));
goto _failed;
}
cdf_nbuf_peek_header(new_skb, &anbdata_new, &anblen);
skb = cdf_nbuf_queue_remove(rx_sg_queue);
do {
cdf_nbuf_peek_header(skb, &anbdata, &anblen);
cdf_mem_copy(anbdata_new, anbdata, cdf_nbuf_len(skb));
cdf_nbuf_put_tail(new_skb, cdf_nbuf_len(skb));
anbdata_new += cdf_nbuf_len(skb);
cdf_nbuf_free(skb);
skb = cdf_nbuf_queue_remove(rx_sg_queue);
} while (skb != NULL);
RESET_RX_SG_CONFIG(target);
return new_skb;
_failed:
while ((skb = cdf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
cdf_nbuf_free(skb);
}
RESET_RX_SG_CONFIG(target);
return NULL;
}
#endif
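/*
 * RX scatter-gather summary (descriptive comment added for clarity): when a
 * received netbuf is shorter than the HTC header plus the advertised payload
 * length, the handler below starts collecting fragments in RxSgQueue and
 * records the expected total length. Each subsequent netbuf delivered to this
 * handler is appended until the accumulated length matches the expected
 * length, at which point rx_sg_to_single_netbuf() copies the fragments into
 * one contiguous netbuf and normal HTC processing resumes.
 */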
CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
uint8_t pipeID)
{
CDF_STATUS status = CDF_STATUS_SUCCESS;
HTC_FRAME_HDR *HtcHdr;
HTC_TARGET *target = (HTC_TARGET *) Context;
uint8_t *netdata;
uint32_t netlen;
HTC_ENDPOINT *pEndpoint;
HTC_PACKET *pPacket;
A_UINT16 payloadLen;
uint32_t trailerlen = 0;
A_UINT8 htc_ep_id;
#ifdef RX_SG_SUPPORT
LOCK_HTC_RX(target);
if (target->IsRxSgInprogress) {
target->CurRxSgTotalLen += cdf_nbuf_len(netbuf);
cdf_nbuf_queue_add(&target->RxSgQueue, netbuf);
if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) {
netbuf = rx_sg_to_single_netbuf(target);
if (netbuf == NULL) {
UNLOCK_HTC_RX(target);
goto _out;
}
} else {
netbuf = NULL;
UNLOCK_HTC_RX(target);
goto _out;
}
}
UNLOCK_HTC_RX(target);
#endif
netdata = cdf_nbuf_data(netbuf);
netlen = cdf_nbuf_len(netbuf);
HtcHdr = (HTC_FRAME_HDR *) netdata;
do {
htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID);
if (htc_ep_id >= ENDPOINT_MAX) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("HTC Rx: invalid EndpointID=%d\n",
htc_ep_id));
debug_dump_bytes((A_UINT8 *) HtcHdr,
sizeof(HTC_FRAME_HDR), "BAD HTC Header");
status = CDF_STATUS_E_FAILURE;
CDF_BUG(0);
break;
}
pEndpoint = &target->endpoint[htc_ep_id];
/*
* If this endpoint that received a message from the target has
* a to-target HIF pipe whose send completions are polled rather
* than interrupt-driven, this is a good point to ask HIF to check
* whether it has any completed sends to handle.
*/
if (pEndpoint->ul_is_polled) {
htc_send_complete_check(pEndpoint, 1);
}
payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN);
if (netlen < (payloadLen + HTC_HDR_LENGTH)) {
#ifdef RX_SG_SUPPORT
LOCK_HTC_RX(target);
target->IsRxSgInprogress = true;
cdf_nbuf_queue_init(&target->RxSgQueue);
cdf_nbuf_queue_add(&target->RxSgQueue, netbuf);
target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH);
target->CurRxSgTotalLen += netlen;
UNLOCK_HTC_RX(target);
netbuf = NULL;
break;
#else
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("HTC Rx: insufficient length, got:%d expected =%zu\n",
netlen, payloadLen + HTC_HDR_LENGTH));
debug_dump_bytes((A_UINT8 *) HtcHdr,
sizeof(HTC_FRAME_HDR),
"BAD RX packet length");
status = CDF_STATUS_E_FAILURE;
CDF_BUG(0);
break;
#endif
}
#ifdef HTC_EP_STAT_PROFILING
LOCK_HTC_RX(target);
INC_HTC_EP_STAT(pEndpoint, RxReceived, 1);
UNLOCK_HTC_RX(target);
#endif
/* if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { */
{
A_UINT8 temp;
A_STATUS temp_status;
/* get flags to check for trailer */
temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS);
if (temp & HTC_FLAGS_RECV_TRAILER) {
/* extract the trailer length */
temp =
HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR,
CONTROLBYTES0);
if ((temp < sizeof(HTC_RECORD_HDR))
|| (temp > payloadLen)) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("htc_rx_completion_handler, invalid header (payloadlength should be :%d, CB[0] is:%d)\n",
payloadLen, temp));
status = CDF_STATUS_E_INVAL;
break;
}
trailerlen = temp;
/* process trailer data that follows HDR + application payload */
temp_status = htc_process_trailer(target,
((A_UINT8 *) HtcHdr +
HTC_HDR_LENGTH +
payloadLen - temp),
temp, htc_ep_id);
if (A_FAILED(temp_status)) {
status = CDF_STATUS_E_FAILURE;
break;
}
}
}
if (((int)payloadLen - (int)trailerlen) <= 0) {
/* zero length packet with trailer data, just drop these */
break;
}
if (htc_ep_id == ENDPOINT_0) {
A_UINT16 message_id;
HTC_UNKNOWN_MSG *htc_msg;
int wow_nack = 0;
/* remove HTC header */
cdf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
netdata = cdf_nbuf_data(netbuf);
netlen = cdf_nbuf_len(netbuf);
htc_msg = (HTC_UNKNOWN_MSG *) netdata;
message_id =
HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, MESSAGEID);
switch (message_id) {
default:
/* handle HTC control message */
if (target->CtrlResponseProcessing) {
/* this is a fatal error: the target should not be sending
* unsolicited messages on endpoint 0 */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("HTC Rx Ctrl still processing\n"));
status = CDF_STATUS_E_FAILURE;
CDF_BUG(false);
break;
}
LOCK_HTC_RX(target);
target->CtrlResponseLength =
min((int)netlen,
HTC_MAX_CONTROL_MESSAGE_LENGTH);
A_MEMCPY(target->CtrlResponseBuffer, netdata,
target->CtrlResponseLength);
/* Requester will clear this flag */
target->CtrlResponseProcessing = true;
UNLOCK_HTC_RX(target);
cdf_event_set(&target->ctrl_response_valid);
break;
case HTC_MSG_SEND_SUSPEND_COMPLETE:
wow_nack = 0;
LOCK_HTC_CREDIT(target);
htc_credit_record(HTC_SUSPEND_ACK,
pEndpoint->TxCredits,
HTC_PACKET_QUEUE_DEPTH(
&pEndpoint->TxQueue));
UNLOCK_HTC_CREDIT(target);
target->HTCInitInfo.
TargetSendSuspendComplete((void *)
&wow_nack);
break;
case HTC_MSG_NACK_SUSPEND:
wow_nack = 1;
LOCK_HTC_CREDIT(target);
htc_credit_record(HTC_SUSPEND_ACK,
pEndpoint->TxCredits,
HTC_PACKET_QUEUE_DEPTH(
&pEndpoint->TxQueue));
UNLOCK_HTC_CREDIT(target);
target->HTCInitInfo.
TargetSendSuspendComplete((void *)
&wow_nack);
break;
}
cdf_nbuf_free(netbuf);
netbuf = NULL;
break;
}
/* the current message-based HIF architecture allocates net bufs for recv packets;
* since this layer bridges that HIF to upper layers, which expect HTC packets,
* we form the HTC packets here
* TODO_FIXME */
pPacket = allocate_htc_packet_container(target);
if (NULL == pPacket) {
status = CDF_STATUS_E_RESOURCES;
break;
}
pPacket->Status = CDF_STATUS_SUCCESS;
pPacket->Endpoint = htc_ep_id;
pPacket->pPktContext = netbuf;
pPacket->pBuffer = cdf_nbuf_data(netbuf) + HTC_HDR_LENGTH;
pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen;
cdf_nbuf_pull_head(netbuf, HTC_HEADER_LEN);
cdf_nbuf_set_pktlen(netbuf, pPacket->ActualLength);
recv_packet_completion(target, pEndpoint, pPacket);
/* recover the packet container */
free_htc_packet_container(target, pPacket);
netbuf = NULL;
} while (false);
#ifdef RX_SG_SUPPORT
_out:
#endif
if (netbuf != NULL) {
cdf_nbuf_free(netbuf);
}
return status;
}
A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle,
HTC_PACKET_QUEUE *pPktQueue)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
HTC_ENDPOINT *pEndpoint;
HTC_PACKET *pFirstPacket;
A_STATUS status = A_OK;
HTC_PACKET *pPacket;
pFirstPacket = htc_get_pkt_at_head(pPktQueue);
if (NULL == pFirstPacket) {
A_ASSERT(false);
return A_EINVAL;
}
AR_DEBUG_ASSERT(pFirstPacket->Endpoint < ENDPOINT_MAX);
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("+- htc_add_receive_pkt_multiple : endPointId: %d, cnt:%d, length: %d\n",
pFirstPacket->Endpoint,
HTC_PACKET_QUEUE_DEPTH(pPktQueue),
pFirstPacket->BufferLength));
pEndpoint = &target->endpoint[pFirstPacket->Endpoint];
LOCK_HTC_RX(target);
do {
if (HTC_STOPPING(target)) {
status = A_ERROR;
break;
}
/* store receive packets */
HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RxBufferHoldQueue,
pPktQueue);
} while (false);
UNLOCK_HTC_RX(target);
if (A_FAILED(status)) {
/* walk through queue and mark each one canceled */
HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue, pPacket) {
pPacket->Status = A_ECANCELED;
}
HTC_PACKET_QUEUE_ITERATE_END;
do_recv_completion(pEndpoint, pPktQueue);
}
return status;
}
A_STATUS htc_add_receive_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket)
{
HTC_PACKET_QUEUE queue;
INIT_HTC_PACKET_QUEUE_AND_ADD(&queue, pPacket);
return htc_add_receive_pkt_multiple(HTCHandle, &queue);
}
void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint)
{
HTC_PACKET *pPacket;
HTC_PACKET_QUEUE container;
LOCK_HTC_RX(target);
while (1) {
pPacket = htc_packet_dequeue(&pEndpoint->RxBufferHoldQueue);
if (NULL == pPacket) {
break;
}
UNLOCK_HTC_RX(target);
pPacket->Status = A_ECANCELED;
pPacket->ActualLength = 0;
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
(" Flushing RX packet:%p, length:%d, ep:%d \n",
pPacket, pPacket->BufferLength,
pPacket->Endpoint));
INIT_HTC_PACKET_QUEUE_AND_ADD(&container, pPacket);
/* give the packet back */
do_recv_completion(pEndpoint, &container);
LOCK_HTC_RX(target);
}
UNLOCK_HTC_RX(target);
}
void htc_recv_init(HTC_TARGET *target)
{
/* Initialize ctrl_response_valid to block */
cdf_event_init(&target->ctrl_response_valid);
}
/* polling routine to wait for a control packet to be received */
A_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target)
{
/* int count = HTC_TARGET_MAX_RESPONSE_POLL; */
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCWaitCtrlMessageRecv\n"));
/* Wait for BMI request/response transaction to complete */
if (cdf_wait_single_event(&target->ctrl_response_valid,
cdf_system_msecs_to_ticks(HTC_CONTROL_RX_TIMEOUT))) {
CDF_BUG(0);
return A_ERROR;
}
LOCK_HTC_RX(target);
/* caller will clear this flag */
target->CtrlResponseProcessing = true;
UNLOCK_HTC_RX(target);
#if 0
while (count > 0) {
LOCK_HTC_RX(target);
if (target->CtrlResponseValid) {
target->CtrlResponseValid = false;
/* caller will clear this flag */
target->CtrlResponseProcessing = true;
UNLOCK_HTC_RX(target);
break;
}
UNLOCK_HTC_RX(target);
count--;
A_MSLEEP(HTC_TARGET_RESPONSE_POLL_MS);
}
if (count <= 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("-HTCWaitCtrlMessageRecv: Timeout!\n"));
return A_ECOMM;
}
#endif
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCWaitCtrlMessageRecv success\n"));
return A_OK;
}
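/*
 * Descriptive note (added for clarity): a receive trailer is a sequence of
 * records, each consisting of an HTC_RECORD_HDR (record id plus length)
 * immediately followed by that record's payload. The parser below walks the
 * records until the trailer is exhausted; credit reports are processed here,
 * while lookahead records are consumed in the HIF layer on SDIO.
 */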
static A_STATUS htc_process_trailer(HTC_TARGET *target,
A_UINT8 *pBuffer,
int Length, HTC_ENDPOINT_ID FromEndpoint)
{
HTC_RECORD_HDR *pRecord;
A_UINT8 htc_rec_id;
A_UINT8 htc_rec_len;
A_UINT8 *pRecordBuf;
A_UINT8 *pOrigBuffer;
int origLength;
A_STATUS status;
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("+htc_process_trailer (length:%d) \n", Length));
if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
AR_DEBUG_PRINTBUF(pBuffer, Length, "Recv Trailer");
}
pOrigBuffer = pBuffer;
origLength = Length;
status = A_OK;
while (Length > 0) {
if (Length < sizeof(HTC_RECORD_HDR)) {
status = A_EPROTO;
break;
}
/* these are byte aligned structs */
pRecord = (HTC_RECORD_HDR *) pBuffer;
Length -= sizeof(HTC_RECORD_HDR);
pBuffer += sizeof(HTC_RECORD_HDR);
htc_rec_len = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, LENGTH);
htc_rec_id = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, RECORDID);
if (htc_rec_len > Length) {
/* no room left in buffer for record */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
(" invalid record length: %d (id:%d) buffer has: %d bytes left \n",
htc_rec_len, htc_rec_id, Length));
status = A_EPROTO;
break;
}
/* start of record follows the header */
pRecordBuf = pBuffer;
switch (htc_rec_id) {
case HTC_RECORD_CREDITS:
AR_DEBUG_ASSERT(htc_rec_len >=
sizeof(HTC_CREDIT_REPORT));
htc_process_credit_rpt(target,
(HTC_CREDIT_REPORT *) pRecordBuf,
htc_rec_len /
(sizeof(HTC_CREDIT_REPORT)),
FromEndpoint);
break;
#ifdef HIF_SDIO
case HTC_RECORD_LOOKAHEAD:
/* Process in HIF layer */
break;
case HTC_RECORD_LOOKAHEAD_BUNDLE:
/* Process in HIF layer */
break;
#endif /* HIF_SDIO */
default:
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
(" HTC unhandled record: id:%d length:%d \n",
htc_rec_id, htc_rec_len));
break;
}
if (A_FAILED(status)) {
break;
}
/* advance buffer past this record for next time around */
pBuffer += htc_rec_len;
Length -= htc_rec_len;
}
if (A_FAILED(status)) {
debug_dump_bytes(pOrigBuffer, origLength, "BAD Recv Trailer");
}
AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-htc_process_trailer \n"));
return status;
}

File diff suppressed because it is too large.


@ -1,369 +0,0 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "htc_debug.h"
#include "htc_internal.h"
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include "hif.h"
/* use credit flow control over HTC */
unsigned int htc_credit_flow = 1;
#ifndef DEBUG_CREDIT
#define DEBUG_CREDIT 0
#endif
A_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
HTC_SERVICE_CONNECT_REQ *pConnectReq,
HTC_SERVICE_CONNECT_RESP *pConnectResp)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
A_STATUS status = A_OK;
HTC_PACKET *pSendPacket = NULL;
HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg;
HTC_CONNECT_SERVICE_MSG *pConnectMsg;
HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX;
HTC_ENDPOINT *pEndpoint;
unsigned int maxMsgSize = 0;
cdf_nbuf_t netbuf;
A_UINT8 txAlloc;
int length;
A_BOOL disableCreditFlowCtrl = false;
A_UINT16 conn_flags;
A_UINT16 rsp_msg_id, rsp_msg_serv_id, rsp_msg_max_msg_size;
A_UINT8 rsp_msg_status, rsp_msg_end_id, rsp_msg_serv_meta_len;
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("+htc_connect_service, target:%p SvcID:0x%X\n", target,
pConnectReq->service_id));
do {
AR_DEBUG_ASSERT(pConnectReq->service_id != 0);
if (HTC_CTRL_RSVD_SVC == pConnectReq->service_id) {
/* special case for pseudo control service */
assignedEndpoint = ENDPOINT_0;
maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH;
txAlloc = 0;
} else {
txAlloc = htc_get_credit_allocation(target,
pConnectReq->service_id);
if (!txAlloc) {
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("Service %d does not allocate target credits!\n",
pConnectReq->service_id));
}
/* allocate a packet to send to the target */
pSendPacket = htc_alloc_control_tx_packet(target);
if (NULL == pSendPacket) {
AR_DEBUG_ASSERT(false);
status = A_NO_MEMORY;
break;
}
netbuf =
(cdf_nbuf_t)
GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket);
length =
sizeof(HTC_CONNECT_SERVICE_MSG) +
pConnectReq->MetaDataLength;
/* assemble connect service message */
cdf_nbuf_put_tail(netbuf, length);
pConnectMsg =
(HTC_CONNECT_SERVICE_MSG *) cdf_nbuf_data(netbuf);
if (NULL == pConnectMsg) {
AR_DEBUG_ASSERT(0);
status = A_EFAULT;
break;
}
A_MEMZERO(pConnectMsg, sizeof(HTC_CONNECT_SERVICE_MSG));
conn_flags =
(pConnectReq->
ConnectionFlags & ~HTC_SET_RECV_ALLOC_MASK) |
HTC_CONNECT_FLAGS_SET_RECV_ALLOCATION(txAlloc);
HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG,
MESSAGEID, HTC_MSG_CONNECT_SERVICE_ID);
HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG,
SERVICE_ID, pConnectReq->service_id);
HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG,
CONNECTIONFLAGS, conn_flags);
if (pConnectReq->
ConnectionFlags &
HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL) {
disableCreditFlowCtrl = true;
}
#if defined(HIF_USB)
if (!htc_credit_flow) {
disableCreditFlowCtrl = true;
}
#else
/* Only enable credit for WMI service */
if (!htc_credit_flow
&& pConnectReq->service_id != WMI_CONTROL_SVC) {
disableCreditFlowCtrl = true;
}
#endif
/* check caller if it wants to transfer meta data */
if ((pConnectReq->pMetaData != NULL) &&
(pConnectReq->MetaDataLength <=
HTC_SERVICE_META_DATA_MAX_LENGTH)) {
/* copy meta data into message buffer (after header ) */
A_MEMCPY((A_UINT8 *) pConnectMsg +
sizeof(HTC_CONNECT_SERVICE_MSG),
pConnectReq->pMetaData,
pConnectReq->MetaDataLength);
HTC_SET_FIELD(pConnectMsg,
HTC_CONNECT_SERVICE_MSG,
SERVICEMETALENGTH,
pConnectReq->MetaDataLength);
}
SET_HTC_PACKET_INFO_TX(pSendPacket,
NULL,
(A_UINT8 *) pConnectMsg,
length,
ENDPOINT_0,
HTC_SERVICE_TX_PACKET_TAG);
status = htc_send_pkt((HTC_HANDLE) target, pSendPacket);
/* we don't own it anymore */
pSendPacket = NULL;
if (A_FAILED(status)) {
break;
}
/* wait for response */
status = htc_wait_recv_ctrl_message(target);
if (A_FAILED(status)) {
break;
}
/* we controlled the buffer creation so it has to be properly aligned */
pResponseMsg =
(HTC_CONNECT_SERVICE_RESPONSE_MSG *) target->
CtrlResponseBuffer;
rsp_msg_id = HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
MESSAGEID);
rsp_msg_serv_id =
HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
SERVICEID);
rsp_msg_status =
HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
STATUS);
rsp_msg_end_id =
HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
ENDPOINTID);
rsp_msg_max_msg_size =
HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
MAXMSGSIZE);
rsp_msg_serv_meta_len =
HTC_GET_FIELD(pResponseMsg,
HTC_CONNECT_SERVICE_RESPONSE_MSG,
SERVICEMETALENGTH);
if ((rsp_msg_id != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID)
|| (target->CtrlResponseLength <
sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) {
/* this message is not valid */
AR_DEBUG_ASSERT(false);
status = A_EPROTO;
break;
}
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
("htc_connect_service, service 0x%X connect response from target status:%d, assigned ep: %d\n",
rsp_msg_serv_id, rsp_msg_status,
rsp_msg_end_id));
pConnectResp->ConnectRespCode = rsp_msg_status;
/* check response status */
if (rsp_msg_status != HTC_SERVICE_SUCCESS) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
(" Target failed service 0x%X connect request (status:%d)\n",
rsp_msg_serv_id,
rsp_msg_status));
status = A_EPROTO;
#ifdef QCA_TX_HTT2_SUPPORT
/* Keep work and not to block the control message. */
target->CtrlResponseProcessing = false;
#endif /* QCA_TX_HTT2_SUPPORT */
break;
}
assignedEndpoint = (HTC_ENDPOINT_ID) rsp_msg_end_id;
maxMsgSize = rsp_msg_max_msg_size;
if ((pConnectResp->pMetaData != NULL) &&
(rsp_msg_serv_meta_len > 0) &&
(rsp_msg_serv_meta_len <=
HTC_SERVICE_META_DATA_MAX_LENGTH)) {
/* caller supplied a buffer and the target responded with data */
int copyLength =
min((int)pConnectResp->BufferLength,
(int)rsp_msg_serv_meta_len);
/* copy the meta data */
A_MEMCPY(pConnectResp->pMetaData,
((A_UINT8 *) pResponseMsg) +
sizeof
(HTC_CONNECT_SERVICE_RESPONSE_MSG),
copyLength);
pConnectResp->ActualLength = copyLength;
}
/* done processing response buffer */
target->CtrlResponseProcessing = false;
}
/* the rest of these are parameter checks so set the error status */
status = A_EPROTO;
if (assignedEndpoint >= ENDPOINT_MAX) {
AR_DEBUG_ASSERT(false);
break;
}
if (0 == maxMsgSize) {
AR_DEBUG_ASSERT(false);
break;
}
pEndpoint = &target->endpoint[assignedEndpoint];
pEndpoint->Id = assignedEndpoint;
if (pEndpoint->service_id != 0) {
/* endpoint already in use! */
AR_DEBUG_ASSERT(false);
break;
}
/* return assigned endpoint to caller */
pConnectResp->Endpoint = assignedEndpoint;
pConnectResp->MaxMsgLength = maxMsgSize;
/* setup the endpoint */
/* service_id marks the endpoint in use */
pEndpoint->service_id = pConnectReq->service_id;
pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth;
pEndpoint->MaxMsgLength = maxMsgSize;
pEndpoint->TxCredits = txAlloc;
pEndpoint->TxCreditSize = target->TargetCreditSize;
pEndpoint->TxCreditsPerMaxMsg =
maxMsgSize / target->TargetCreditSize;
if (maxMsgSize % target->TargetCreditSize) {
pEndpoint->TxCreditsPerMaxMsg++;
}
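/* i.e. TxCreditsPerMaxMsg = ceil(maxMsgSize / TargetCreditSize); for
* example (illustrative numbers only), a 1600-byte max message with a
* 1536-byte target credit size needs 2 credits. */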
#if DEBUG_CREDIT
cdf_print(" Endpoint%d initial credit:%d, size:%d.\n",
pEndpoint->Id, pEndpoint->TxCredits,
pEndpoint->TxCreditSize);
#endif
/* copy all the callbacks */
pEndpoint->EpCallBacks = pConnectReq->EpCallbacks;
status = hif_map_service_to_pipe(target->hif_dev,
pEndpoint->service_id,
&pEndpoint->UL_PipeID,
&pEndpoint->DL_PipeID,
&pEndpoint->ul_is_polled,
&pEndpoint->dl_is_polled);
if (A_FAILED(status)) {
break;
}
cdf_assert(!pEndpoint->dl_is_polled); /* not currently supported */
if (pEndpoint->ul_is_polled) {
cdf_softirq_timer_init(target->osdev,
&pEndpoint->ul_poll_timer,
htc_send_complete_check_cleanup,
pEndpoint,
CDF_TIMER_TYPE_SW);
}
AR_DEBUG_PRINTF(ATH_DEBUG_SETUP,
("HTC Service:0x%4.4X, ULpipe:%d DLpipe:%d id:%d Ready\n",
pEndpoint->service_id, pEndpoint->UL_PipeID,
pEndpoint->DL_PipeID, pEndpoint->Id));
if (disableCreditFlowCtrl && pEndpoint->TxCreditFlowEnabled) {
pEndpoint->TxCreditFlowEnabled = false;
AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
("HTC Service:0x%4.4X ep:%d TX flow control disabled\n",
pEndpoint->service_id,
assignedEndpoint));
}
} while (false);
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_connect_service \n"));
return status;
}
void htc_set_credit_distribution(HTC_HANDLE HTCHandle,
void *pCreditDistContext,
HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
HTC_SERVICE_ID ServicePriorityOrder[],
int ListLength)
{
/* NOT Supported, this transport does not use a credit based flow control mechanism */
}
void htc_fw_event_handler(void *context, CDF_STATUS status)
{
HTC_TARGET *target = (HTC_TARGET *) context;
HTC_INIT_INFO *initInfo = &target->HTCInitInfo;
/* check if target failure handler exists and pass error code to it. */
if (target->HTCInitInfo.TargetFailure != NULL) {
initInfo->TargetFailure(initInfo->pContext, status);
}
}
/* Disable ASPM : disable PCIe low power */
void htc_disable_aspm(void)
{
hif_disable_aspm();
}