mlxsw: pci: Avoid flow control for EMAD packets
Locally generated packets ingress the device through its CPU port. When the CPU port is congested and there are not enough credits in its headroom buffer, packets can be dropped. While this might be acceptable for data packets that traverse the network, configuration packets exchanged between the host and the device (EMADs) should not be subjected to this flow control. The "sdq_lp" bit in the SDQ (Send Descriptor Queue) context allows the host to instruct the device to treat packets sent on this queue as "local processing" and always process them, regardless of the state of the CPU port's headroom. Add the definition of this bit and set it for the dedicated SDQ reserved for the transmission of EMAD packets. This makes the "local processing" bit in the WQE (Work Queue Element) redundant, so clear it. Signed-off-by: Danielle Ratson <danieller@nvidia.com> Signed-off-by: Ido Schimmel <idosch@nvidia.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
7da0694c01
commit
d43e427174
@@ -935,6 +935,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
 
+enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp {
+	MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE,
+	MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE,
+};
+
+/* cmd_mbox_sw2hw_dq_sdq_lp
+ * SDQ local Processing
+ * 0: local processing by wqe.lp
+ * 1: local processing (ignoring wqe.lp)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1);
+
 /* cmd_mbox_sw2hw_dq_sdq_tclass
  * SDQ: CPU Egress TClass
  * RDQ: Reserved
@@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			      struct mlxsw_pci_queue *q)
 {
 	int tclass;
+	int lp;
 	int i;
 	int err;
 
@@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	q->consumer_counter = 0;
 	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
 						      MLXSW_PCI_SDQ_CTL_TC;
+	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
+						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
 
 	/* Set CQ of same number of this SDQ. */
 	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
 	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
 	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -1678,7 +1682,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
 
 	wqe = elem_info->elem;
 	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
-	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+	mlxsw_pci_wqe_lp_set(wqe, 0);
 	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
 
 	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
Loading…
x
Reference in New Issue
Block a user