octeontx2-af: Support variable number of lmacs
[ Upstream commit f2e664ad503d4e5ce7c42a0862ab164331a0ef37 ] Most of the code in the CGX/RPM driver assumes that the maximum number of LMACs per MAC is always 4 and that the number of MAC blocks is also 4. With this assumption, the maximum number of supported interfaces is hardcoded to 16. This creates a problem because the next-generation CN10KB silicon MAC supports 8 LMACs per MAC block. This patch solves the problem by using the "max lmac per MAC block" value from the constant CSRs and by using the cgx_cnt_max value, which is populated based on the number of MAC blocks supported by the silicon. Signed-off-by: Rakesh Babu Saladi <rsaladi2@marvell.com> Signed-off-by: Hariprasad Kelam <hkelam@marvell.com> Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com> Signed-off-by: Paolo Abeni <pabeni@redhat.com> Stable-dep-of: e307b5a845c5 ("octeontx2-af: Fix pause frame configuration") Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
7d3912613d
commit
0f74dde5be
@ -78,7 +78,7 @@ static bool is_dev_rpm(void *cgxd)
|
||||
|
||||
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
|
||||
{
|
||||
if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
|
||||
if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
|
||||
return false;
|
||||
return test_bit(lmac_id, &cgx->lmac_bmap);
|
||||
}
|
||||
@ -90,7 +90,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
|
||||
{
|
||||
int tmp, id = 0;
|
||||
|
||||
for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
|
||||
if (tmp == lmac_id)
|
||||
break;
|
||||
id++;
|
||||
@ -121,7 +121,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
|
||||
|
||||
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
|
||||
{
|
||||
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
|
||||
if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
|
||||
return NULL;
|
||||
|
||||
return cgx->lmac_idmap[lmac_id];
|
||||
@ -1410,7 +1410,7 @@ int cgx_get_fwdata_base(u64 *base)
|
||||
if (!cgx)
|
||||
return -ENXIO;
|
||||
|
||||
first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
|
||||
first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
|
||||
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
|
||||
err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
|
||||
if (!err)
|
||||
@ -1499,7 +1499,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
|
||||
|
||||
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
|
||||
{
|
||||
int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
|
||||
int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
|
||||
u64 req = 0;
|
||||
|
||||
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
|
||||
@ -1537,7 +1537,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
|
||||
int i, err;
|
||||
|
||||
/* Do Link up for all the enabled lmacs */
|
||||
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
|
||||
err = cgx_fwi_link_change(cgx, i, true);
|
||||
if (err)
|
||||
dev_info(dev, "cgx port %d:%d Link up command failed\n",
|
||||
@ -1557,14 +1557,6 @@ int cgx_lmac_linkup_start(void *cgxd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cgx_lmac_get_fifolen(struct cgx *cgx)
|
||||
{
|
||||
u64 cfg;
|
||||
|
||||
cfg = cgx_read(cgx, 0, CGX_CONST);
|
||||
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
|
||||
}
|
||||
|
||||
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
|
||||
int cnt, bool req_free)
|
||||
{
|
||||
@ -1619,17 +1611,14 @@ static int cgx_lmac_init(struct cgx *cgx)
|
||||
u64 lmac_list;
|
||||
int i, err;
|
||||
|
||||
cgx_lmac_get_fifolen(cgx);
|
||||
|
||||
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
|
||||
/* lmac_list specifies which lmacs are enabled
|
||||
* when bit n is set to 1, LMAC[n] is enabled
|
||||
*/
|
||||
if (cgx->mac_ops->non_contiguous_serdes_lane)
|
||||
lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
|
||||
|
||||
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
|
||||
cgx->lmac_count = MAX_LMAC_PER_CGX;
|
||||
if (cgx->lmac_count > cgx->max_lmac_per_mac)
|
||||
cgx->lmac_count = cgx->max_lmac_per_mac;
|
||||
|
||||
for (i = 0; i < cgx->lmac_count; i++) {
|
||||
lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
|
||||
@ -1707,7 +1696,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
|
||||
}
|
||||
|
||||
/* Free all lmac related resources */
|
||||
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
|
||||
lmac = cgx->lmac_idmap[i];
|
||||
if (!lmac)
|
||||
continue;
|
||||
@ -1723,6 +1712,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
|
||||
|
||||
static void cgx_populate_features(struct cgx *cgx)
|
||||
{
|
||||
u64 cfg;
|
||||
|
||||
cfg = cgx_read(cgx, 0, CGX_CONST);
|
||||
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
|
||||
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
|
||||
|
||||
if (is_dev_rpm(cgx))
|
||||
cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
|
||||
RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
|
||||
|
@ -18,11 +18,8 @@
|
||||
/* PCI BAR nos */
|
||||
#define PCI_CFG_REG_BAR_NUM 0
|
||||
|
||||
#define CGX_ID_MASK 0x7
|
||||
#define MAX_LMAC_PER_CGX 4
|
||||
#define CGX_ID_MASK 0xF
|
||||
#define MAX_DMAC_ENTRIES_PER_CGX 32
|
||||
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
|
||||
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
|
||||
|
||||
/* Registers */
|
||||
#define CGXX_CMRX_CFG 0x00
|
||||
@ -56,6 +53,7 @@
|
||||
#define CGXX_SCRATCH1_REG 0x1058
|
||||
#define CGX_CONST 0x2000
|
||||
#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
|
||||
#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
|
||||
#define CGXX_SPUX_CONTROL1 0x10000
|
||||
#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
|
||||
#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
|
||||
|
@ -128,7 +128,10 @@ struct cgx {
|
||||
struct pci_dev *pdev;
|
||||
u8 cgx_id;
|
||||
u8 lmac_count;
|
||||
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
|
||||
/* number of LMACs per MAC could be 4 or 8 */
|
||||
u8 max_lmac_per_mac;
|
||||
#define MAX_LMAC_COUNT 8
|
||||
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
|
||||
struct work_struct cgx_cmd_work;
|
||||
struct workqueue_struct *cgx_cmd_workq;
|
||||
struct list_head cgx_list;
|
||||
|
@ -480,7 +480,7 @@ struct rvu {
|
||||
u8 cgx_mapped_pfs;
|
||||
u8 cgx_cnt_max; /* CGX port count max */
|
||||
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
|
||||
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
|
||||
u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
|
||||
* every cgx lmac port
|
||||
*/
|
||||
unsigned long pf_notify_bmap; /* Flags for PF notification */
|
||||
|
@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
|
||||
return (cgx_features_get(cgxd) & feature);
|
||||
}
|
||||
|
||||
#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
|
||||
/* Returns bitmap of mapped PFs */
|
||||
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
|
||||
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
|
||||
{
|
||||
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
|
||||
}
|
||||
@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
|
||||
if (!pfmap)
|
||||
return -ENODEV;
|
||||
else
|
||||
return find_first_bit(&pfmap, 16);
|
||||
return find_first_bit(&pfmap,
|
||||
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
|
||||
}
|
||||
|
||||
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
|
||||
@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
|
||||
if (!cgx_cnt_max)
|
||||
return 0;
|
||||
|
||||
if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
|
||||
if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
|
||||
return -EINVAL;
|
||||
|
||||
/* Alloc map table
|
||||
* An additional entry is required since PF id starts from 1 and
|
||||
* hence entry at offset 0 is invalid.
|
||||
*/
|
||||
size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
|
||||
size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
|
||||
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
|
||||
if (!rvu->pf2cgxlmac_map)
|
||||
return -ENOMEM;
|
||||
@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
|
||||
memset(rvu->pf2cgxlmac_map, 0xFF, size);
|
||||
|
||||
/* Reverse map table */
|
||||
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
|
||||
cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
|
||||
GFP_KERNEL);
|
||||
rvu->cgxlmac2pf_map =
|
||||
devm_kzalloc(rvu->dev,
|
||||
cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
|
||||
GFP_KERNEL);
|
||||
if (!rvu->cgxlmac2pf_map)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
|
||||
if (!rvu_cgx_pdata(cgx, rvu))
|
||||
continue;
|
||||
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
|
||||
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
|
||||
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
|
||||
iter);
|
||||
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
|
||||
@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
|
||||
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
|
||||
|
||||
do {
|
||||
pfid = find_first_bit(&pfmap, 16);
|
||||
pfid = find_first_bit(&pfmap,
|
||||
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
|
||||
clear_bit(pfid, &pfmap);
|
||||
|
||||
/* check if notification is enabled */
|
||||
@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
|
||||
if (!cgxd)
|
||||
continue;
|
||||
lmac_bmap = cgx_get_lmac_bmap(cgxd);
|
||||
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
|
||||
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
|
||||
if (err)
|
||||
dev_err(rvu->dev,
|
||||
@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
|
||||
if (!cgxd)
|
||||
continue;
|
||||
lmac_bmap = cgx_get_lmac_bmap(cgxd);
|
||||
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
|
||||
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
|
||||
cgx_lmac_evh_unregister(cgxd, lmac);
|
||||
}
|
||||
|
||||
|
@ -2618,7 +2618,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
|
||||
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
|
||||
rvu->rvu_dbg.cgx_root);
|
||||
|
||||
for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
|
||||
/* lmac debugfs dir */
|
||||
sprintf(dname, "lmac%d", lmac_id);
|
||||
rvu->rvu_dbg.lmac =
|
||||
|
@ -4079,7 +4079,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
|
||||
|
||||
/* Get LMAC id's from bitmap */
|
||||
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
|
||||
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
|
||||
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
|
||||
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
|
||||
if (!lmac_fifo_len) {
|
||||
dev_err(rvu->dev,
|
||||
|
@ -1999,7 +1999,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
|
||||
/* Install SDP drop rule */
|
||||
drop_mcam_idx = &table->num_drop_rules;
|
||||
|
||||
max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
|
||||
max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
|
||||
PF_CGXMAP_BASE;
|
||||
|
||||
for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
|
||||
if (rvu->pf2cgxlmac_map[i] == 0xFF)
|
||||
continue;
|
||||
|
Loading…
Reference in New Issue
Block a user