net/mlx5e: Add support for using xdp->data_meta
Add support for using xdp->data_meta for cross-program communication.
Pass "true" to the last argument of xdp_prepare_buff(). After the SKB is
built, call skb_metadata_set() if metadata was pushed.

Signed-off-by: Alex Liu <liualex@fb.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
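For context on how the data_meta area gets populated in the first place: an XDP program grows it in front of the packet with bpf_xdp_adjust_meta() and writes into it, and this patch then carries that region into the SKB via skb_metadata_set(). The sketch below is a hypothetical producer-side program, not part of this patch; struct rx_meta and xdp_push_meta are made-up names for illustration.

/* Hypothetical XDP program showing the producer side of xdp->data_meta.
 * struct rx_meta and its field are illustrative only; the kernel expects
 * the metadata length to be a multiple of 4 bytes and fairly small.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct rx_meta {
	__u32 mark;		/* value to hand to a later BPF program */
};

SEC("xdp")
int xdp_push_meta(struct xdp_md *ctx)
{
	struct rx_meta *meta;
	void *data;

	/* Grow the metadata area immediately in front of the packet data. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;

	/* Bounds check required by the verifier before dereferencing. */
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	meta->mark = 0x2a;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";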
@@ -1489,7 +1489,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
 static inline
 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
				       u32 frag_size, u16 headroom,
-				       u32 cqe_bcnt)
+				       u32 cqe_bcnt, u32 metasize)
 {
	struct sk_buff *skb = build_skb(va, frag_size);
 
@@ -1501,6 +1501,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
	skb_reserve(skb, headroom);
	skb_put(skb, cqe_bcnt);
 
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
	return skb;
 }
 
@@ -1508,7 +1511,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
				u32 len, struct xdp_buff *xdp)
 {
	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
-	xdp_prepare_buff(xdp, va, headroom, len, false);
+	xdp_prepare_buff(xdp, va, headroom, len, true);
 }
 
 static struct sk_buff *
@@ -1521,6 +1524,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
	struct sk_buff *skb;
	void *va, *data;
	u32 frag_size;
+	u32 metasize;
 
	va = page_address(di->page) + wi->offset;
	data = va + rx_headroom;
@@ -1537,7 +1541,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
	rx_headroom = xdp.data - xdp.data_hard_start;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
-	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+	metasize = xdp.data - xdp.data_meta;
+	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
	if (unlikely(!skb))
		return NULL;
 
@@ -1836,6 +1841,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
	struct sk_buff *skb;
	void *va, *data;
	u32 frag_size;
+	u32 metasize;
 
	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1861,7 +1867,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
	rx_headroom = xdp.data - xdp.data_hard_start;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
-	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+	metasize = xdp.data - xdp.data_meta;
+	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
	if (unlikely(!skb))
		return NULL;
 
@@ -1892,7 +1899,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
	dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
	prefetchw(hdr);
	prefetch(data);
-	skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+	skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
 
	if (unlikely(!skb))
		return NULL;
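With mlx5e now calling skb_metadata_set(), the bytes written by the XDP program survive past the driver and can be read by a later program on the same packet, for example at tc ingress through __sk_buff's data_meta pointer. Again a hedged sketch, reusing the hypothetical struct rx_meta from the earlier example:

/* Hypothetical tc (clsact ingress) consumer of the metadata that the XDP
 * program above pushed and that the driver now preserves in the SKB.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct rx_meta {
	__u32 mark;
};

SEC("tc")
int tc_read_meta(struct __sk_buff *skb)
{
	struct rx_meta *meta = (void *)(long)skb->data_meta;
	void *data = (void *)(long)skb->data;

	/* Empty or truncated metadata area: nothing to consume. */
	if ((void *)(meta + 1) > data)
		return TC_ACT_OK;

	skb->mark = meta->mark;	/* e.g. feed the value to later filters */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";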