Merge 33dc9614dc ("Merge tag 'ktest-v5.10-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest") into android-mainline

Steps on the way to 5.10-rc8/final

Resolves conflicts with:
	net/xfrm/xfrm_state.c

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I06495103fc0f2531b241b3577820f4461ba83dd5
Greg Kroah-Hartman 2020-12-11 10:25:57 +01:00
commit a070681690
122 changed files with 853 additions and 393 deletions


@ -3247,7 +3247,7 @@ R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@chromium.org>
R: KP Singh <kpsingh@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
@ -3366,7 +3366,7 @@ F: arch/x86/net/
X: arch/x86/net/bpf_jit_comp32.c
BPF LSM (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@chromium.org>
M: KP Singh <kpsingh@kernel.org>
R: Florent Revest <revest@chromium.org>
R: Brendan Jackman <jackmanb@chromium.org>
L: bpf@vger.kernel.org
@ -10570,6 +10570,13 @@ S: Supported
F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER
M: Vadym Kochan <vkochan@marvell.com>
M: Taras Chornyi <tchornyi@marvell.com>
S: Supported
W: https://github.com/Marvell-switching/switchdev-prestera
F: drivers/net/ethernet/marvell/prestera/
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes


@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y := fault.o mem.o pgtable.o mmap.o \
obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o


@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/uaccess.h>
#include <linux/kernel.h>
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return is_kernel_addr((unsigned long)unsafe_src);
}


@ -1269,9 +1269,6 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
unsigned long flags;
rdma_for_each_port(device, port_num) {
if (!rdma_ib_or_roce(device, port_num))
continue;
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {


@ -1522,6 +1522,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
goto out;
}
@ -2114,6 +2115,7 @@ static int cm_req_handler(struct cm_work *work)
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;


@ -940,8 +940,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
1);
EFA_SET(&params.modify_mask,
EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
params.cur_qp_state = qp_attr->cur_qp_state;
params.qp_state = qp_attr->qp_state;
params.cur_qp_state = cur_state;
params.qp_state = new_state;
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {


@ -1936,6 +1936,15 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
}
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
/* calculate the db_rec_db2 data since it is constant so no
* need to reflect from user
*/
qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
qp->urq.db_rec_db2_data.data.value =
cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
&qp->urq.db_rec_db2_data,
DB_REC_WIDTH_32B,


@ -88,13 +88,15 @@ enum pulse8_msgcodes {
MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
MSGCODE_GET_DEVICE_TYPE,
MSGCODE_SET_DEVICE_TYPE,
MSGCODE_GET_HDMI_VERSION,
MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */
MSGCODE_SET_HDMI_VERSION,
MSGCODE_GET_OSD_NAME,
MSGCODE_SET_OSD_NAME,
MSGCODE_WRITE_EEPROM,
MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
MSGCODE_SET_ACTIVE_SOURCE,
MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */
MSGCODE_SET_AUTO_POWER_ON,
MSGCODE_FRAME_EOM = 0x80,
MSGCODE_FRAME_ACK = 0x40,
@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = {
"WRITE_EEPROM",
"GET_ADAPTER_TYPE",
"SET_ACTIVE_SOURCE",
"GET_AUTO_POWER_ON",
"SET_AUTO_POWER_ON",
};
static const char *pulse8_msgname(u8 cmd)
@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
if (err)
goto unlock;
cmd[0] = MSGCODE_SET_HDMI_VERSION;
cmd[1] = adap->log_addrs.cec_version;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
if (pulse8->vers < 10) {
cmd[0] = MSGCODE_SET_HDMI_VERSION;
cmd[1] = adap->log_addrs.cec_version;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
}
if (adap->log_addrs.osd_name[0]) {
size_t osd_len = strlen(adap->log_addrs.osd_name);
@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio)
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
pulse8->serio = NULL;
serio_set_drvdata(serio, NULL);
serio_close(serio);
}
@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
dev_dbg(pulse8->dev, "Autonomous mode: %s",
data[0] ? "on" : "off");
if (pulse8->vers >= 10) {
cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (!err)
dev_dbg(pulse8->dev, "Auto Power On: %s",
data[0] ? "on" : "off");
}
cmd[0] = MSGCODE_GET_DEVICE_TYPE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
cec_phys_addr_exp(*pa));
cmd[0] = MSGCODE_GET_HDMI_VERSION;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
log_addrs->cec_version = data[0];
dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
if (pulse8->vers < 10) {
cmd[0] = MSGCODE_GET_HDMI_VERSION;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
log_addrs->cec_version = data[0];
dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
}
cmd[0] = MSGCODE_GET_OSD_NAME;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
if (err < 0)
goto free_device;
if (err < 0) {
kfree(pulse8);
return err;
}
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
@ -874,8 +892,6 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
free_device:
kfree(pulse8);
return err;
}


@ -414,6 +414,17 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
/*
* We need to set these flags here so that the videobuf2 core
* will call ->prepare()/->finish() cache sync/flush on vb2
* buffers when appropriate. However, we can avoid explicit
* ->prepare() and ->finish() cache sync for DMABUF buffers,
* because DMA exporter takes care of it.
*/
if (q->memory != VB2_MEMORY_DMABUF) {
vb->need_cache_sync_on_prepare = 1;
vb->need_cache_sync_on_finish = 1;
}
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];


@ -151,15 +151,12 @@ static inline u32 mtk_chk_period(struct mtk_ir *ir)
{
u32 val;
/* Period of raw software sampling in ns */
val = DIV_ROUND_CLOSEST(1000000000ul,
clk_get_rate(ir->bus) / ir->data->div);
/*
* Period for software decoder used in the
* unit of raw software sampling
*/
val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val);
val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
dev_dbg(ir->dev, "@pwm clk = \t%lu\n",
clk_get_rate(ir->bus) / ir->data->div);
@ -412,7 +409,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
mtk_irq_enable(ir, MTK_IRINT_EN);
dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
MTK_IR_SAMPLE);
return 0;


@ -504,11 +504,11 @@ void vidtv_channel_si_destroy(struct vidtv_mux *m)
{
u32 i;
vidtv_psi_pat_table_destroy(m->si.pat);
for (i = 0; i < m->si.pat->num_pmt; ++i)
vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
vidtv_psi_pat_table_destroy(m->si.pat);
kfree(m->si.pmt_secs);
vidtv_psi_sdt_table_destroy(m->si.sdt);
vidtv_psi_nit_table_destroy(m->si.nit);


@ -420,7 +420,7 @@ void vidtv_psi_desc_assign(struct vidtv_psi_desc **to,
struct vidtv_psi_desc *desc);
/**
* vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
* vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
* @pmt: The PMT section that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
@ -434,7 +434,7 @@ void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_desc *desc);
/**
* vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
* vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
* @sdt: The SDT that will contain the descriptor loop
* @to: Where in the PMT to assign this descriptor loop to
* @desc: The descriptor loop that will be assigned.
@ -474,7 +474,7 @@ void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
/**
* vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each
* vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each
* program found in the PAT
* @pat: The PAT to look for programs.
* @pcr_pid: packet ID for the PCR to be used for the program described in this
@ -743,7 +743,7 @@ struct vidtv_psi_table_eit {
struct vidtv_psi_table_eit
*vidtv_psi_eit_table_init(u16 network_id,
u16 transport_stream_id,
u16 service_id);
__be16 service_id);
/**
* struct vidtv_psi_eit_write_args - Arguments for writing an EIT section


@ -467,8 +467,10 @@ struct vidtv_encoder
e->is_video_encoder = false;
ctx = kzalloc(priv_sz, GFP_KERNEL);
if (!ctx)
if (!ctx) {
kfree(e);
return NULL;
}
e->ctx = ctx;
ctx->last_duration = 0;


@ -44,7 +44,7 @@ struct vidtv_mpeg_ts {
u8 adaptation_field:1;
u8 scrambling:2;
} __packed;
struct vidtv_mpeg_ts_adaption adaption[];
struct vidtv_mpeg_ts_adaption *adaption;
} __packed;
/**


@ -745,6 +745,19 @@ const struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
{
if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
return;
if (mode == BOND_MODE_ACTIVEBACKUP)
bond_dev->wanted_features |= BOND_XFRM_FEATURES;
else
bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
netdev_update_features(bond_dev);
}
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@ -767,13 +780,8 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
#ifdef CONFIG_XFRM_OFFLOAD
if (newval->value == BOND_MODE_ACTIVEBACKUP)
bond->dev->wanted_features |= BOND_XFRM_FEATURES;
else
bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
netdev_change_features(bond->dev);
#endif /* CONFIG_XFRM_OFFLOAD */
if (bond->dev->reg_state == NETREG_REGISTERED)
bond_set_xfrm_features(bond->dev, newval->value);
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;


@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev)
/* check or determine and set bittime */
ret = open_candev(ndev);
if (!ret)
ret = softing_startstop(ndev, 1);
if (ret)
return ret;
ret = softing_startstop(ndev, 1);
if (ret < 0)
close_candev(ndev);
return ret;
}


@ -569,7 +569,6 @@ static int felix_setup(struct dsa_switch *ds)
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
int tc;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@ -608,12 +607,6 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_UC);
/* Setup the per-traffic class flooding PGIDs */
for (tc = 0; tc < FELIX_NUM_TC; tc++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, tc);
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;


@ -1429,6 +1429,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = FELIX_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);


@ -1210,6 +1210,7 @@ static int seville_probe(struct platform_device *pdev)
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = 1;
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);


@ -21,6 +21,7 @@ config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
depends on PCI
select PHYLIB
select CRC32
help
This driver supports Agere ET-1310 ethernet adapters.


@ -23,6 +23,7 @@ config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
select PHYLINK
select CRC32
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit


@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
cpl->len = htons(pktlen);
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |


@ -33,6 +33,7 @@ config FTGMAC100
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some


@ -25,6 +25,7 @@ config FEC
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
select CRC32
select PHYLIB
imply PTP_1588_CLOCK
help


@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
if (!of_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
of_node_put(node);
return -ENODEV;
}


@ -143,8 +143,8 @@ static const struct {
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
{ ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
{ ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
{ ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
{ ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
@ -163,9 +163,13 @@ static const struct {
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
{ ENETC_PM0_T64, "MAC tx 64 byte packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
{ ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
{ ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
{ ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
{ ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
{ ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },


@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
#define ENETC_PM0_R1518 0x8198
#define ENETC_PM0_R1519X 0x81A0
#define ENETC_PM0_R1522 0x8198
#define ENETC_PM0_R1523X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
#define ENETC_PM0_T64 0x8270
#define ENETC_PM0_T127 0x8278
#define ENETC_PM0_T255 0x8280
#define ENETC_PM0_T511 0x8288
#define ENETC_PM0_T1023 0x8290
#define ENETC_PM0_T1518 0x8298
#define ENETC_PM0_T1522 0x8298
#define ENETC_PM0_T1523X 0x82A0
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8


@ -4,6 +4,7 @@ config FSL_FMAN
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
select CRC32
default n
help
Freescale Data-Path Acceleration Architecture Frame Manager


@ -35,8 +35,6 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
#pragma pack(1)
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},


@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
/* Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
mac_data |= BIT(28);
mac_data &= ~BIT(28);
ew32(FEXTNVM9, mac_data);
/* Enable K1 off to enable mPHY Power Gating */
mac_data = er32(FEXTNVM6);
mac_data |= BIT(31);
ew32(FEXTNVM12, mac_data);
ew32(FEXTNVM6, mac_data);
/* Enable mPHY power gating for any link and speed */
mac_data = er32(FEXTNVM8);
@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
/* Disable K1 off */
mac_data = er32(FEXTNVM6);
mac_data &= ~BIT(31);
ew32(FEXTNVM12, mac_data);
ew32(FEXTNVM6, mac_data);
/* Disable Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
mac_data &= ~BIT(28);
mac_data |= BIT(28);
ew32(FEXTNVM9, mac_data);
/* Cancel not waking from dynamic


@ -1850,6 +1850,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@ -1872,7 +1873,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@ -1883,7 +1885,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
@ -1942,16 +1944,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
* @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
const unsigned int size)
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@ -2102,14 +2112,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
* @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
if (i40e_can_reuse_rx_page(rx_buffer)) {
if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@ -2336,6 +2348,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
@ -2378,7 +2391,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@ -2421,7 +2434,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
i40e_put_rx_buffer(rx_ring, rx_buffer);
i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))


@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
* @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
* @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
const unsigned int size)
const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
0;
#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
* @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
if (ice_can_reuse_rx_page(rx_buf)) {
if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
ice_put_rx_buf(rx_ring, NULL);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
ice_put_rx_buf(rx_ring, rx_buf);
ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@ -1187,7 +1200,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
break;
}
ice_put_rx_buf(rx_ring, rx_buf);
ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */


@ -138,6 +138,8 @@ struct vf_mac_filter {
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
@ -247,6 +249,9 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
/* TX resources are shared between XDP and netstack
* and we need to tag the buffer type to distinguish them
*/
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,


@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
if (frame_size > igb_rx_bufsz(ring))
if (frame_size > igb_rx_bufsz(ring)) {
NL_SET_ERR_MSG_MOD(bpf->extack,
"The RX buffer size is too small for the frame size");
netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
igb_rx_bufsz(ring), frame_size);
return -EINVAL;
}
}
old_prog = xchg(&adapter->xdp_prog, prog);
@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp->prog);
return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
nq->trans_start = jiffies;
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN;
adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
netdev_warn(adapter->netdev,
"Requested MTU size is not supported with XDP. Max frame size is %d\n",
max_frame);
return -EINVAL;
}
}
@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
if (metasize)
skb_metadata_set(skb, metasize);
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
xdp_do_flush_map();
xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);


@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
const unsigned int size)
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@ -2055,9 +2063,10 @@ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct sk_buff *skb)
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
if (ixgbe_can_reuse_rx_page(rx_buffer)) {
if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */


@ -318,8 +318,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
err = -EINVAL;
goto err_port_init;
}
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address


@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
en_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
en_dbg(DRV, priv, "Scheduling port restart\n");
queue_work(mdev->workqueue, &priv->restart_task);
}
}
@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
local_bh_enable();
}
clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
watchdog_task);
restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
@ -2377,7 +2381,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
/* NIC is probably restarting - let watchdog task reset
/* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
@ -2386,7 +2390,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
queue_work(mdev->workqueue, &priv->watchdog_task);
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
&priv->state))
queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
@ -2792,7 +2798,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
queue_work(mdev->workqueue, &priv->watchdog_task);
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
queue_work(mdev->workqueue, &priv->restart_task);
}
}
@ -3165,7 +3172,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);


@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
u16 cqe_index, struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info;
struct mlx4_en_tx_desc *tx_desc;
u16 wqe_index;
int desc_size;
en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
false);
wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
tx_info = &ring->tx_info[wqe_index];
desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
wqe_index, desc_size);
tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
return;
en_err(priv, "Scheduling port restart\n");
queue_work(mdev->workqueue, &priv->restart_task);
}
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
cqe_err->vendor_err_syndrome,
cqe_err->syndrome);
}
MLX4_CQE_OPCODE_ERROR))
if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;


@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
} buf[MLX4_EN_CACHE_SIZE];
};
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
struct mutex mutex; /* for mutual access to stats bitmap */
};
enum {
MLX4_EN_STATE_FLAG_RESTARTING,
};
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@ -595,7 +604,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
struct work_struct watchdog_task;
struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
@ -641,6 +650,7 @@ struct mlx4_en_priv {
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
unsigned long state;
};
enum mlx4_en_wol {


@ -198,6 +198,7 @@ config MLX5_EN_TLS
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
select CRC32
default y
help
Build support for software-managed steering in the NIC.


@ -47,6 +47,7 @@ config LAN743X
depends on PCI
select PHYLIB
select CRC16
select CRC32
help
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip


@ -1489,10 +1489,11 @@ int ocelot_init(struct ocelot *ocelot)
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, 0);
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |


@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
ocelot->num_phys_ports = of_get_child_count(ports);
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;


@ -22,6 +22,7 @@ config NFP
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select CRC32
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as a advanced Ethernet NIC. It works with both


@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
struct nfp_net_dp *dp;
int err;
if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
return -EBUSY;
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
xdp_attachment_setup(&nn->xdp, bpf);
@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
int err;
if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
return -EBUSY;
err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;


@ -3,6 +3,7 @@ config LPC_ENET
tristate "NXP ethernet MAC on LPC devices"
depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
select CRC32
help
Say Y or M here if you want to use the NXP ethernet MAC included on
some NXP LPC devices. You can safely enable this option for LPC32xx


@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER
config ROCKER
tristate "Rocker switch driver (EXPERIMENTAL)"
depends on PCI && NET_SWITCHDEV && BRIDGE
select CRC32
help
This driver supports Rocker switch device.


@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(dwmac->ops->addr_width));
if (ret) {
dev_err(&pdev->dev, "DMA mask set failed\n");
goto err_dma_mask;
}
plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
err_dma_mask:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);

View File

@ -30,7 +30,6 @@
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return -ENOMEM;
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
ARRAY_SIZE(mux_parents), &clk_mux_ops,
&clk_configs->m250_mux.hw);


@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
10000, 100000);
10000, 1000000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)


@ -1533,6 +1533,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
stmmac_free_tx_buffer(priv, queue, i);
}
/**
* stmmac_free_tx_skbufs - free TX skb buffers
* @priv: private structure
*/
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
dma_free_tx_skbufs(priv, queue);
}
/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
@ -2895,9 +2908,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@ -2916,6 +2926,11 @@ static int stmmac_release(struct net_device *dev)
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@ -4930,6 +4945,14 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
/* The current IP register MAC_HW_Feature1[ADDR64] only define
* 32/40/64 bit width, but some SOC support others like i.MX8MP
* support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
* So overwrite dma_cap.addr64 according to HW real design.
*/
if (priv->plat->addr64)
priv->dma_cap.addr64 = priv->plat->addr64;
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@ -5142,6 +5165,11 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
@ -5247,11 +5275,20 @@ int stmmac_resume(struct device *dev)
return ret;
}
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
phylink_start(priv->phylink);
/* We may have called phylink_speed_down before */
phylink_speed_up(priv->phylink);
rtnl_unlock();
}
rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
@ -5265,14 +5302,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
phylink_start(priv->phylink);
/* We may have called phylink_speed_down before */
phylink_speed_up(priv->phylink);
rtnl_unlock();
}
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);


@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
if (!priv->xdpi.prog && !prog)
return 0;
if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
return -EBUSY;
WRITE_ONCE(priv->xdp_prog, prog);
xdp_attachment_setup(&priv->xdpi, bpf);


@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
struct resource *res;
const void *addr;
__be32 *p;
bool little_endian;
@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!lp->sdma_regs) {
lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
return -ENOMEM;
return PTR_ERR(lp->sdma_regs);
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;


@ -257,21 +257,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
goto rx_error;
switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
if (pskb_may_pull(skb, sizeof(struct iphdr)))
goto rx_error;
break;
case htons(ETH_P_IPV6):
if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto rx_error;
break;
default:
goto rx_error;
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
geneve->dev->stats.rx_errors++;
goto drop;
}
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@ -308,8 +298,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
dev_sw_netstats_rx_add(geneve->dev, len);
return;
rx_error:
geneve->dev->stats.rx_errors++;
drop:
/* Consume bad packet */
kfree_skb(skb);


@ -156,6 +156,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
/* The allocator will give us a power-of-2 number of pages. But we
* can't guarantee that, so request it. That way we won't waste any
* memory that would be available beyond the required space.
*
* Note that gsi_trans_pool_exit_dma() assumes the total allocated
* size is exactly (count * size).
*/
total_size = get_order(total_size) << PAGE_SHIFT;
@ -175,7 +178,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
dma_free_coherent(dev, pool->size, pool->base, pool->addr);
size_t total_size = pool->count * pool->size;
dma_free_coherent(dev, total_size, pool->base, pool->addr);
memset(pool, 0, sizeof(*pool));
}


@ -63,15 +63,20 @@ static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
struct nsim_bpf_bound_prog *state;
int ret = 0;
state = env->prog->aux->offload->dev_priv;
if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
msleep(state->nsim_dev->bpf_bind_verifier_delay);
if (insn_idx == env->prog->len - 1)
if (insn_idx == env->prog->len - 1) {
pr_vlog(env, "Hello from netdevsim!\n");
return 0;
if (!state->nsim_dev->bpf_bind_verifier_accept)
ret = -EOPNOTSUPP;
}
return ret;
}
static int nsim_bpf_finalize(struct bpf_verifier_env *env)
@ -190,9 +195,6 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
{
int err;
if (!xdp_attachment_flags_ok(xdp, bpf))
return -EBUSY;
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
return -EOPNOTSUPP;
@ -598,6 +600,9 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
&nsim_dev->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_delay);
nsim_dev->bpf_bind_verifier_accept = true;
debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_accept);
return 0;
}


@ -189,6 +189,7 @@ struct nsim_dev {
struct dentry *take_snapshot;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
bool bpf_bind_verifier_accept;
u32 bpf_bind_verifier_delay;
struct dentry *ddir_bpf_bound_progs;
u32 prog_id_gen;


@ -1315,11 +1315,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
int orig_iif = skb->skb_iif;
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
bool is_ll_src;
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb
* packet taps again. Reset pkt_type for upper layers to process skb.
* for packets with lladdr src, however, skip so that the dst can be
* determine at input using original ifindex in the case that daddr
* needs strict
*/
if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
if (skb->pkt_type == PACKET_LOOPBACK ||
(need_strict && !is_ndisc && !is_ll_src)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;


@ -205,3 +205,12 @@ config NFS_DISABLE_UDP_SUPPORT
Choose Y here to disable the use of NFS over UDP. NFS over UDP
on modern networks (1Gb+) can lead to data corruption caused by
fragmentation during high loads.
config NFS_V4_2_READ_PLUS
bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation"
depends on NFS_V4_2
default n
help
This is intended for developers only. The READ_PLUS operation has
been shown to have issues under specific conditions and should not
be used in production.


@ -838,7 +838,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_pgio_mirror *pgm;
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_pnfs_ds *ds;
u32 ds_idx, i;
u32 ds_idx;
retry:
ff_layout_pg_check_layout(pgio, req);
@ -864,11 +864,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
goto retry;
}
for (i = 0; i < pgio->pg_mirror_count; i++) {
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
pgm = &pgio->pg_mirrors[i];
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
}
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
pgm = &pgio->pg_mirrors[0];
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
pgio->pg_mirror_idx = ds_idx;
@ -985,6 +983,21 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
return 1;
}
static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
u32 old = desc->pg_mirror_idx;
desc->pg_mirror_idx = idx;
return old;
}
static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
return &desc->pg_mirrors[idx];
}
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
.pg_init = ff_layout_pg_init_read,
.pg_test = pnfs_generic_pg_test,
@ -998,6 +1011,8 @@ static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
.pg_doio = pnfs_generic_pg_writepages,
.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
.pg_cleanup = pnfs_generic_pg_cleanup,
.pg_get_mirror = ff_layout_pg_get_mirror_write,
.pg_set_mirror = ff_layout_pg_set_mirror_write,
};
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)


@ -1241,12 +1241,13 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
.rpc_resp = &res,
};
u32 xdrlen;
int ret, np;
int ret, np, i;
ret = -ENOMEM;
res.scratch = alloc_page(GFP_KERNEL);
if (!res.scratch)
return -ENOMEM;
goto out;
xdrlen = nfs42_listxattr_xdrsize(buflen);
if (xdrlen > server->lxasize)
@ -1254,9 +1255,12 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
np = xdrlen / PAGE_SIZE + 1;
pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL) {
__free_page(res.scratch);
return -ENOMEM;
if (!pages)
goto out_free_scratch;
for (i = 0; i < np; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free_pages;
}
arg.xattr_pages = pages;
@ -1271,14 +1275,15 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
*eofp = res.eof;
}
out_free_pages:
while (--np >= 0) {
if (pages[np])
__free_page(pages[np]);
}
__free_page(res.scratch);
kfree(pages);
out_free_scratch:
__free_page(res.scratch);
out:
return ret;
}


@ -1528,7 +1528,6 @@ static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count,
hdr.replen);
req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
encode_nops(&hdr);
}


@ -377,10 +377,10 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
goto out_stateowner;
set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags);
set_bit(NFS_OPEN_STATE, &ctx->state->flags);
memcpy(&ctx->state->open_stateid.other, &stateid->other,
NFS4_STATEID_OTHER_SIZE);
update_open_stateid(ctx->state, stateid, NULL, filep->f_mode);
set_bit(NFS_OPEN_STATE, &ctx->state->flags);
nfs_file_set_open_context(filep, ctx);
put_nfs_open_context(ctx);


@ -5309,7 +5309,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
nfs4_read_done_cb(task, hdr);
}
#ifdef CONFIG_NFS_V4_2
#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
static void nfs42_read_plus_support(struct nfs_server *server, struct rpc_message *msg)
{
if (server->caps & NFS_CAP_READ_PLUS)


@ -31,13 +31,29 @@
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
if (desc->pg_ops->pg_get_mirror)
return desc->pg_ops->pg_get_mirror(desc, idx);
return &desc->pg_mirrors[0];
}
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
return &desc->pg_mirrors[desc->pg_mirror_idx];
return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
if (desc->pg_ops->pg_set_mirror)
return desc->pg_ops->pg_set_mirror(desc, idx);
return desc->pg_mirror_idx;
}
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr))
@ -1259,7 +1275,7 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
return;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
mirror = &desc->pg_mirrors[midx];
mirror = nfs_pgio_get_mirror(desc, midx);
desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
desc->pg_error);
}
@ -1293,12 +1309,12 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
goto out_failed;
}
desc->pg_mirror_idx = midx;
nfs_pgio_set_current_mirror(desc, midx);
if (!nfs_pageio_add_request_mirror(desc, dupreq))
goto out_cleanup_subreq;
}
desc->pg_mirror_idx = 0;
nfs_pgio_set_current_mirror(desc, 0);
if (!nfs_pageio_add_request_mirror(desc, req))
goto out_failed;
@ -1320,10 +1336,12 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
u32 mirror_idx)
{
struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
u32 restore_idx = desc->pg_mirror_idx;
struct nfs_pgio_mirror *mirror;
u32 restore_idx;
restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
mirror = nfs_pgio_current_mirror(desc);
desc->pg_mirror_idx = mirror_idx;
for (;;) {
nfs_pageio_doio(desc);
if (desc->pg_error < 0 || !mirror->pg_recoalesce)
@ -1331,7 +1349,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
if (!nfs_do_recoalesce(desc))
break;
}
desc->pg_mirror_idx = restore_idx;
nfs_pgio_set_current_mirror(desc, restore_idx);
}
/*
@ -1405,7 +1423,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
u32 midx;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
mirror = &desc->pg_mirrors[midx];
mirror = nfs_pgio_get_mirror(desc, midx);
if (!list_empty(&mirror->pg_list)) {
prev = nfs_list_entry(mirror->pg_list.prev);
if (index != prev->wb_index + 1) {


@ -227,7 +227,7 @@ struct xt_table {
unsigned int valid_hooks;
/* Man behind the curtain... */
struct xt_table_info *private;
struct xt_table_info __rcu *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
struct xt_table_info
*xt_table_get_private_protected(const struct xt_table *table);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
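The x_tables.h hunks above annotate the table's private info pointer as __rcu and declare xt_table_get_private_protected(); the packet-path hunks further down switch to rcu_access_pointer() and the slow paths to the new helper. The helper's body is not part of this diff, so the following is only a sketch of how such an accessor is conventionally written, assuming writers replace ->private under the per-family xt mutex (the xt[] state array lives in net/netfilter/x_tables.c):

```c
/* Sketch only: the real definition is in net/netfilter/x_tables.c and
 * is not shown in this diff.  Callers that inspect or swap ->private
 * outside RCU read-side protection must already hold the per-family
 * xt mutex, which is what the lockdep condition asserts.
 */
struct xt_table_info *
xt_table_get_private_protected(const struct xt_table *table)
{
	return rcu_dereference_protected(table->private,
					 mutex_is_locked(&xt[table->af].mutex));
}
```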


@ -55,6 +55,7 @@ struct nfs_page {
unsigned short wb_nio; /* Number of I/O attempts */
};
struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@ -64,6 +65,9 @@ struct nfs_pageio_ops {
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
struct nfs_pgio_mirror *
(*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {


@ -867,7 +867,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
return -EOPNOTSUPP;
return cap_inode_getsecurity(inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)


@ -170,6 +170,7 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
u32 addr64;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;


@ -86,10 +86,8 @@
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
#ifdef CONFIG_XFRM_OFFLOAD
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
#endif /* CONFIG_XFRM_OFFLOAD */
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;


@ -1524,4 +1524,8 @@ void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
void nf_tables_trans_destroy_flush_work(void);
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);
#endif /* _NET_NF_TABLES_H */


@ -240,8 +240,6 @@ struct xdp_attachment_info {
};
struct netdev_bpf;
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);


@ -618,6 +618,9 @@ struct ocelot {
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
/* In tables like ANA:PORT and the ANA:PGID:PGID mask,
* the CPU is located after the physical ports (at the
* num_phys_ports index).


@ -1298,9 +1298,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
static bool __reg64_bound_s32(s64 a)
{
if (a > S32_MIN && a < S32_MAX)
return true;
return false;
return a > S32_MIN && a < S32_MAX;
}
static bool __reg64_bound_u32(u64 a)
@ -1314,10 +1312,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
__mark_reg32_unbounded(reg);
if (__reg64_bound_s32(reg->smin_value))
if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
reg->s32_min_value = (s32)reg->smin_value;
if (__reg64_bound_s32(reg->smax_value))
reg->s32_max_value = (s32)reg->smax_value;
}
if (__reg64_bound_u32(reg->umin_value))
reg->u32_min_value = (u32)reg->umin_value;
if (__reg64_bound_u32(reg->umax_value))
@ -4895,6 +4893,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
ret_reg->smax_value = meta->msize_max_value;
ret_reg->s32_max_value = meta->msize_max_value;
ret_reg->smin_value = -MAX_ERRNO;
ret_reg->s32_min_value = -MAX_ERRNO;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
__update_reg_bounds(ret_reg);
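The verifier hunk above makes __reg_combine_64_into_32() copy the signed 32-bit bounds only when both smin_value and smax_value fit in s32: one in-range endpoint says nothing about the low 32 bits of the other values in the 64-bit range. A minimal standalone illustration of why copying just one endpoint is unsound (plain userspace C, not verifier code, assuming the usual two's-complement truncation):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t smin = 0;                 /* fits in s32 */
	int64_t smax = INT64_C(1) << 33;  /* does not fit in s32 */
	int64_t v    = INT64_C(1) << 31;  /* smin <= v <= smax */
	int32_t v32  = (int32_t)v;        /* truncates to INT32_MIN */

	/* Copying only smin into s32_min_value would claim the 32-bit
	 * view is >= 0, yet an in-range value truncates to INT32_MIN. */
	printf("v = %lld, (s32)v = %d\n", (long long)v, (int)v32);
	return 0;
}
```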


@ -177,6 +177,9 @@ static int br_dev_open(struct net_device *dev)
br_stp_enable_bridge(br);
br_multicast_open(br);
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_join_snoopers(br);
return 0;
}
@ -197,6 +200,9 @@ static int br_dev_stop(struct net_device *dev)
br_stp_disable_bridge(br);
br_multicast_stop(br);
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_leave_snoopers(br);
netif_stop_queue(dev);
return 0;


@ -3286,7 +3286,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
}
#endif
static void br_multicast_join_snoopers(struct net_bridge *br)
void br_multicast_join_snoopers(struct net_bridge *br)
{
br_ip4_multicast_join_snoopers(br);
br_ip6_multicast_join_snoopers(br);
@ -3317,7 +3317,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
}
#endif
static void br_multicast_leave_snoopers(struct net_bridge *br)
void br_multicast_leave_snoopers(struct net_bridge *br)
{
br_ip4_multicast_leave_snoopers(br);
br_ip6_multicast_leave_snoopers(br);
@ -3336,9 +3336,6 @@ static void __br_multicast_open(struct net_bridge *br,
void br_multicast_open(struct net_bridge *br)
{
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_join_snoopers(br);
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_own_query);
@ -3354,9 +3351,6 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_leave_snoopers(br);
}
void br_multicast_dev_del(struct net_bridge *br)
@ -3487,6 +3481,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
struct net_bridge_port *port;
bool change_snoopers = false;
spin_lock_bh(&br->multicast_lock);
if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
@ -3495,7 +3490,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
br_mc_disabled_update(br->dev, val);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
br_multicast_leave_snoopers(br);
change_snoopers = true;
goto unlock;
}
@ -3506,9 +3501,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
list_for_each_entry(port, &br->port_list, list)
__br_multicast_enable_port(port);
change_snoopers = true;
unlock:
spin_unlock_bh(&br->multicast_lock);
/* br_multicast_join_snoopers has the potential to cause
* an MLD Report/Leave to be delivered to br_multicast_rcv,
* which would in turn call br_multicast_add_group, which would
* attempt to acquire multicast_lock. This function should be
* called after the lock has been released to avoid deadlocks on
* multicast_lock.
*
* br_multicast_leave_snoopers does not have the problem since
* br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
* returns without calling br_multicast_ipv4/6_rcv if it's not
* enabled. Moved both functions out just for symmetry.
*/
if (change_snoopers) {
if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
br_multicast_join_snoopers(br);
else
br_multicast_leave_snoopers(br);
}
return 0;
}


@ -792,6 +792,8 @@ void br_multicast_del_port(struct net_bridge_port *port);
void br_multicast_enable_port(struct net_bridge_port *port);
void br_multicast_disable_port(struct net_bridge_port *port);
void br_multicast_init(struct net_bridge *br);
void br_multicast_join_snoopers(struct net_bridge *br);
void br_multicast_leave_snoopers(struct net_bridge *br);
void br_multicast_open(struct net_bridge *br);
void br_multicast_stop(struct net_bridge *br);
void br_multicast_dev_del(struct net_bridge *br);
@ -969,6 +971,14 @@ static inline void br_multicast_init(struct net_bridge *br)
{
}
static inline void br_multicast_join_snoopers(struct net_bridge *br)
{
}
static inline void br_multicast_leave_snoopers(struct net_bridge *br)
{
}
static inline void br_multicast_open(struct net_bridge *br)
{
}


@ -266,8 +266,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
}
masterv = br_vlan_get_master(br, v->vid, extack);
if (!masterv)
if (!masterv) {
err = -ENOMEM;
goto out_filt;
}
v->brvlan = masterv;
if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);


@ -1157,6 +1157,9 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
if (level != SOL_CAN_ISOTP)
return -EINVAL;
if (so->bound)
return -EISCONN;
switch (optname) {
case CAN_ISOTP_OPTS:
if (optlen != sizeof(struct can_isotp_options))


@ -8917,6 +8917,17 @@ static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
return dev->xdp_state[mode].prog;
}
static u8 dev_xdp_prog_count(struct net_device *dev)
{
u8 count = 0;
int i;
for (i = 0; i < __MAX_XDP_MODE; i++)
if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
count++;
return count;
}
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
@ -9007,6 +9018,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
struct bpf_xdp_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog, u32 flags)
{
unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
struct bpf_prog *cur_prog;
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
@ -9022,11 +9034,17 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
return -EINVAL;
}
/* just one XDP mode bit should be set, zero defaults to SKB mode */
if (hweight32(flags & XDP_FLAGS_MODES) > 1) {
/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
if (num_modes > 1) {
NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
return -EINVAL;
}
/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
if (!num_modes && dev_xdp_prog_count(dev) > 1) {
NL_SET_ERR_MSG(extack,
"More than one program loaded, unset mode is ambiguous");
return -EINVAL;
}
/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");


@ -381,10 +381,8 @@ static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
if (this->release == release &&
this->indr.cb_priv == cb_priv) {
this->indr.cb_priv == cb_priv)
list_move(&this->indr.list, cleanup_list);
return;
}
}
}


@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
{
int ret;
/* Preempt disable is needed to protect per-cpu redirect_info between
* BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
* access to maps strictly require a rcu_read_lock() for protection,
* mixing with BH RCU lock doesn't work.
/* Migration disable and BH disable are needed to protect per-cpu
* redirect_info between BPF prog and skb_do_redirect().
*/
preempt_disable();
migrate_disable();
local_bh_disable();
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
@ -78,7 +77,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
break;
}
preempt_enable();
local_bh_enable();
migrate_enable();
return ret;
}


@ -335,11 +335,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolean
* is used for those calls sites. Thus, allowing for faster recycling
* of xdp_frames/pages in those cases. This path is never used by the
* MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
* the switch-statement.
* of xdp_frames/pages in those cases.
*/
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
struct xdp_buff *xdp)
{
struct xdp_mem_allocator *xa;
struct page *page;
@ -361,6 +360,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
page = virt_to_page(data); /* Assumes order0 page*/
put_page(page);
break;
case MEM_TYPE_XSK_BUFF_POOL:
/* NB! Only valid from an xdp_buff! */
xsk_buff_free(xdp);
break;
default:
/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
@ -370,19 +373,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
void xdp_return_frame(struct xdp_frame *xdpf)
{
__xdp_return(xdpf->data, &xdpf->mem, false);
__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
__xdp_return(xdpf->data, &xdpf->mem, true);
__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp)
{
__xdp_return(xdp->data, &xdp->rxq->mem, true);
__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
@ -400,18 +403,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
struct netdev_bpf *bpf)
{
if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
NL_SET_ERR_MSG(bpf->extack,
"program loaded with different flags");
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf)
{


@ -628,6 +628,8 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
return ret;
change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
if (change_bits > nbits)
change_bits = nbits;
bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
change_bits);
if (change_bits < nbits)


@ -825,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
if (has_gw && has_via) {
NL_SET_ERR_MSG(extack,
"Nexthop configuration can not contain both GATEWAY and VIA");
goto errout;
return -EINVAL;
}
return 0;


@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
private = READ_ONCE(table->private); /* Address dependency. */
private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct arpt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
void *loc_cpu_entry;
@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
if (!IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
const struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
private = t->private;
private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
void __user *userptr)
{
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;


@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
private = READ_ONCE(table->private); /* Address dependency. */
private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct ipt_entry *e;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
const void *loc_cpu_entry;
@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, AF_INET, name);
if (!IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
const struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
private = t->private;
private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;


@ -510,7 +510,6 @@ static void tcp_init_buffer_space(struct sock *sk)
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_sndbuf_expand(sk);
tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@ -534,6 +533,8 @@ static void tcp_init_buffer_space(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_jiffies32;
tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
(u32)TCP_INIT_CWND * tp->advmss);
}
/* 4. Recalculate window clamp after socket hit its memory bounds. */


@ -984,7 +984,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
(inet_sk(sk)->tos & INET_ECN_MASK) :
inet_sk(sk)->tos;
if (!INET_ECN_is_capable(tos) &&
@ -1541,7 +1542,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = prandom_u32();
/* Set ToS of the new socket based upon the value of incoming SYN. */
/* Set ToS of the new socket based upon the value of incoming SYN.
* ECT bits are set later in tcp_init_transfer().
*/
if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
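The two hunks above (and their IPv6 counterparts in the tcp_v6 file further down) change how the incoming SYN's TOS is reflected: the SYN-ACK path now takes the DSCP bits from the peer's SYN but keeps the listener's own ECN bits, while the child socket masks the ECN bits entirely because they are established later in tcp_init_transfer(). Restated as standalone bit arithmetic, with an illustrative helper that is not a kernel function (INET_ECN_MASK mirrors include/net/inet_ecn.h):

```c
/* Illustrative helper only.  INET_ECN_MASK is the low two ECN bits of
 * the TOS/traffic-class byte, as defined in include/net/inet_ecn.h. */
#define INET_ECN_MASK 3

static inline unsigned char reflect_syn_tos(unsigned char syn_tos,
					    unsigned char listener_tos)
{
	/* DSCP comes from the peer's SYN; ECN bits stay the listener's. */
	return (syn_tos & ~INET_ECN_MASK) | (listener_tos & INET_ECN_MASK);
}
```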


@ -1880,7 +1880,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
* window, and remember whether we were cwnd-limited then.
*/
if (!before(tp->snd_una, tp->max_packets_seq) ||
tp->packets_out > tp->max_packets_out) {
tp->packets_out > tp->max_packets_out ||
is_cwnd_limited) {
tp->max_packets_out = tp->packets_out;
tp->max_packets_seq = tp->snd_nxt;
tp->is_cwnd_limited = is_cwnd_limited;
@ -2702,6 +2703,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
else
tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
if (likely(sent_pkts || is_cwnd_limited))
tcp_cwnd_validate(sk, is_cwnd_limited);
if (likely(sent_pkts)) {
if (tcp_in_cwnd_reduction(sk))
tp->prr_out += sent_pkts;
@ -2709,8 +2714,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* Send one loss probe per tail loss episode. */
if (push_one != 2)
tcp_schedule_loss_probe(sk, false);
is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
}
return !tp->packets_out && !tcp_write_queue_empty(sk);


@ -2173,7 +2173,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
__skb_pull(skb, skb_transport_offset(skb));
ret = udp_queue_rcv_one_skb(sk, skb);
if (ret > 0)
ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
}
return 0;
}

View File

@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
private = READ_ONCE(table->private); /* Address dependency. */
private = rcu_access_pointer(table->private);
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct ip6t_entry *e;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
int ret = 0;
const void *loc_cpu_entry;
@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, AF_INET6, name);
if (!IS_ERR(t)) {
struct ip6t_getinfo info;
const struct xt_table_info *private = t->private;
const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR(t)) {
struct xt_table_info *private = t->private;
struct xt_table_info *private = xt_table_get_private_protected(t);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
private = t->private;
private = xt_table_get_private_protected(t);
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
const struct xt_table_info *private = xt_table_get_private_protected(table);
void __user *pos;
unsigned int size;
int ret = 0;


@ -528,7 +528,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
(np->tclass & INET_ECN_MASK) :
np->tclass;
if (!INET_ECN_is_capable(tclass) &&
@ -1320,7 +1321,9 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/* Set ToS of the new socket based upon the value of incoming SYN. */
/* Set ToS of the new socket based upon the value of incoming SYN.
* ECT bits are set later in tcp_init_transfer().
*/
if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;


@ -948,6 +948,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
if (ret) {
kfree(sdata);


@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void)
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
rhashtable_init(&newtbl->rhead, &mesh_rht_params);
return newtbl;
}
@ -773,9 +774,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
goto free_path;
}
rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;


@ -3455,7 +3455,7 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*chandef = he_chandef;
return false;
return true;
}
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,


@ -67,6 +67,7 @@ void mptcp_seq_show(struct seq_file *seq)
for (i = 0; mptcp_snmp_list[i].name; i++)
seq_puts(seq, " 0");
seq_putc(seq, '\n');
return;
}


@ -1723,6 +1723,10 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
}
nla_strlcpy(ifname, attr, IFNAMSIZ);
/* nf_tables_netdev_event() is called under rtnl_mutex, this is
* indirectly serializing all the other holders of the commit_mutex with
* the rtnl_mutex.
*/
dev = __dev_get_by_name(net, ifname);
if (!dev) {
err = -ENOENT;
@ -3719,7 +3723,7 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
return 0;
}
static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
{
u64 ms = be64_to_cpu(nla_get_be64(nla));
u64 max = (u64)(~((u64)0));
@ -3733,7 +3737,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
return 0;
}
static __be64 nf_jiffies64_to_msecs(u64 input)
__be64 nf_jiffies64_to_msecs(u64 input)
{
return cpu_to_be64(jiffies64_to_msecs(input));
}


@ -177,8 +177,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
}
#endif
case NFT_CT_ID:
if (!nf_ct_is_confirmed(ct))
goto err;
*dest = nf_ct_get_id(ct);
return;
default:


@ -157,8 +157,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
tb[NFTA_DYNSET_TIMEOUT])));
err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
if (err)
return err;
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@ -267,7 +269,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
cpu_to_be64(jiffies_to_msecs(priv->timeout)),
nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))

Some files were not shown because too many files have changed in this diff Show More