This is the 5.10.169 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmP2AtsACgkQONu9yGCS
aT6Pow/+MC7snhMqMZKWujfijbaP4PZm2WsfkTxpiTECg50axy+TJ0mtRzvrtDXt
DyMec3IW121OeCItZ48W8lA17Efd5CAzZd58PxwWygfxSQ6livsr2mxAoWSOfk+5
Bc1IJnKlpxWVovC8TswZuWFsatF5C0+xL5qzw0udY6bMLgKgAl7SIFykeEX3jibW
Zjg7DSdFeSy7iDSSc2wu0MG6yM35y4PMnO7rYFzNRaTDpESJfm9ew+NGBLT9E6Ao
8oIP2bBGCt83W7GZx6IpcrqtkP+SkrnLZ7NUTvfQQnwcYReZnXYShXRyirSbmjkQ
OVziXOwRA3IGyvNQVMAef8vySbNwcTaUArKw4Lc3MhE0mT1wjpyNkF3Qyq9NvDsW
x3wPXd/cIGZHBMKqxAndXdjZEKf/E5WzAxpDBm6H9U6EBqy7xRWylH1RwTUEYnPM
7yqEEE2qlI/PTL5q+sJbzNhTI+XzhQvAkCuPs0APr1B5nqAV3Duo/PhRMg52S4Tq
HGEZDuPzektLljqpON9IO+4g0aLLOodJccWwEghZIcWX2N5zQ69TI0stUzq3OLiJ
iDU+feOr0JWQ59OK6JdVpL3LvcDJ8vUs9Fm57crSSi1lt5nqE0fKS5GbNtraKpcs
yPFwBoqiiN7Q/oi9dV3SjBuVWId6FUGcq5rD5nKNWdPzdHGo/44=
=KOqD
-----END PGP SIGNATURE-----

Merge 5.10.169 into android12-5.10-lts

Changes in 5.10.169
    ASoC: Intel: sof_rt5682: always set dpcm_capture for amplifiers
    selftests/bpf: Verify copy_register_state() preserves parent/live fields
    ALSA: hda: Do not unset preset when cleaning up codec
    ASoC: cs42l56: fix DT probe
    tools/virtio: fix the vringh test for virtio ring changes
    net/rose: Fix to not accept on connected socket
    net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC
    net: sched: sch: Bounds check priority
    s390/decompressor: specify __decompress() buf len to avoid overflow
    nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
    platform/x86: touchscreen_dmi: Add Chuwi Vi8 (CWI501) DMI match
    nvmem: core: add error handling for dev_set_name
    nvmem: core: remove nvmem_config wp_gpio
    nvmem: core: fix cleanup after dev_set_name()
    nvmem: core: fix registration vs use race
    aio: fix mremap after fork null-deref
    s390/signal: fix endless loop in do_signal
    ovl: remove privs in ovl_copyfile()
    ovl: remove privs in ovl_fallocate()
    netfilter: nft_tproxy: restrict to prerouting hook
    mmc: jz4740: Work around bug on JZ4760(B)
    mmc: sdio: fix possible resource leaks in some error paths
    mmc: mmc_spi: fix error handling in mmc_spi_probe()
    ALSA: hda/conexant: add a new hda codec SN6180
    ALSA: hda/realtek - fixed wrong gpio assigned
    sched/psi: Fix use-after-free in ep_remove_wait_queue()
    hugetlb: check for undefined shift on 32 bit architectures
    Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."
    net: Fix unwanted sign extension in netdev_stats_to_stats64()
    revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
    ixgbe: allow to increase MTU to 3K with XDP enabled
    i40e: add double of VLAN header when computing the max MTU
    net: bgmac: fix BCM5358 support by setting correct flags
    sctp: sctp_sock_filter(): avoid list_entry() on possibly empty list
    net/sched: tcindex: update imperfect hash filters respecting rcu
    dccp/tcp: Avoid negative sk_forward_alloc by ipv6_pinfo.pktoptions.
    net/usb: kalmia: Don't pass act_len in usb_bulk_msg error path
    net: openvswitch: fix possible memory leak in ovs_meter_cmd_set()
    net: stmmac: fix order of dwmac5 FlexPPS parametrization sequence
    bnxt_en: Fix mqprio and XDP ring checking logic
    net: stmmac: Restrict warning on disabling DMA store and fwd mode
    net: mpls: fix stale pointer if allocation fails during device rename
    ixgbe: add double of VLAN header when computing the max MTU
    ipv6: Fix datagram socket connection with DSCP.
    ipv6: Fix tcp socket connection with DSCP.
    nilfs2: fix underflow in second superblock position calculations
    drm/i915/gen11: Moving WAs to icl_gt_workarounds_init()
    drm/i915/gen11: Wa_1408615072/Wa_1407596294 should be on GT list
    flow_offload: fill flags to action structure
    net/sched: act_ctinfo: use percpu stats
    i40e: Add checking for null for nlmsg_find_attr()
    net/sched: tcindex: search key must be 16 bits
    kvm: initialize all of the kvm_debugregs structure before sending it to userspace
    alarmtimer: Prevent starvation by small intervals and SIG_IGN
    ASoC: SOF: Intel: hda-dai: fix possible stream_tag leak
    net: sched: sch: Fix off by one in htb_activate_prios()
    nvmem: core: fix return value
    Linux 5.10.169

Change-Id: Id846e3a42e381eb9bc554ffb5ab1390343b72287
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 0caf8151c2

Makefile (2 changes)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 168
+SUBLEVEL = 169
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -79,6 +79,6 @@ void *decompress_kernel(void)
        void *output = (void *)decompress_offset;

        __decompress(_compressed_start, _compressed_end - _compressed_start,
-                    NULL, NULL, output, 0, NULL, error);
+                    NULL, NULL, output, vmlinux.image_size, NULL, error);
        return output;
 }
@@ -472,7 +472,7 @@ void do_signal(struct pt_regs *regs)
        current->thread.system_call =
                test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;

-       if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
+       if (get_signal(&ksig)) {
                /* Whee! Actually deliver the signal. */
                if (current->thread.system_call) {
                        regs->int_code = current->thread.system_call;
@@ -4455,12 +4455,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 {
        unsigned long val;

+       memset(dbgregs, 0, sizeof(*dbgregs));
        memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
        kvm_get_dr(vcpu, 6, &val);
        dbgregs->dr6 = val;
        dbgregs->dr7 = vcpu->arch.dr7;
-       dbgregs->flags = 0;
-       memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }

 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@ -1212,6 +1212,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
GAMT_CHKN_BIT_REG,
|
||||
GAMT_CHKN_DISABLE_L3_COH_PIPE);
|
||||
|
||||
/*
|
||||
* Wa_1408615072:icl,ehl (vsunit)
|
||||
* Wa_1407596294:icl,ehl (hsunit)
|
||||
*/
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
|
||||
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1407352427:icl,ehl */
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
PSDUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1406680159:icl,ehl */
|
||||
wa_write_or(wal,
|
||||
SUBSLICE_UNIT_LEVEL_CLKGATE,
|
||||
GWUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1607087056:icl,ehl,jsl */
|
||||
if (IS_ICELAKE(i915) ||
|
||||
IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
|
||||
@ -1816,22 +1832,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
|
||||
GEN11_ENABLE_32_PLANE_MODE);
|
||||
|
||||
/*
|
||||
* Wa_1408615072:icl,ehl (vsunit)
|
||||
* Wa_1407596294:icl,ehl (hsunit)
|
||||
*/
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
|
||||
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1407352427:icl,ehl */
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
PSDUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1406680159:icl,ehl */
|
||||
wa_write_or(wal,
|
||||
SUBSLICE_UNIT_LEVEL_CLKGATE,
|
||||
GWUNIT_CLKGATE_DIS);
|
||||
|
||||
/*
|
||||
* Wa_1408767742:icl[a2..forever],ehl[all]
|
||||
* Wa_1605460711:icl[a0..c0]
|
||||
|
@ -295,6 +295,12 @@ static void sdio_release_func(struct device *dev)
|
||||
if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
|
||||
sdio_free_func_cis(func);
|
||||
|
||||
/*
|
||||
* We have now removed the link to the tuples in the
|
||||
* card structure, so remove the reference.
|
||||
*/
|
||||
put_device(&func->card->dev);
|
||||
|
||||
kfree(func->info);
|
||||
kfree(func->tmpbuf);
|
||||
kfree(func);
|
||||
@ -325,6 +331,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
|
||||
|
||||
device_initialize(&func->dev);
|
||||
|
||||
/*
|
||||
* We may link to tuples in the card structure,
|
||||
* we need make sure we have a reference to it.
|
||||
*/
|
||||
get_device(&func->card->dev);
|
||||
|
||||
func->dev.parent = &card->dev;
|
||||
func->dev.bus = &sdio_bus_type;
|
||||
func->dev.release = sdio_release_func;
|
||||
@ -378,10 +390,9 @@ int sdio_add_func(struct sdio_func *func)
|
||||
*/
|
||||
void sdio_remove_func(struct sdio_func *func)
|
||||
{
|
||||
if (!sdio_func_present(func))
|
||||
return;
|
||||
if (sdio_func_present(func))
|
||||
device_del(&func->dev);
|
||||
|
||||
device_del(&func->dev);
|
||||
of_node_put(func->dev.of_node);
|
||||
put_device(&func->dev);
|
||||
}
|
||||
|
@ -391,12 +391,6 @@ int sdio_read_func_cis(struct sdio_func *func)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Since we've linked to tuples in the card structure,
|
||||
* we must make sure we have a reference to it.
|
||||
*/
|
||||
get_device(&func->card->dev);
|
||||
|
||||
/*
|
||||
* Vendor/device id is optional for function CIS, so
|
||||
* copy it from the card structure as needed.
|
||||
@ -422,11 +416,5 @@ void sdio_free_func_cis(struct sdio_func *func)
|
||||
}
|
||||
|
||||
func->tuples = NULL;
|
||||
|
||||
/*
|
||||
* We have now removed the link to the tuples in the
|
||||
* card structure, so remove the reference.
|
||||
*/
|
||||
put_device(&func->card->dev);
|
||||
}
|
||||
|
||||
|
@ -1041,6 +1041,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
|
||||
mmc->ops = &jz4740_mmc_ops;
|
||||
if (!mmc->f_max)
|
||||
mmc->f_max = JZ_MMC_CLK_RATE;
|
||||
|
||||
/*
|
||||
* There seems to be a problem with this driver on the JZ4760 and
|
||||
* JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
|
||||
* the communication fails with many SD cards.
|
||||
* Until this bug is sorted out, limit the maximum rate to 24 MHz.
|
||||
*/
|
||||
if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
|
||||
mmc->f_max = JZ_MMC_CLK_RATE;
|
||||
|
||||
mmc->f_min = mmc->f_max / 128;
|
||||
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
|
||||
|
||||
|
@ -1450,7 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
||||
|
||||
status = mmc_add_host(mmc);
|
||||
if (status != 0)
|
||||
goto fail_add_host;
|
||||
goto fail_glue_init;
|
||||
|
||||
/*
|
||||
* Index 0 is card detect
|
||||
@ -1458,7 +1458,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
||||
*/
|
||||
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
|
||||
if (status == -EPROBE_DEFER)
|
||||
goto fail_add_host;
|
||||
goto fail_gpiod_request;
|
||||
if (!status) {
|
||||
/*
|
||||
* The platform has a CD GPIO signal that may support
|
||||
@ -1473,7 +1473,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
||||
/* Index 1 is write protect/read only */
|
||||
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
|
||||
if (status == -EPROBE_DEFER)
|
||||
goto fail_add_host;
|
||||
goto fail_gpiod_request;
|
||||
if (!status)
|
||||
has_ro = true;
|
||||
|
||||
@ -1487,7 +1487,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
||||
? ", cd polling" : "");
|
||||
return 0;
|
||||
|
||||
fail_add_host:
|
||||
fail_gpiod_request:
|
||||
mmc_remove_host(mmc);
|
||||
fail_glue_init:
|
||||
mmc_spi_dma_free(host);
|
||||
|
@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core)
|
||||
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
|
||||
if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
|
||||
ci->pkg == BCMA_PKG_ID_BCM47186) {
|
||||
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
|
||||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
|
||||
}
|
||||
if (ci->pkg == BCMA_PKG_ID_BCM5358)
|
||||
if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
|
||||
break;
|
||||
case BCMA_CHIP_ID_BCM53573:
|
||||
|
@ -8761,10 +8761,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
|
||||
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
|
||||
if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
|
||||
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
|
||||
netdev_err(bp->dev, "tx ring reservation failure\n");
|
||||
netdev_reset_tc(bp->dev);
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
|
||||
if (bp->tx_nr_rings_xdp)
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
|
||||
else
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
@ -2788,7 +2788,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
struct i40e_pf *pf = vsi->back;
|
||||
|
||||
if (i40e_enabled_xdp_vsi(vsi)) {
|
||||
int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
|
||||
int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
|
||||
|
||||
if (frame_size > i40e_max_xdp_frame_size(vsi))
|
||||
return -EINVAL;
|
||||
@ -12520,6 +12520,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
|
||||
}
|
||||
|
||||
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
|
||||
if (!br_spec)
|
||||
return -EINVAL;
|
||||
|
||||
nla_for_each_nested(attr, br_spec, rem) {
|
||||
__u16 mode;
|
||||
|
@ -67,6 +67,8 @@
|
||||
#define IXGBE_RXBUFFER_4K 4096
|
||||
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
|
||||
|
||||
#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
|
||||
|
||||
/* Attempt to maximize the headroom available for incoming frames. We
|
||||
* use a 2K buffer for receives and need 1536/1534 to store the data for
|
||||
* the frame. This leaves us with 512 bytes of room. From that we need
|
||||
|
@ -6728,6 +6728,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
|
||||
ixgbe_free_rx_resources(adapter->rx_ring[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
|
||||
* @adapter: device handle, pointer to adapter
|
||||
*/
|
||||
static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
|
||||
return IXGBE_RXBUFFER_2K;
|
||||
else
|
||||
return IXGBE_RXBUFFER_3K;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_change_mtu - Change the Maximum Transfer Unit
|
||||
* @netdev: network interface device structure
|
||||
@ -6739,18 +6751,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (adapter->xdp_prog) {
|
||||
int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
|
||||
VLAN_HLEN;
|
||||
int i;
|
||||
if (ixgbe_enabled_xdp_adapter(adapter)) {
|
||||
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
|
||||
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
struct ixgbe_ring *ring = adapter->rx_ring[i];
|
||||
|
||||
if (new_frame_size > ixgbe_rx_bufsz(ring)) {
|
||||
e_warn(probe, "Requested MTU size is not supported with XDP\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
|
||||
e_warn(probe, "Requested MTU size is not supported with XDP\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
|
||||
plat_dat->has_gmac4 = 1;
|
||||
plat_dat->pmt = 1;
|
||||
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
|
||||
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
|
||||
plat_dat->rx_clk_runs_in_lpi = 1;
|
||||
|
||||
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
|
||||
if (ret)
|
||||
|
@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
|
||||
return 0;
|
||||
}
|
||||
|
||||
val |= PPSCMDx(index, 0x2);
|
||||
val |= TRGTMODSELx(index, 0x2);
|
||||
val |= PPSEN0;
|
||||
writel(val, ioaddr + MAC_PPS_CONTROL);
|
||||
|
||||
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
|
||||
|
||||
@ -547,6 +547,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
|
||||
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
|
||||
|
||||
/* Finally, activate it */
|
||||
val |= PPSCMDx(index, 0x2);
|
||||
writel(val, ioaddr + MAC_PPS_CONTROL);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1058,7 +1058,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
|
||||
|
||||
stmmac_mac_set(priv, priv->ioaddr, true);
|
||||
if (phy && priv->dma_cap.eee) {
|
||||
priv->eee_active = phy_init_eee(phy, 1) >= 0;
|
||||
priv->eee_active =
|
||||
phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
|
||||
priv->eee_enabled = stmmac_eee_init(priv);
|
||||
priv->tx_lpi_enabled = priv->eee_enabled;
|
||||
stmmac_set_eee_pls(priv, priv->hw, true);
|
||||
|
@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
|
||||
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
|
||||
|
||||
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
|
||||
if (plat->force_thresh_dma_mode) {
|
||||
if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
|
||||
plat->force_sf_dma_mode = 0;
|
||||
dev_warn(&pdev->dev,
|
||||
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
|
||||
|
@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
|
||||
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
|
||||
if (status != 0) {
|
||||
netdev_err(dev->net,
|
||||
"Error sending init packet. Status %i, length %i\n",
|
||||
status, act_len);
|
||||
"Error sending init packet. Status %i\n",
|
||||
status);
|
||||
return status;
|
||||
}
|
||||
else if (act_len != init_msg_len) {
|
||||
@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
|
||||
|
||||
if (status != 0)
|
||||
netdev_err(dev->net,
|
||||
"Error receiving init result. Status %i, length %i\n",
|
||||
status, act_len);
|
||||
"Error receiving init result. Status %i\n",
|
||||
status);
|
||||
else if (act_len != expected_len)
|
||||
netdev_err(dev->net, "Unexpected init result length: %i\n",
|
||||
act_len);
|
||||
|
@ -1675,8 +1675,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
|
||||
else {
|
||||
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
|
||||
be16_to_cpu(rqst->assoc_cmd.sqsize));
|
||||
if (!queue)
|
||||
if (!queue) {
|
||||
ret = VERR_QUEUE_ALLOC_FAIL;
|
||||
nvmet_fc_tgt_a_put(iod->assoc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -627,16 +627,19 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
|
||||
nvmem->id = rval;
|
||||
|
||||
if (config->wp_gpio)
|
||||
nvmem->wp_gpio = config->wp_gpio;
|
||||
else if (!config->ignore_wp)
|
||||
nvmem->dev.type = &nvmem_provider_type;
|
||||
nvmem->dev.bus = &nvmem_bus_type;
|
||||
nvmem->dev.parent = config->dev;
|
||||
|
||||
device_initialize(&nvmem->dev);
|
||||
|
||||
if (!config->ignore_wp)
|
||||
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
|
||||
GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(nvmem->wp_gpio)) {
|
||||
ida_free(&nvmem_ida, nvmem->id);
|
||||
rval = PTR_ERR(nvmem->wp_gpio);
|
||||
kfree(nvmem);
|
||||
return ERR_PTR(rval);
|
||||
nvmem->wp_gpio = NULL;
|
||||
goto err_put_device;
|
||||
}
|
||||
|
||||
kref_init(&nvmem->refcnt);
|
||||
@ -648,9 +651,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
nvmem->stride = config->stride ?: 1;
|
||||
nvmem->word_size = config->word_size ?: 1;
|
||||
nvmem->size = config->size;
|
||||
nvmem->dev.type = &nvmem_provider_type;
|
||||
nvmem->dev.bus = &nvmem_bus_type;
|
||||
nvmem->dev.parent = config->dev;
|
||||
nvmem->root_only = config->root_only;
|
||||
nvmem->priv = config->priv;
|
||||
nvmem->type = config->type;
|
||||
@ -661,18 +661,21 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
|
||||
switch (config->id) {
|
||||
case NVMEM_DEVID_NONE:
|
||||
dev_set_name(&nvmem->dev, "%s", config->name);
|
||||
rval = dev_set_name(&nvmem->dev, "%s", config->name);
|
||||
break;
|
||||
case NVMEM_DEVID_AUTO:
|
||||
dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
|
||||
rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
|
||||
break;
|
||||
default:
|
||||
dev_set_name(&nvmem->dev, "%s%d",
|
||||
rval = dev_set_name(&nvmem->dev, "%s%d",
|
||||
config->name ? : "nvmem",
|
||||
config->name ? config->id : nvmem->id);
|
||||
break;
|
||||
}
|
||||
|
||||
if (rval)
|
||||
goto err_put_device;
|
||||
|
||||
nvmem->read_only = device_property_present(config->dev, "read-only") ||
|
||||
config->read_only || !nvmem->reg_write;
|
||||
|
||||
@ -680,16 +683,10 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
nvmem->dev.groups = nvmem_dev_groups;
|
||||
#endif
|
||||
|
||||
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
|
||||
|
||||
rval = device_register(&nvmem->dev);
|
||||
if (rval)
|
||||
goto err_put_device;
|
||||
|
||||
if (config->compat) {
|
||||
rval = nvmem_sysfs_setup_compat(nvmem, config);
|
||||
if (rval)
|
||||
goto err_device_del;
|
||||
goto err_put_device;
|
||||
}
|
||||
|
||||
if (config->cells) {
|
||||
@ -706,6 +703,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
if (rval)
|
||||
goto err_remove_cells;
|
||||
|
||||
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
|
||||
|
||||
rval = device_add(&nvmem->dev);
|
||||
if (rval)
|
||||
goto err_remove_cells;
|
||||
|
||||
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
|
||||
|
||||
return nvmem;
|
||||
@ -714,8 +717,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
||||
nvmem_device_remove_all_cells(nvmem);
|
||||
if (config->compat)
|
||||
nvmem_sysfs_remove_compat(nvmem, config);
|
||||
err_device_del:
|
||||
device_del(&nvmem->dev);
|
||||
err_put_device:
|
||||
put_device(&nvmem->dev);
|
||||
|
||||
|
@ -1030,6 +1030,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
|
||||
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Chuwi Vi8 (CWI501) */
|
||||
.driver_data = (void *)&chuwi_vi8_data,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
|
||||
DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Chuwi Vi8 (CWI506) */
|
||||
.driver_data = (void *)&chuwi_vi8_data,
|
||||
|
fs/aio.c (4 changes)
@@ -335,6 +335,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
        spin_lock(&mm->ioctx_lock);
        rcu_read_lock();
        table = rcu_dereference(mm->ioctx_table);
+       if (!table)
+               goto out_unlock;
+
        for (i = 0; i < table->nr; i++) {
                struct kioctx *ctx;

@@ -348,6 +351,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
                }
        }

+out_unlock:
        rcu_read_unlock();
        spin_unlock(&mm->ioctx_lock);
        return res;
|
@ -1129,7 +1129,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
|
||||
|
||||
minseg = range[0] + segbytes - 1;
|
||||
do_div(minseg, segbytes);
|
||||
|
||||
if (range[1] < 4096)
|
||||
goto out;
|
||||
|
||||
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
|
||||
if (maxseg < segbytes)
|
||||
goto out;
|
||||
|
||||
do_div(maxseg, segbytes);
|
||||
maxseg--;
|
||||
|
||||
|
@ -409,6 +409,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
|
||||
if (newsize > devsize)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Prevent underflow in second superblock position calculation.
|
||||
* The exact minimum size check is done in nilfs_sufile_resize().
|
||||
*/
|
||||
if (newsize < 4096) {
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write lock is required to protect some functions depending
|
||||
* on the number of segments, the number of reserved segments,
|
||||
|
@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
|
||||
{
|
||||
struct nilfs_super_block **sbp = nilfs->ns_sbp;
|
||||
struct buffer_head **sbh = nilfs->ns_sbh;
|
||||
u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
|
||||
u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size;
|
||||
int valid[2], swp = 0;
|
||||
|
||||
if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
|
||||
nilfs_err(sb, "device size too small");
|
||||
return -EINVAL;
|
||||
}
|
||||
sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
|
||||
|
||||
sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
|
||||
&sbh[0]);
|
||||
sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
|
||||
|
@ -519,9 +519,16 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
|
||||
const struct cred *old_cred;
|
||||
int ret;
|
||||
|
||||
inode_lock(inode);
|
||||
/* Update mode */
|
||||
ovl_copyattr(ovl_inode_real(inode), inode);
|
||||
ret = file_remove_privs(file);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
ret = ovl_real_fdget(file, &real);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_unlock;
|
||||
|
||||
old_cred = ovl_override_creds(file_inode(file)->i_sb);
|
||||
ret = vfs_fallocate(real.file, mode, offset, len);
|
||||
@ -532,6 +539,9 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
|
||||
|
||||
fdput(real);
|
||||
|
||||
out_unlock:
|
||||
inode_unlock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -675,14 +685,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
|
||||
const struct cred *old_cred;
|
||||
loff_t ret;
|
||||
|
||||
inode_lock(inode_out);
|
||||
if (op != OVL_DEDUPE) {
|
||||
/* Update mode */
|
||||
ovl_copyattr(ovl_inode_real(inode_out), inode_out);
|
||||
ret = file_remove_privs(file_out);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = ovl_real_fdget(file_out, &real_out);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_unlock;
|
||||
|
||||
ret = ovl_real_fdget(file_in, &real_in);
|
||||
if (ret) {
|
||||
fdput(real_out);
|
||||
return ret;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
|
||||
@ -711,6 +730,9 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
|
||||
fdput(real_in);
|
||||
fdput(real_out);
|
||||
|
||||
out_unlock:
|
||||
inode_unlock(inode_out);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
|
||||
/* Sanity check values */
|
||||
|
||||
/* there is always at least one xattr id */
|
||||
if (*xattr_ids <= 0)
|
||||
if (*xattr_ids == 0)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
|
||||
|
@ -553,7 +553,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
|
||||
if (!page_size_log)
|
||||
return &default_hstate;
|
||||
|
||||
return size_to_hstate(1UL << page_size_log);
|
||||
if (page_size_log < BITS_PER_LONG)
|
||||
return size_to_hstate(1UL << page_size_log);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
||||
|
@ -49,7 +49,6 @@ enum nvmem_type {
|
||||
* @word_size: Minimum read/write access granularity.
|
||||
* @stride: Minimum read/write access stride.
|
||||
* @priv: User context passed to read/write callbacks.
|
||||
* @wp-gpio: Write protect pin
|
||||
* @ignore_wp: Write Protect pin is managed by the provider.
|
||||
*
|
||||
* Note: A default "nvmem<id>" name will be assigned to the device if
|
||||
@ -64,7 +63,6 @@ struct nvmem_config {
|
||||
const char *name;
|
||||
int id;
|
||||
struct module *owner;
|
||||
struct gpio_desc *wp_gpio;
|
||||
const struct nvmem_cell_info *cells;
|
||||
int ncells;
|
||||
enum nvmem_type type;
|
||||
|
@@ -199,6 +199,7 @@ struct plat_stmmacenet_data {
        int rss_en;
        int mac_port_sel_speed;
        bool en_tx_lpi_clockgating;
+       bool rx_clk_runs_in_lpi;
        int has_xgmac;
        bool vlan_fail_q_en;
        u8 vlan_fail_q;
@@ -2264,6 +2264,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc
        return false;
 }

+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+       skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+       if (skb) {
+               if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+                       skb_set_owner_r(skb, sk);
+                       return skb;
+               }
+               __kfree_skb(skb);
+       }
+       return NULL;
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires);
|
@@ -1225,10 +1225,11 @@ void psi_trigger_destroy(struct psi_trigger *t)

        group = t->group;
        /*
-        * Wakeup waiters to stop polling. Can happen if cgroup is deleted
-        * from under a polling process.
+        * Wakeup waiters to stop polling and clear the queue to prevent it from
+        * being accessed later. Can happen if cgroup is deleted from under a
+        * polling process.
         */
-       wake_up_interruptible(&t->event_wait);
+       wake_up_pollfree(&t->event_wait);

        mutex_lock(&group->trigger_lock);
|
@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(alarm_forward);
|
||||
|
||||
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
|
||||
static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
|
||||
{
|
||||
struct alarm_base *base = &alarm_bases[alarm->type];
|
||||
ktime_t now = base->get_ktime();
|
||||
|
||||
return alarm_forward(alarm, base->get_ktime(), interval);
|
||||
if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
|
||||
/*
|
||||
* Same issue as with posix_timer_fn(). Timers which are
|
||||
* periodic but the signal is ignored can starve the system
|
||||
* with a very small interval. The real fix which was
|
||||
* promised in the context of posix_timer_fn() never
|
||||
* materialized, but someone should really work on it.
|
||||
*
|
||||
* To prevent DOS fake @now to be 1 jiffie out which keeps
|
||||
* the overrun accounting correct but creates an
|
||||
* inconsistency vs. timer_gettime(2).
|
||||
*/
|
||||
ktime_t kj = NSEC_PER_SEC / HZ;
|
||||
|
||||
if (interval < kj)
|
||||
now = ktime_add(now, kj);
|
||||
}
|
||||
|
||||
return alarm_forward(alarm, now, interval);
|
||||
}
|
||||
|
||||
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
|
||||
{
|
||||
return __alarm_forward_now(alarm, interval, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(alarm_forward_now);
|
||||
|
||||
@ -548,9 +572,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
|
||||
if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
|
||||
/*
|
||||
* Handle ignored signals and rearm the timer. This will go
|
||||
* away once we handle ignored signals proper.
|
||||
* away once we handle ignored signals proper. Ensure that
|
||||
* small intervals cannot starve the system.
|
||||
*/
|
||||
ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
|
||||
ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
|
||||
++ptr->it_requeue_pending;
|
||||
ptr->it_active = 1;
|
||||
result = ALARMTIMER_RESTART;
|
||||
|
@ -1604,13 +1604,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
end = PFN_DOWN(base + size);
|
||||
|
||||
for (; cursor < end; cursor++) {
|
||||
/*
|
||||
* Reserved pages are always initialized by the end of
|
||||
* memblock_free_all() (by memmap_init() and, if deferred
|
||||
* initialization is enabled, memmap_init_reserved_pages()), so
|
||||
* these pages can be released directly to the buddy allocator.
|
||||
*/
|
||||
__free_pages_core(pfn_to_page(cursor), 0);
|
||||
memblock_free_pages(pfn_to_page(cursor), cursor, 0);
|
||||
totalram_pages_inc();
|
||||
}
|
||||
}
|
||||
|
@ -541,11 +541,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
|
||||
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
|
||||
/* Clone pktoptions received with SYN, if we own the req */
|
||||
if (*own_req && ireq->pktopts) {
|
||||
newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
|
||||
newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
|
||||
consume_skb(ireq->pktopts);
|
||||
ireq->pktopts = NULL;
|
||||
if (newnp->pktoptions)
|
||||
skb_set_owner_r(newnp->pktoptions, newsk);
|
||||
}
|
||||
|
||||
return newsk;
|
||||
@ -605,7 +603,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
|
||||
--ANK (980728)
|
||||
*/
|
||||
if (np->rxopt.all)
|
||||
opt_skb = skb_clone(skb, GFP_ATOMIC);
|
||||
opt_skb = skb_clone_and_charge_r(skb, sk);
|
||||
|
||||
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
|
||||
if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
|
||||
@ -669,7 +667,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
|
||||
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
|
||||
if (ipv6_opt_accepted(sk, opt_skb,
|
||||
&DCCP_SKB_CB(opt_skb)->header.h6)) {
|
||||
skb_set_owner_r(opt_skb, sk);
|
||||
memmove(IP6CB(opt_skb),
|
||||
&DCCP_SKB_CB(opt_skb)->header.h6,
|
||||
sizeof(struct inet6_skb_parm));
|
||||
|
@@ -51,7 +51,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
        fl6->flowi6_mark = sk->sk_mark;
        fl6->fl6_dport = inet->inet_dport;
        fl6->fl6_sport = inet->inet_sport;
-       fl6->flowlabel = np->flow_label;
+       fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
        fl6->flowi6_uid = sk->sk_uid;

        if (!fl6->flowi6_oif)
@@ -269,6 +269,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
+       fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
@ -1406,14 +1407,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
|
||||
|
||||
/* Clone pktoptions received with SYN, if we own the req */
|
||||
if (ireq->pktopts) {
|
||||
newnp->pktoptions = skb_clone(ireq->pktopts,
|
||||
sk_gfp_mask(sk, GFP_ATOMIC));
|
||||
newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
|
||||
consume_skb(ireq->pktopts);
|
||||
ireq->pktopts = NULL;
|
||||
if (newnp->pktoptions) {
|
||||
if (newnp->pktoptions)
|
||||
tcp_v6_restore_cb(newnp->pktoptions);
|
||||
skb_set_owner_r(newnp->pktoptions, newsk);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (!req_unhash && found_dup_sk) {
|
||||
@ -1481,7 +1479,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
|
||||
--ANK (980728)
|
||||
*/
|
||||
if (np->rxopt.all)
|
||||
opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
|
||||
opt_skb = skb_clone_and_charge_r(skb, sk);
|
||||
|
||||
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
|
||||
struct dst_entry *dst;
|
||||
@ -1563,7 +1561,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
|
||||
if (np->repflow)
|
||||
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
|
||||
if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
|
||||
skb_set_owner_r(opt_skb, sk);
|
||||
tcp_v6_restore_cb(opt_skb);
|
||||
opt_skb = xchg(&np->pktoptions, opt_skb);
|
||||
} else {
|
||||
|
@ -1427,6 +1427,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
|
||||
free:
|
||||
kfree(table);
|
||||
out:
|
||||
mdev->sysctl = NULL;
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
@ -1436,6 +1437,9 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev,
|
||||
struct net *net = dev_net(dev);
|
||||
struct ctl_table *table;
|
||||
|
||||
if (!mdev->sysctl)
|
||||
return;
|
||||
|
||||
table = mdev->sysctl->ctl_table_arg;
|
||||
unregister_net_sysctl_table(mdev->sysctl);
|
||||
kfree(table);
|
||||
|
@ -289,6 +289,13 @@ static int nft_tproxy_dump(struct sk_buff *skb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_tproxy_validate(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr,
|
||||
const struct nft_data **data)
|
||||
{
|
||||
return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
|
||||
}
|
||||
|
||||
static struct nft_expr_type nft_tproxy_type;
|
||||
static const struct nft_expr_ops nft_tproxy_ops = {
|
||||
.type = &nft_tproxy_type,
|
||||
@ -296,6 +303,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
|
||||
.eval = nft_tproxy_eval,
|
||||
.init = nft_tproxy_init,
|
||||
.dump = nft_tproxy_dump,
|
||||
.validate = nft_tproxy_validate,
|
||||
};
|
||||
|
||||
static struct nft_expr_type nft_tproxy_type __read_mostly = {
|
||||
|
@ -450,7 +450,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
|
||||
|
||||
err = attach_meter(meter_tbl, meter);
|
||||
if (err)
|
||||
goto exit_unlock;
|
||||
goto exit_free_old_meter;
|
||||
|
||||
ovs_unlock();
|
||||
|
||||
@ -473,6 +473,8 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
|
||||
genlmsg_end(reply, ovs_reply_header);
|
||||
return genlmsg_reply(reply, info);
|
||||
|
||||
exit_free_old_meter:
|
||||
ovs_meter_free(old_meter);
|
||||
exit_unlock:
|
||||
ovs_unlock();
|
||||
nlmsg_free(reply);
|
||||
|
@ -487,6 +487,12 @@ static int rose_listen(struct socket *sock, int backlog)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
|
||||
lock_sock(sk);
|
||||
if (sock->state != SS_UNCONNECTED) {
|
||||
release_sock(sk);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sk->sk_state != TCP_LISTEN) {
|
||||
struct rose_sock *rose = rose_sk(sk);
|
||||
|
||||
@ -496,8 +502,10 @@ static int rose_listen(struct socket *sock, int backlog)
|
||||
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
sk->sk_state = TCP_LISTEN;
|
||||
release_sock(sk);
|
||||
return 0;
|
||||
}
|
||||
release_sock(sk);
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -307,7 +307,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
|
||||
ret = tcf_idr_check_alloc(tn, &index, act, bind);
|
||||
if (!ret) {
|
||||
ret = tcf_idr_create(tn, index, est, act,
|
||||
&act_bpf_ops, bind, true, 0);
|
||||
&act_bpf_ops, bind, true, flags);
|
||||
if (ret < 0) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
|
||||
ret = tcf_idr_check_alloc(tn, &index, a, bind);
|
||||
if (!ret) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_connmark_ops, bind, false, 0);
|
||||
&act_connmark_ops, bind, false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -92,7 +92,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
cp = rcu_dereference_bh(ca->params);
|
||||
|
||||
tcf_lastuse_update(&ca->tcf_tm);
|
||||
bstats_update(&ca->tcf_bstats, skb);
|
||||
tcf_action_update_bstats(&ca->common, skb);
|
||||
action = READ_ONCE(ca->tcf_action);
|
||||
|
||||
wlen = skb_network_offset(skb);
|
||||
@ -211,8 +211,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
|
||||
index = actparm->index;
|
||||
err = tcf_idr_check_alloc(tn, &index, a, bind);
|
||||
if (!err) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_ctinfo_ops, bind, false, 0);
|
||||
ret = tcf_idr_create_from_flags(tn, index, est, a,
|
||||
&act_ctinfo_ops, bind, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!err) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_gate_ops, bind, false, 0);
|
||||
&act_gate_ops, bind, false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
|
||||
bind, true, 0);
|
||||
bind, true, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
kfree(p);
|
||||
|
@ -144,7 +144,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a, ops, bind,
|
||||
false, 0);
|
||||
false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -254,7 +254,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_mpls_ops, bind, true, 0);
|
||||
&act_mpls_ops, bind, true, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
|
||||
err = tcf_idr_check_alloc(tn, &index, a, bind);
|
||||
if (!err) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_nat_ops, bind, false, 0);
|
||||
&act_nat_ops, bind, false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
|
||||
err = tcf_idr_check_alloc(tn, &index, a, bind);
|
||||
if (!err) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_pedit_ops, bind, false, 0);
|
||||
&act_pedit_ops, bind, false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
goto out_free;
|
||||
|
@ -87,7 +87,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, NULL, a,
|
||||
&act_police_ops, bind, true, 0);
|
||||
&act_police_ops, bind, true, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_sample_ops, bind, true, 0);
|
||||
&act_sample_ops, bind, true, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -128,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_simp_ops, bind, false, 0);
|
||||
&act_simp_ops, bind, false, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_skbedit_ops, bind, true, 0);
|
||||
&act_skbedit_ops, bind, true, act_flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -147,7 +147,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
|
||||
|
||||
if (!exists) {
|
||||
ret = tcf_idr_create(tn, index, est, a,
|
||||
&act_skbmod_ops, bind, true, 0);
|
||||
&act_skbmod_ops, bind, true, flags);
|
||||
if (ret) {
|
||||
tcf_idr_cleanup(tn, index);
|
||||
return ret;
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <net/act_api.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/pkt_cls.h>
|
||||
@ -338,6 +339,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
||||
struct tcf_result cr = {};
|
||||
int err, balloc = 0;
|
||||
struct tcf_exts e;
|
||||
bool update_h = false;
|
||||
|
||||
err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
|
||||
if (err < 0)
|
||||
@ -455,10 +457,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
||||
}
|
||||
}
|
||||
|
||||
if (cp->perfect)
|
||||
if (cp->perfect) {
|
||||
r = cp->perfect + handle;
|
||||
else
|
||||
r = tcindex_lookup(cp, handle) ? : &new_filter_result;
|
||||
} else {
|
||||
/* imperfect area is updated in-place using rcu */
|
||||
update_h = !!tcindex_lookup(cp, handle);
|
||||
r = &new_filter_result;
|
||||
}
|
||||
|
||||
if (r == &new_filter_result) {
|
||||
f = kzalloc(sizeof(*f), GFP_KERNEL);
|
||||
@ -484,7 +489,28 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
||||
|
||||
rcu_assign_pointer(tp->root, cp);
|
||||
|
||||
if (r == &new_filter_result) {
|
||||
if (update_h) {
|
||||
struct tcindex_filter __rcu **fp;
|
||||
struct tcindex_filter *cf;
|
||||
|
||||
f->result.res = r->res;
|
||||
tcf_exts_change(&f->result.exts, &r->exts);
|
||||
|
||||
/* imperfect area bucket */
|
||||
fp = cp->h + (handle % cp->hash);
|
||||
|
||||
/* lookup the filter, guaranteed to exist */
|
||||
for (cf = rcu_dereference_bh_rtnl(*fp); cf;
|
||||
fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
|
||||
if (cf->key == (u16)handle)
|
||||
break;
|
||||
|
||||
f->next = cf->next;
|
||||
|
||||
cf = rcu_replace_pointer(*fp, f, 1);
|
||||
tcf_exts_get_net(&cf->result.exts);
|
||||
tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
|
||||
} else if (r == &new_filter_result) {
|
||||
struct tcindex_filter *nfp;
|
||||
struct tcindex_filter __rcu **fp;
|
||||
|
||||
|
@@ -405,7 +405,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
-                       int prio = ffz(~m);
+                       unsigned int prio = ffz(~m);
+
+                       if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
+                               break;
                        m &= ~(1 << prio);

                        if (p->inner.clprio[prio].feed.rb_node)
@@ -349,11 +349,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        const struct inet_diag_req_v2 *r = commp->r;
-       struct sctp_association *assoc =
-               list_entry(ep->asocs.next, struct sctp_association, asocs);

        /* find the ep only once through the transports by this condition */
-       if (tsp->asoc != assoc)
+       if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
                return 0;

        if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
@@ -143,6 +143,7 @@ static int hda_codec_driver_probe(struct device *dev)

 error:
        snd_hda_codec_cleanup_for_unbind(codec);
+       codec->preset = NULL;
        return err;
 }

@@ -159,6 +160,7 @@ static int hda_codec_driver_remove(struct device *dev)
        if (codec->patch_ops.free)
                codec->patch_ops.free(codec);
        snd_hda_codec_cleanup_for_unbind(codec);
+       codec->preset = NULL;
        module_put(dev->driver->owner);
        return 0;
 }
@@ -784,7 +784,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
        snd_array_free(&codec->cvt_setups);
        snd_array_free(&codec->spdif_out);
        snd_array_free(&codec->verbs);
-       codec->preset = NULL;
        codec->follower_dig_outs = NULL;
        codec->spdif_status_reset = 0;
        snd_array_free(&codec->mixers);
@@ -1117,6 +1117,7 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
        HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
@@ -827,7 +827,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
                alc_setup_gpio(codec, 0x02);
                break;
        case 7:
-               alc_setup_gpio(codec, 0x03);
+               alc_setup_gpio(codec, 0x04);
                break;
        case 5:
        default:
@ -1193,18 +1193,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
|
||||
if (pdata) {
|
||||
cs42l56->pdata = *pdata;
|
||||
} else {
|
||||
pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
|
||||
GFP_KERNEL);
|
||||
if (!pdata)
|
||||
return -ENOMEM;
|
||||
|
||||
if (i2c_client->dev.of_node) {
|
||||
ret = cs42l56_handle_of_data(i2c_client,
|
||||
&cs42l56->pdata);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
cs42l56->pdata = *pdata;
|
||||
}
|
||||
|
||||
if (cs42l56->pdata.gpio_nreset) {
|
||||
|
@@ -704,6 +704,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                links[id].num_platforms = ARRAY_SIZE(platform_component);
                links[id].nonatomic = true;
                links[id].dpcm_playback = 1;
+               /* feedback stream or firmware-generated echo reference */
+               links[id].dpcm_capture = 1;
+
                links[id].no_pcm = 1;
                links[id].cpus = &cpus[id];
                links[id].num_cpus = 1;
@ -212,6 +212,10 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
|
||||
int stream_tag;
|
||||
int ret;
|
||||
|
||||
link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
|
||||
if (!link)
|
||||
return -EINVAL;
|
||||
|
||||
/* get stored dma data if resuming from system suspend */
|
||||
link_dev = snd_soc_dai_get_dma_data(dai, substream);
|
||||
if (!link_dev) {
|
||||
@ -232,10 +236,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
|
||||
if (!link)
|
||||
return -EINVAL;
|
||||
|
||||
/* set the stream tag in the codec dai dma params */
|
||||
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
|
||||
snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
|
||||
|
@ -154,3 +154,39 @@
|
||||
.result_unpriv = ACCEPT,
|
||||
.insn_processed = 15,
|
||||
},
|
||||
/* The test performs a conditional 64-bit write to a stack location
|
||||
* fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
|
||||
* then data is read from fp[-8]. This sequence is unsafe.
|
||||
*
|
||||
* The test would be mistakenly marked as safe w/o dst register parent
|
||||
* preservation in verifier.c:copy_register_state() function.
|
||||
*
|
||||
* Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
|
||||
* checkpoint state after conditional 64-bit assignment.
|
||||
*/
|
||||
{
|
||||
"write tracking and register parent chain bug",
|
||||
.insns = {
|
||||
/* r6 = ktime_get_ns() */
|
||||
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
|
||||
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
|
||||
/* r0 = ktime_get_ns() */
|
||||
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
|
||||
/* if r0 > r6 goto +1 */
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
|
||||
/* *(u64 *)(r10 - 8) = 0xdeadbeef */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
|
||||
/* r1 = 42 */
|
||||
BPF_MOV64_IMM(BPF_REG_1, 42),
|
||||
/* *(u8 *)(r10 - 8) = r1 */
|
||||
BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
|
||||
/* r2 = *(u64 *)(r10 - 8) */
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
|
||||
/* exit(0) */
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.flags = BPF_F_TEST_STATE_FREQ,
|
||||
.errstr = "invalid read from stack off -8+1 size 8",
|
||||
.result = REJECT,
|
||||
},
|
||||
|
@@ -1,11 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BUG_H
-#define BUG_H
+#ifndef _LINUX_BUG_H
+#define _LINUX_BUG_H

 #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))

-#define BUILD_BUG_ON(x)
-
 #define BUG() abort()

-#endif /* BUG_H */
+#endif /* _LINUX_BUG_H */
tools/virtio/linux/build_bug.h (new file, 7 lines)
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#define BUILD_BUG_ON(x)
+
+#endif /* _LINUX_BUILD_BUG_H */

tools/virtio/linux/cpumask.h (new file, 7 lines)
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPUMASK_H
+#define _LINUX_CPUMASK_H
+
+#include <linux/kernel.h>
+
+#endif /* _LINUX_CPUMASK_H */

tools/virtio/linux/gfp.h (new file, 7 lines)
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/topology.h>
+
+#endif
@@ -10,6 +10,7 @@
 #include <stdarg.h>

 #include <linux/compiler.h>
+#include <linux/log2.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/printk.h>
tools/virtio/linux/kmsan.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/gfp.h>
+
+inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+                            enum dma_data_direction dir)
+{
+}
+
+#endif /* _LINUX_KMSAN_H */
@@ -2,6 +2,7 @@
 #ifndef SCATTERLIST_H
 #define SCATTERLIST_H
 #include <linux/kernel.h>
+#include <linux/bug.h>

 struct scatterlist {
        unsigned long page_link;
tools/virtio/linux/topology.h (new file, 7 lines)
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+
+#endif /* _LINUX_TOPOLOGY_H */