Merge 5.10.212 into android12-5.10-lts

Changes in 5.10.212
	platform/x86: touchscreen_dmi: Allow partial (prefix) matches for ACPI names
	crypto: virtio/akcipher - Fix stack overflow on memcpy
	mtd: spinand: gigadevice: Support GD5F1GQ5UExxG
	mtd: spinand: gigadevice: Fix the get ecc status issue
	netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter
	net: ip_tunnel: prevent perpetual headroom growth
	tun: Fix xdp_rxq_info's queue_index when detaching
	ipv6: fix potential "struct net" leak in inet6_rtm_getaddr()
	lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is detected
	net: usb: dm9601: fix wrong return value in dm9601_mdio_read
	Bluetooth: Avoid potential use-after-free in hci_error_reset
	Bluetooth: hci_event: Fix wrongly recorded wakeup BD_ADDR
	Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST
	Bluetooth: Enforce validation on max value of connection interval
	netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate()
	rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back
	efi/capsule-loader: fix incorrect allocation size
	power: supply: bq27xxx-i2c: Do not free non existing IRQ
	ALSA: Drop leftover snd-rtctimer stuff from Makefile
	afs: Fix endless loop in directory parsing
	riscv: Sparse-Memory/vmemmap out-of-bounds fix
	tomoyo: fix UAF write bug in tomoyo_write_control()
	gtp: fix use-after-free and null-ptr-deref in gtp_newlink()
	wifi: nl80211: reject iftype change with mesh ID change
	btrfs: dev-replace: properly validate device names
	dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read
	dmaengine: fsl-qdma: init irq after reg initialization
	mmc: core: Fix eMMC initialization with 1-bit bus connection
	mmc: sdhci-xenon: add timeout for PHY init complete
	mmc: sdhci-xenon: fix PHY init clock stability
	pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation
	x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers
	mptcp: fix possible deadlock in subflow diag
	ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks()
	cachefiles: fix memory leak in cachefiles_add_cache()
	fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super
	gpio: 74x164: Enable output pins after registers are reset
	gpiolib: Fix the error path order in gpiochip_add_data_with_key()
	gpio: fix resource unwinding order in error path
	mtd: spinand: gigadevice: fix Quad IO for GD5F1GQ5UExxG
	mptcp: fix double-free on socket dismantle
	Linux 5.10.212

Change-Id: I680869be06e0ddfdbd9f63255616ba316f655cb1
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 211
+SUBLEVEL = 212
EXTRAVERSION =
NAME = Dare mighty things


@ -44,7 +44,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START)
+#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
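
The one-line change above is easier to see with numbers: pfn_to_page() computes vmemmap + pfn, so when RAM does not start at physical address 0 the array base must be biased by the PFN of the RAM base, or every lookup lands beyond the mapped vmemmap region. A minimal userspace sketch with made-up addresses (the real macro uses the kernel's phys_ram_base):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define VMEMMAP_START 0x40000000UL /* hypothetical base of the struct page array */
#define PHYS_RAM_BASE 0x80000000UL /* RAM starts at 2 GiB, not at 0 */

struct page { char pad[64]; };     /* stand-in for the real struct page */

int main(void)
{
	uint64_t pfn = PHYS_RAM_BASE >> PAGE_SHIFT; /* PFN of the first RAM page */

	/* Old macro: assumes PFN 0 is the first array slot, so the first
	 * RAM page indexes 0x80000 entries past the mapping. */
	struct page *vmemmap_old = (struct page *)VMEMMAP_START;

	/* Fixed macro: bias the base by the PFN of the RAM start, so
	 * pfn_to_page() of the first RAM PFN lands on VMEMMAP_START. */
	struct page *vmemmap_new = (struct page *)VMEMMAP_START
				   - (PHYS_RAM_BASE >> PAGE_SHIFT);

	printf("old pfn_to_page: %p (out of bounds)\n", (void *)(vmemmap_old + pfn));
	printf("new pfn_to_page: %p (back in bounds)\n", (void *)(vmemmap_new + pfn));
	return 0;
}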


@ -178,6 +178,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false;
}
+#define MSR_IA32_TME_ACTIVATE 0x982
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
+#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
+#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128 0
+#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
+#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED 0
+#define MKTME_DISABLED 1
+#define MKTME_UNINITIALIZED 2
+static int mktme_status = MKTME_UNINITIALIZED;
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+u64 tme_activate, tme_policy, tme_crypto_algs;
+int keyid_bits = 0, nr_keyids = 0;
+static u64 tme_activate_cpu0 = 0;
+rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+if (mktme_status != MKTME_UNINITIALIZED) {
+if (tme_activate != tme_activate_cpu0) {
+/* Broken BIOS? */
+pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+pr_err_once("x86/tme: MKTME is not usable\n");
+mktme_status = MKTME_DISABLED;
+/* Proceed. We may need to exclude bits from x86_phys_bits. */
+}
+} else {
+tme_activate_cpu0 = tme_activate;
+}
+if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+pr_info_once("x86/tme: not enabled by BIOS\n");
+mktme_status = MKTME_DISABLED;
+return;
+}
+if (mktme_status != MKTME_UNINITIALIZED)
+goto detect_keyid_bits;
+pr_info("x86/tme: enabled by BIOS\n");
+tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+tme_crypto_algs);
+mktme_status = MKTME_DISABLED;
+}
+detect_keyid_bits:
+keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+nr_keyids = (1UL << keyid_bits) - 1;
+if (nr_keyids) {
+pr_info_once("x86/mktme: enabled by BIOS\n");
+pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+} else {
+pr_info_once("x86/mktme: disabled by BIOS\n");
+}
+if (mktme_status == MKTME_UNINITIALIZED) {
+/* MKTME is usable */
+mktme_status = MKTME_ENABLED;
+}
+/*
+ * KeyID bits effectively lower the number of physical address
+ * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+ */
+c->x86_phys_bits -= keyid_bits;
+}
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@ -329,6 +413,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (detect_extended_topology_early(c) < 0)
detect_ht_early(c);
+/*
+ * Adjust the number of physical bits early because it affects the
+ * valid bits of the MTRR mask registers.
+ */
+if (cpu_has(c, X86_FEATURE_TME))
+detect_tme_early(c);
}
static void bsp_init_intel(struct cpuinfo_x86 *c)
@ -489,90 +580,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
-#define MSR_IA32_TME_ACTIVATE 0x982
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
-#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
-#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
-#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED 0
-#define MKTME_DISABLED 1
-#define MKTME_UNINITIALIZED 2
-static int mktme_status = MKTME_UNINITIALIZED;
-static void detect_tme(struct cpuinfo_x86 *c)
-{
-u64 tme_activate, tme_policy, tme_crypto_algs;
-int keyid_bits = 0, nr_keyids = 0;
-static u64 tme_activate_cpu0 = 0;
-rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-if (mktme_status != MKTME_UNINITIALIZED) {
-if (tme_activate != tme_activate_cpu0) {
-/* Broken BIOS? */
-pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
-pr_err_once("x86/tme: MKTME is not usable\n");
-mktme_status = MKTME_DISABLED;
-/* Proceed. We may need to exclude bits from x86_phys_bits. */
-}
-} else {
-tme_activate_cpu0 = tme_activate;
-}
-if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
-pr_info_once("x86/tme: not enabled by BIOS\n");
-mktme_status = MKTME_DISABLED;
-return;
-}
-if (mktme_status != MKTME_UNINITIALIZED)
-goto detect_keyid_bits;
-pr_info("x86/tme: enabled by BIOS\n");
-tme_policy = TME_ACTIVATE_POLICY(tme_activate);
-if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
-pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
-if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
-pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
-tme_crypto_algs);
-mktme_status = MKTME_DISABLED;
-}
-detect_keyid_bits:
-keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
-nr_keyids = (1UL << keyid_bits) - 1;
-if (nr_keyids) {
-pr_info_once("x86/mktme: enabled by BIOS\n");
-pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
-} else {
-pr_info_once("x86/mktme: disabled by BIOS\n");
-}
-if (mktme_status == MKTME_UNINITIALIZED) {
-/* MKTME is usable */
-mktme_status = MKTME_ENABLED;
-}
-/*
- * KeyID bits effectively lower the number of physical address
- * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
- */
-c->x86_phys_bits -= keyid_bits;
-}
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@ -708,9 +715,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_ia32_feat_ctl(c);
-if (cpu_has(c, X86_FEATURE_TME))
-detect_tme(c);
init_intel_misc_features(c);
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
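
Why the call had to move: the MTRR mask registers are programmed from x86_phys_bits, so the KeyID bits that TME steals from the physical address space must be subtracted before MTRR setup runs, not later in init_intel(). A rough userspace sketch of that dependency, with illustrative values only (real masks also carry a valid bit and live in MSRs):

#include <stdio.h>
#include <stdint.h>

/* Build an MTRR-style physical mask for a power-of-two range: valid
 * mask bits run from bit 12 up to the highest physical address bit. */
static uint64_t mtrr_mask(unsigned int phys_bits, uint64_t size)
{
	return ~(size - 1) & (((uint64_t)1 << phys_bits) - 1) & ~0xfffULL;
}

int main(void)
{
	unsigned int phys_bits = 46;  /* as reported by CPUID, made up */
	unsigned int keyid_bits = 6;  /* TME_ACTIVATE_KEYID_BITS(), made up */
	uint64_t size = 1ULL << 30;   /* a 1 GiB variable MTRR range */

	printf("mask before TME adjustment: %#llx\n",
	       (unsigned long long)mtrr_mask(phys_bits, size));
	printf("mask after TME adjustment:  %#llx\n",
	       (unsigned long long)mtrr_mask(phys_bits - keyid_bits, size));
	return 0;
}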


@ -101,7 +101,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
-struct virtio_crypto_ctrl_header *header, void *para,
+struct virtio_crypto_ctrl_header *header,
+struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@ -125,7 +126,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
-memcpy(&ctrl->u, para, sizeof(ctrl->u));
+memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);


@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-FSL_QDMA_CMD_RWTTYPE_OFFSET);
+FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+FSL_QDMA_CMD_PF;
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
-ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-if (ret)
-return ret;
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@ -1243,19 +1241,22 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
-ret = dma_async_device_register(&fsl_qdma->dma_dev);
-if (ret) {
-dev_err(&pdev->dev,
-"Can't register NXP Layerscape qDMA engine.\n");
-return ret;
-}
ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
+ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+if (ret)
+return ret;
+ret = dma_async_device_register(&fsl_qdma->dma_dev);
+if (ret) {
+dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+return ret;
+}
return 0;
}


@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
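
The bug above only bites where pointers and physical addresses differ in size, e.g. a 32-bit kernel with 64-bit physical addressing (LPAE): sizeof(void *) is 4 while the buffer actually stores an 8-byte phys_addr_t, so the allocation was too small and writes spilled into the next heap object. A tiny sketch of the mismatch (the typedef mirrors CONFIG_PHYS_ADDR_T_64BIT and is an assumption here):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t; /* 64-bit physical addresses, as with LPAE */

int main(void)
{
	/* On a 32-bit build this prints 4 vs. 8: sizing the allocation by
	 * sizeof(void *) under-allocates the slot that later holds a
	 * phys_addr_t. */
	printf("sizeof(void *)      = %zu\n", sizeof(void *));
	printf("sizeof(phys_addr_t) = %zu\n", sizeof(phys_addr_t));
	return 0;
}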


@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
-gpiod_set_value_cansleep(chip->gpiod_oe, 1);
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+gpiod_set_value_cansleep(chip->gpiod_oe, 1);
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;


@ -733,11 +733,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
-goto err_remove_acpi_chip;
+goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
-goto err_remove_acpi_chip;
+goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@ -762,13 +762,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
-acpi_gpiochip_remove(gc);
-err_remove_of_chip:
+err_free_hogs:
gpiochip_free_hogs(gc);
+acpi_gpiochip_remove(gc);
+gpiochip_remove_pin_ranges(gc);
+err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
-gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
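
The relabelling above follows the usual rule for kernel error paths: tear down in exactly the reverse order of setup, with each label undoing only what has already succeeded and falling through toward the first step. A generic sketch of the pattern (step/teardown names are placeholders, not gpiolib APIs):

#include <stdio.h>

static int  step_a(void)     { return 0; }             /* e.g. add to list */
static int  step_b(void)     { return 0; }             /* e.g. init masks  */
static int  step_c(void)     { return -1; }            /* fails, for demo  */
static void teardown_b(void) { puts("undo step_b"); }
static void teardown_a(void) { puts("undo step_a"); }

static int setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;   /* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto undo_a;  /* only step_a succeeded */

	ret = step_c();
	if (ret)
		goto undo_b;  /* undo_b falls through into undo_a */

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}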


@ -997,10 +997,12 @@ int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;


@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+u32 reg;
+int err;
+err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+if (err)
+dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+return err;
+}
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+int ret = xenon_check_stability_internal_clk(host);
+if (ret)
+return ret;
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
-/* wait for host eMMC PHY init completes */
-udelay(wait);
-reg = sdhci_readl(host, phy_regs->timing_adj);
-reg &= XENON_PHY_INITIALIZAION;
-if (reg) {
+/*
+ * AC5X spec says bit must be polled until zero.
+ * We see cases in which timeout can take longer
+ * than the standard calculation on AC5X, which is
+ * expected following the spec comment above.
+ * According to the spec, we must wait as long as
+ * it takes for that bit to toggle on AC5X.
+ * Cap that with 100 delay loops so we won't get
+ * stuck here forever:
+ */
+ret = read_poll_timeout(sdhci_readl, reg,
+!(reg & XENON_PHY_INITIALIZAION),
+wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+false, host, phy_regs->timing_adj);
+if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
-wait);
-return -ETIMEDOUT;
-}
+wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
-return 0;
+return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1


@ -13,7 +13,10 @@
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
-#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0
+#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4)
+#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4)
+#define GD5FXGQXXEXXG_REG_STATUS2 0xf0
#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
@ -36,6 +39,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
+SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
@ -102,7 +113,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
-static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
+static int gd5fxgqx_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
@ -114,7 +125,7 @@ static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
+static int gd5fxgqx_variant2_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
@ -127,9 +138,10 @@ static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
-.ecc = gd5fxgq4_variant2_ooblayout_ecc,
-.free = gd5fxgq4_variant2_ooblayout_free,
+/* Valid for Q4/Q5 and Q6 (untested) devices */
+static const struct mtd_ooblayout_ops gd5fxgqx_variant2_ooblayout = {
+.ecc = gd5fxgqx_variant2_ooblayout_ecc,
+.free = gd5fxgqx_variant2_ooblayout_free,
};
static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
@ -165,8 +177,8 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2,
-&status2);
+struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@ -187,6 +199,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@ -203,6 +216,44 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+u8 status)
+{
+u8 status2;
+struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+spinand->scratchbuf);
+int ret;
+switch (status & STATUS_ECC_MASK) {
+case STATUS_ECC_NO_BITFLIPS:
+return 0;
+case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
+/*
+ * Read status2 register to determine a more fine grained
+ * bit error status
+ */
+ret = spi_mem_exec_op(spinand->spimem, &op);
+if (ret)
+return ret;
+/*
+ * 1 ... 4 bits are flipped (and corrected)
+ */
+/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+status2 = *(spinand->scratchbuf);
+return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+case STATUS_ECC_UNCOR_ERROR:
+return -EBADMSG;
+default:
+break;
+}
+return -EINVAL;
+}
static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
@ -282,7 +333,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
-SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UFxxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
@ -292,8 +343,18 @@ static const struct spinand_info gigadevice_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
-SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4ufxxg_ecc_get_status)),
+SPINAND_INFO("GD5F1GQ5UExxG",
+SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
+NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+NAND_ECCREQ(4, 512),
+SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+&write_cache_variants,
+&update_cache_variants),
+SPINAND_HAS_QE_BIT,
+SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+gd5fxgq5xexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
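
A small sketch of the decode gd5fxgq5xexxg_ecc_get_status() performs for the 1-4 bitflips case: the extended status register reports the corrected count minus one in the two ECCSE bits (5:4), so the caller gets back 1..4. Register values here are fabricated:

#include <stdio.h>

#define STATUS_ECC_MASK (3 << 4) /* bits 5:4, as in the SPI-NAND core */

static int decode_q5_bitflips(unsigned char status2)
{
	/* the ECCSE field encodes "bitflips - 1", so add one back */
	return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
}

int main(void)
{
	for (unsigned int eccse = 0; eccse < 4; eccse++)
		printf("ECCSE=%u -> %d bitflips corrected\n",
		       eccse, decode_q5_bitflips((unsigned char)(eccse << 4)));
	return 0;
}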


@ -1406,26 +1406,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
-err = rtnl_link_register(&gtp_link_ops);
+err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
-err = register_pernet_subsys(&gtp_net_ops);
+err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
-goto unreg_rtnl_link;
+goto unreg_pernet_subsys;
err = genl_register_family(&gtp_genl_family);
if (err < 0)
-goto unreg_pernet_subsys;
+goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_pernet_subsys:
-unregister_pernet_subsys(&gtp_net_ops);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;


@ -665,6 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);


@ -231,7 +231,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
-return err;
+return 0;
}
netdev_dbg(dev->net,


@ -2527,7 +2527,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
-if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */


@ -50,7 +50,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
-.acpi_name = "MSSL1680:00",
+.acpi_name = "MSSL1680",
.properties = chuwi_hi8_air_props,
};
@ -1648,7 +1648,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
int error;
if (has_acpi_companion(dev) &&
-!strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+strstarts(client->name, ts_data->acpi_name)) {
error = device_add_properties(dev, ts_data->properties);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);


@ -209,7 +209,9 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
-free_irq(client->irq, di);
+if (client->irq)
+free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);


@ -261,12 +261,15 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+unsigned int peer_enabled_corner;
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
-if (peer && peer->enabled)
-to_active_sleep(peer, peer->corner, &peer_active_corner,
+if (peer && peer->enabled) {
+peer_enabled_corner = max(peer->corner, peer->enable_corner);
+to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
+}
active_corner = max(this_active_corner, peer_active_corner);


@ -423,8 +423,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
dire->u.name[0] == '.' &&
ctx->actor != afs_lookup_filldir &&
ctx->actor != afs_lookup_one_filldir &&
-memcmp(dire->u.name, ".__afs", 6) == 0)
+memcmp(dire->u.name, ".__afs", 6) == 0) {
+ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
continue;
+}
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,


@ -566,6 +566,23 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return ret;
}
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+if (args->start.srcdevid == 0) {
+if (memchr(args->start.srcdev_name, 0,
+sizeof(args->start.srcdev_name)) == NULL)
+return -ENAMETOOLONG;
+} else {
+args->start.srcdev_name[0] = 0;
+}
+if (memchr(args->start.tgtdev_name, 0,
+sizeof(args->start.tgtdev_name)) == NULL)
+return -ENAMETOOLONG;
+return 0;
+}
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
@ -578,10 +595,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
default:
return -EINVAL;
}
-if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
-args->start.tgtdev_name[0] == '\0')
-return -EINVAL;
+ret = btrfs_check_replace_dev_names(args);
+if (ret < 0)
+return ret;
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
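
The heart of btrfs_check_replace_dev_names() is a standard defence for fixed-size name buffers copied from userspace: prove a NUL terminator exists inside the buffer before ever treating it as a string. A standalone sketch of that check:

#include <stdio.h>
#include <string.h>

/* Returns 1 if buf contains a NUL within size bytes, 0 otherwise. */
static int name_terminated(const char *buf, size_t size)
{
	return memchr(buf, 0, size) != NULL;
}

int main(void)
{
	char good[8] = "sda";                             /* NUL-padded */
	char bad[8]  = {'x','x','x','x','x','x','x','x'}; /* no NUL     */

	printf("good: %d\n", name_terminated(good, sizeof(good))); /* prints 1 */
	printf("bad:  %d\n", name_terminated(bad, sizeof(bad)));   /* prints 0 */
	return 0;
}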


@ -245,6 +245,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
kmem_cache_free(cachefiles_object_jar, fsdef);
error_root_object:
cachefiles_end_secure(cache, saved_cred);
+put_cred(cache->cache_cred);
+cache->cache_cred = NULL;
pr_err("Failed to register: %d\n", ret);
return ret;
}
@ -265,6 +267,7 @@ void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
dput(cache->graveyard);
mntput(cache->mnt);
+put_cred(cache->cache_cred);
kfree(cache->rootdirname);
kfree(cache->secctx);


@ -1494,11 +1494,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
-this_cpu_inc(discard_pa_seq);
-e4b->bd_info->bb_free += count;
-if (first < e4b->bd_info->bb_first_free)
-e4b->bd_info->bb_first_free = first;
/* access memory sequentially: check left neighbour,
* clear range and then check right neighbour
*/
@ -1512,23 +1507,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t blocknr;
+/*
+ * Fastcommit replay can free already freed blocks which
+ * corrupts allocation info. Regenerate it.
+ */
+if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+mb_regenerate_buddy(e4b);
+goto check;
+}
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(sbi, block);
-if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
-ext4_grp_locked_error(sb, e4b->bd_group,
-inode ? inode->i_ino : 0,
-blocknr,
-"freeing already freed block (bit %u); block bitmap corrupt.",
-block);
-ext4_mark_group_bitmap_corrupted(
-sb, e4b->bd_group,
+ext4_grp_locked_error(sb, e4b->bd_group,
+inode ? inode->i_ino : 0, blocknr,
+"freeing already freed block (bit %u); block bitmap corrupt.",
+block);
+ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
-} else {
-mb_regenerate_buddy(e4b);
-}
-goto done;
+return;
}
+this_cpu_inc(discard_pa_seq);
+e4b->bd_info->bb_free += count;
+if (first < e4b->bd_info->bb_first_free)
+e4b->bd_info->bb_first_free = first;
/* let's maintain fragments counter */
if (left_is_free && right_is_free)
e4b->bd_info->bb_fragments--;
@ -1553,8 +1556,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
if (first <= last)
mb_buddy_mark_free(e4b, first >> 1, last >> 1);
-done:
mb_set_largest_free_order(sb, e4b->bd_info);
+check:
mb_check_buddy(e4b);
}


@ -1234,6 +1234,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
{
struct hugetlbfs_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
+struct hstate *h;
char *rest;
unsigned long ps;
int opt;
@ -1278,11 +1279,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_pagesize:
ps = memparse(param->string, &rest);
-ctx->hstate = size_to_hstate(ps);
-if (!ctx->hstate) {
+h = size_to_hstate(ps);
+if (!h) {
pr_err("Unsupported page size %lu MB\n", ps >> 20);
return -EINVAL;
}
+ctx->hstate = h;
return 0;
case Opt_min_size:


@ -2318,6 +2318,7 @@ static void hci_error_reset(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+hci_dev_hold(hdev);
BT_DBG("%s", hdev->name);
if (hdev->hw_error)
@ -2325,10 +2326,10 @@ static void hci_error_reset(struct work_struct *work)
else
bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
-if (hci_dev_do_close(hdev))
-return;
+if (!hci_dev_do_close(hdev))
+hci_dev_do_open(hdev);
-hci_dev_do_open(hdev);
+hci_dev_put(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)


@ -4609,9 +4609,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-if (!conn || !hci_conn_ssp_enabled(conn))
+if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
goto unlock;
+/* Assume remote supports SSP since it has triggered this event */
+set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
hci_conn_hold(conn);
if (!hci_dev_test_flag(hdev, HCI_MGMT))
@ -5919,6 +5922,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
+if (max > hcon->le_conn_max_interval)
+return send_conn_param_neg_reply(hdev, handle,
+HCI_ERROR_INVALID_LL_PARAMS);
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
@ -6136,10 +6143,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
* keep track of the bdaddr of the connection event that woke us up.
*/
if (event == HCI_EV_CONN_REQUEST) {
-bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+bacpy(&hdev->wake_addr, &conn_request->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_CONN_COMPLETE) {
-bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_LE_META) {
struct hci_ev_le_meta *le_ev = (void *)skb->data;


@ -5609,7 +5609,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
-err = hci_check_conn_params(min, max, latency, to_multiplier);
+if (max > hcon->le_conn_max_interval) {
+BT_DBG("requested connection interval exceeds current bounds.");
+err = -EINVAL;
+} else {
+err = hci_check_conn_params(min, max, latency, to_multiplier);
+}
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else


@ -4903,10 +4903,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
-struct nlattr *br_spec, *attr = NULL;
+struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 flags = 0;
-bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
@ -4924,11 +4923,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
-if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
if (nla_len(attr) < sizeof(flags))
return -EINVAL;
-have_flags = true;
+br_flags_attr = attr;
flags = nla_get_u16(attr);
}
@ -4972,8 +4971,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
-if (have_flags)
-memcpy(nla_data(attr), &flags, sizeof(flags));
+if (br_flags_attr)
+memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
out:
return err;
}


@ -540,6 +540,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
+static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+{
+/* we must cap headroom to some upperlimit, else pskb_expand_head
+ * will overflow header offsets in skb_headers_offset_update().
+ */
+static const unsigned int max_allowed = 512;
+if (headroom > max_allowed)
+headroom = max_allowed;
+if (headroom > READ_ONCE(dev->needed_headroom))
+WRITE_ONCE(dev->needed_headroom, headroom);
+}
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
@ -613,13 +627,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
-if (headroom > READ_ONCE(dev->needed_headroom))
-WRITE_ONCE(dev->needed_headroom, headroom);
-if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+if (skb_cow_head(skb, headroom)) {
ip_rt_put(rt);
goto tx_dropped;
}
+ip_tunnel_adj_headroom(dev, headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
@ -797,16 +811,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
-if (max_headroom > READ_ONCE(dev->needed_headroom))
-WRITE_ONCE(dev->needed_headroom, max_headroom);
-if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+if (skb_cow_head(skb, max_headroom)) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
kfree_skb(skb);
return;
}
+ip_tunnel_adj_headroom(dev, max_headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
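
Two things changed in the hunks above: skb_cow_head() now expands by what this packet needs rather than by the stored device hint (which another tunnel stacked on this device may have inflated, causing the perpetual growth), and the hint itself is only raised afterwards, through the capped helper. A userspace mirror of just the helper's behaviour, with the 512 cap taken from the patch:

#include <stdio.h>

static unsigned int needed_headroom; /* models dev->needed_headroom */

/* Mirrors ip_tunnel_adj_headroom(): cap the request, then only ever
 * raise the cached hint, after the packet has already been expanded. */
static void adj_headroom(unsigned int headroom)
{
	const unsigned int max_allowed = 512;

	if (headroom > max_allowed)
		headroom = max_allowed;
	if (headroom > needed_headroom)
		needed_headroom = headroom;
}

int main(void)
{
	adj_headroom(100);
	adj_headroom(64);   /* the hint never shrinks */
	adj_headroom(9000); /* capped, so header offsets cannot overflow */
	printf("needed_headroom = %u\n", needed_headroom); /* 512 */
	return 0;
}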


@ -5465,9 +5465,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
-if (!addr)
-return -EINVAL;
+if (!addr) {
+err = -EINVAL;
+goto errout;
+}
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
dev = dev_get_by_index(tgt_net, ifm->ifa_index);


@ -20,6 +20,9 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
u32 flags = 0;
int err;
+if (inet_sk_state_load(sk) == TCP_LISTEN)
+return 0;
start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
if (!start)
return -EMSGSIZE;


@ -2052,8 +2052,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
+static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
+{
+const struct ipv6_pinfo *np = inet6_sk(sk);
+struct ipv6_txoptions *opt;
+struct ipv6_pinfo *newnp;
+newnp = inet6_sk(newsk);
+rcu_read_lock();
+opt = rcu_dereference(np->opt);
+if (opt) {
+opt = ipv6_dup_options(newsk, opt);
+if (!opt)
+net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
+}
+RCU_INIT_POINTER(newnp->opt, opt);
+rcu_read_unlock();
+}
#endif
+static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
+{
+struct ip_options_rcu *inet_opt, *newopt = NULL;
+const struct inet_sock *inet = inet_sk(sk);
+struct inet_sock *newinet;
+newinet = inet_sk(newsk);
+rcu_read_lock();
+inet_opt = rcu_dereference(inet->inet_opt);
+if (inet_opt) {
+newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
+inet_opt->opt.optlen, GFP_ATOMIC);
+if (newopt)
+memcpy(newopt, inet_opt, sizeof(*inet_opt) +
+inet_opt->opt.optlen);
+else
+net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
+}
+RCU_INIT_POINTER(newinet->inet_opt, newopt);
+rcu_read_unlock();
+}
struct sock *mptcp_sk_clone(const struct sock *sk,
const struct mptcp_options_received *mp_opt,
struct request_sock *req)
@ -2073,6 +2115,13 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
__mptcp_init_sock(nsk);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+if (nsk->sk_family == AF_INET6)
+mptcp_copy_ip6_options(nsk, sk);
+else
+#endif
+mptcp_copy_ip_options(nsk, sk);
msk = mptcp_sk(nsk);
msk->local_key = subflow_req->local_key;
msk->token = subflow_req->token;


@ -336,10 +336,20 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
+ctx->family != NFPROTO_INET &&
ctx->family != NFPROTO_BRIDGE &&
ctx->family != NFPROTO_ARP)
return -EOPNOTSUPP;
+ret = nft_chain_validate_hooks(ctx->chain,
+(1 << NF_INET_PRE_ROUTING) |
+(1 << NF_INET_LOCAL_IN) |
+(1 << NF_INET_FORWARD) |
+(1 << NF_INET_LOCAL_OUT) |
+(1 << NF_INET_POST_ROUTING));
+if (ret)
+return ret;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@ -584,10 +594,20 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
+ctx->family != NFPROTO_INET &&
ctx->family != NFPROTO_BRIDGE &&
ctx->family != NFPROTO_ARP)
return -EOPNOTSUPP;
+ret = nft_chain_validate_hooks(ctx->chain,
+(1 << NF_INET_PRE_ROUTING) |
+(1 << NF_INET_LOCAL_IN) |
+(1 << NF_INET_FORWARD) |
+(1 << NF_INET_LOCAL_OUT) |
+(1 << NF_INET_POST_ROUTING));
+if (ret)
+return ret;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);


@ -157,7 +157,7 @@ static inline u32 netlink_group_mask(u32 group)
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
gfp_t gfp_mask)
{
-unsigned int len = skb_end_offset(skb);
+unsigned int len = skb->len;
struct sk_buff *new;
new = alloc_skb(len, gfp_mask);
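
The one-liner above is the classic length-versus-capacity confusion: skb_end_offset() is how much buffer was allocated, skb->len is how much of it holds valid data, and sizing the copy by the former ships whatever stale bytes sit in the unused tail. A userspace sketch of the same shape (struct and sizes invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {                     /* crude stand-in for an sk_buff */
	unsigned char data[256]; /* capacity: "skb_end_offset()"  */
	size_t len;              /* valid bytes: "skb->len"       */
};

int main(void)
{
	struct buf b = { .len = 16 };
	memset(b.data, 0xAA, b.len); /* only 16 bytes hold real data */

	/* Buggy: size the copy by capacity, carrying 240 extra bytes along. */
	unsigned char *leaky = malloc(sizeof(b.data));
	memcpy(leaky, b.data, sizeof(b.data));

	/* Fixed: size the copy by the data actually present. */
	unsigned char *fixed = malloc(b.len);
	memcpy(fixed, b.data, b.len);

	printf("leaky copy: %zu bytes, valid data: %zu bytes\n",
	       sizeof(b.data), b.len);
	free(leaky);
	free(fixed);
	return 0;
}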


@ -3775,6 +3775,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (ntype != NL80211_IFTYPE_MESH_POINT)
return -EINVAL;
+if (otype != NL80211_IFTYPE_MESH_POINT)
+return -EINVAL;
if (netif_running(dev))
return -EBUSY;


@ -2657,13 +2657,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
{
int error = buffer_len;
size_t avail_len = buffer_len;
-char *cp0 = head->write_buf;
+char *cp0;
int idx;
if (!head->write)
return -EINVAL;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+cp0 = head->write_buf;
head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */


@ -32,7 +32,6 @@ snd-pcm-dmaengine-objs := pcm_dmaengine.o
snd-rawmidi-objs := rawmidi.o
snd-timer-objs := timer.o
snd-hrtimer-objs := hrtimer.o
-snd-rtctimer-objs := rtctimer.o
snd-hwdep-objs := hwdep.o
snd-seq-device-objs := seq_device.o