This is the 5.10.179 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmRI7pUACgkQONu9yGCS
 aT4cCRAA0YwtiFA5PDxWdBVW2f/6ad7NL4cCUATt7yd68j22SKifIxmsI4J3WnmT
 K8p7yvc7WstuvCyoRT+9LpR969jDa/ao5jQQDky+9nFn39RK2pUQ1S4tQhRr0QWP
 /QrVbecT4X3rn126JhEMauR97Ma5yp0XMj9lOVIac40irf0UyRrvNHciGLfL37Zy
 2Q7AOOJGrA9IREpj+uaG4r8QWZtvVYMCZkIgqZDdnEgfjZew+2w8j+4boL6anxpM
 0f+6ZFT5OHUabwuBsw+4ee6eRE0K3iaAzde8pIZ2y1/ihYgQ+VlMwcLRncuE/34X
 dUG1aQyfbcMdukzWO2fay0on/7NF/U2ljS8WTFjWeCGWXzKRxxbmgXD/WRpBba6V
 NZQB/LroXv+8HVAzlfnZoHD9ojRg8b3exxjy70hUvgAING2CXMqX7KILalFKQvBz
 Ish5e5cxUBP2khMo1caPCU04dy3t/CF68UBrx4s8+RJFvGBmTykhfUx+DhS8usmu
 y0GrvyBfCXb1CW56ZZaip2jLv5IiOUL9KzKpPli1PV9K+He6aa2mTtvKzVBUalZf
 qVzMTifW6JskpxW58I0xKqiaHY5pZVfv0EX65Gs0gVYskSpSLu5MINMvBl5F1sDf
 DdrJ+ZivMUNU5eGUf99IQgXuYFPWigEzsXQRfwHr78kFP4wIPxg=
 =Ubp5
 -----END PGP SIGNATURE-----

Merge 5.10.179 into android12-5.10-lts

Changes in 5.10.179
	ARM: dts: rockchip: fix a typo error for rk3288 spdif node
	arm64: dts: qcom: ipq8074-hk01: enable QMP device, not the PHY node
	arm64: dts: meson-g12-common: specify full DMC range
	arm64: dts: imx8mm-evk: correct pmic clock source
	netfilter: br_netfilter: fix recent physdev match breakage
	regulator: fan53555: Explicitly include bits header
	net: sched: sch_qfq: prevent slab-out-of-bounds in qfq_activate_agg
	virtio_net: bugfix overflow inside xdp_linearize_page()
	sfc: Split STATE_READY in to STATE_NET_DOWN and STATE_NET_UP.
	sfc: Fix use-after-free due to selftest_work
	netfilter: nf_tables: fix ifdef to also consider nf_tables=m
	i40e: fix accessing vsi->active_filters without holding lock
	i40e: fix i40e_setup_misc_vector() error handling
	mlxfw: fix null-ptr-deref in mlxfw_mfa2_tlv_next()
	net: rpl: fix rpl header size calculation
	mlxsw: pci: Fix possible crash during initialization
	bpf: Fix incorrect verifier pruning due to missing register precision taints
	e1000e: Disable TSO on i219-LM card to increase speed
	f2fs: Fix f2fs_truncate_partial_nodes ftrace event
	Input: i8042 - add quirk for Fujitsu Lifebook A574/H
	selftests: sigaltstack: fix -Wuninitialized
	scsi: megaraid_sas: Fix fw_crash_buffer_show()
	scsi: core: Improve scsi_vpd_inquiry() checks
	net: dsa: b53: mmap: add phy ops
	s390/ptrace: fix PTRACE_GET_LAST_BREAK error handling
	nvme-tcp: fix a possible UAF when failing to allocate an io queue
	xen/netback: use same error messages for same errors
	powerpc/doc: Fix htmldocs errors
	xfs: drop submit side trans alloc for append ioends
	iio: light: tsl2772: fix reading proximity-diodes from device tree
	nilfs2: initialize unused bytes in segment summary blocks
	memstick: fix memory leak if card device is never registered
	kernel/sys.c: fix and improve control flow in __sys_setres[ug]id()
	mmc: sdhci_am654: Set HIGH_SPEED_ENA for SDR12 and SDR25
	mm/khugepaged: check again on anon uffd-wp during isolation
	sched/uclamp: Make task_fits_capacity() use util_fits_cpu()
	sched/uclamp: Fix fits_capacity() check in feec()
	sched/uclamp: Make select_idle_capacity() use util_fits_cpu()
	sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()
	sched/uclamp: Make cpu_overutilized() use util_fits_cpu()
	sched/uclamp: Cater for uclamp in find_energy_efficient_cpu()'s early exit condition
	sched/fair: Detect capacity inversion
	sched/fair: Consider capacity inversion in util_fits_cpu()
	sched/uclamp: Fix a uninitialized variable warnings
	sched/fair: Fixes for capacity inversion detection
	MIPS: Define RUNTIME_DISCARD_EXIT in LD script
	docs: futex: Fix kernel-doc references after code split-up preparation
	purgatory: fix disabling debug info
	virtiofs: clean up error handling in virtio_fs_get_tree()
	virtiofs: split requests that exceed virtqueue size
	fuse: check s_root when destroying sb
	fuse: fix attr version comparison in fuse_read_update_size()
	fuse: always revalidate rename target dentry
	fuse: fix deadlock between atomic O_TRUNC and page invalidation
	Revert "ext4: fix use-after-free in ext4_xattr_set_entry"
	ext4: remove duplicate definition of ext4_xattr_ibody_inline_set()
	ext4: fix use-after-free in ext4_xattr_set_entry
	udp: Call inet6_destroy_sock() in setsockopt(IPV6_ADDRFORM).
	tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct().
	inet6: Remove inet6_destroy_sock() in sk->sk_prot->destroy().
	dccp: Call inet6_destroy_sock() via sk->sk_destruct().
	sctp: Call inet6_destroy_sock() via sk->sk_destruct().
	pwm: meson: Explicitly set .polarity in .get_state()
	pwm: iqs620a: Explicitly set .polarity in .get_state()
	pwm: hibvt: Explicitly set .polarity in .get_state()
	iio: adc: at91-sama5d2_adc: fix an error code in at91_adc_allocate_trigger()
	ASoC: fsl_asrc_dma: fix potential null-ptr-deref
	ASN.1: Fix check for strdup() success
	Linux 5.10.179

Change-Id: I54e476aa9b199a4711a091c77583739ed82af5ad
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-06-01 11:46:18 +00:00
commit 4c20c2c837
78 changed files with 742 additions and 376 deletions


@@ -1358,7 +1358,7 @@ Mutex API reference
 Futex API reference
 ===================
-.. kernel-doc:: kernel/futex.c
+.. kernel-doc:: kernel/futex/core.c
    :internal:
 Further reading


@@ -1,6 +1,6 @@
 ============================
 NUMA resource associativity
-=============================
+============================
 Associativity represents the groupings of the various platform resources into
 domains of substantially similar mean performance relative to resources outside
@@ -20,11 +20,11 @@ A value of 1 indicates the usage of Form 1 associativity. For Form 2 associativi
 bit 2 of byte 5 in the "ibm,architecture-vec-5" property is used.
 Form 0
------
+------
 Form 0 associativity supports only two NUMA distances (LOCAL and REMOTE).
 Form 1
------
+------
 With Form 1 a combination of ibm,associativity-reference-points, and ibm,associativity
 device tree properties are used to determine the NUMA distance between resource groups/domains.
@@ -78,17 +78,18 @@ numa-lookup-index-table.
 For ex:
 ibm,numa-lookup-index-table = <3 0 8 40>;
-ibm,numa-distace-table = <9>, /bits/ 8 < 10 20 80
-20 10 160
-80 160 10>;
-  | 0 8 40
---|------------
-  |
-0 | 10 20 80
-  |
-8 | 20 10 160
-  |
-40| 80 160 10
+ibm,numa-distace-table = <9>, /bits/ 8 < 10 20 80 20 10 160 80 160 10>;
+
+::
+
+  | 0 8 40
+--|------------
+0 | 10 20 80
+8 | 20 10 160
+40| 80 160 10
 A possible "ibm,associativity" property for resources in node 0, 8 and 40


@@ -7,6 +7,7 @@ powerpc
 .. toctree::
    :maxdepth: 1
+   associativity
    booting
    bootwrapper
    cpu_families


@@ -1400,7 +1400,7 @@ Riferimento per l'API dei Mutex
 Riferimento per l'API dei Futex
 ===============================
-.. kernel-doc:: kernel/futex.c
+.. kernel-doc:: kernel/futex/core.c
    :internal:
 Approfondimenti


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 178
+SUBLEVEL = 179
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -959,7 +959,7 @@ wdt: watchdog@ff800000 {
 status = "disabled";
 };
-spdif: sound@ff88b0000 {
+spdif: sound@ff8b0000 {
 compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
 reg = <0x0 0xff8b0000 0x0 0x10000>;
 #sound-dai-cells = <0>;


@@ -1604,10 +1604,9 @@ usb2_phy0: phy@36000 {
 dmc: bus@38000 {
 compatible = "simple-bus";
-reg = <0x0 0x38000 0x0 0x400>;
 #address-cells = <2>;
 #size-cells = <2>;
-ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
 canvas: video-lut@48 {
 compatible = "amlogic,canvas";


@@ -128,7 +128,7 @@ pmic@4b {
 rohm,reset-snvs-powered;
 #clock-cells = <0>;
-clocks = <&osc_32k 0>;
+clocks = <&osc_32k>;
 clock-output-names = "clk-32k-out";
 regulators {


@@ -60,11 +60,11 @@ &pcie1 {
 perst-gpio = <&tlmm 58 0x1>;
 };
-&pcie_phy0 {
+&pcie_qmp0 {
 status = "okay";
 };
-&pcie_phy1 {
+&pcie_qmp1 {
 status = "okay";
 };


@@ -15,6 +15,8 @@
 #define EMITS_PT_NOTE
 #endif
+#define RUNTIME_DISCARD_EXIT
+
 #include <asm-generic/vmlinux.lds.h>
 #undef mips


@@ -500,9 +500,7 @@ long arch_ptrace(struct task_struct *child, long request,
 }
 return 0;
 case PTRACE_GET_LAST_BREAK:
-put_user(child->thread.last_break,
-(unsigned long __user *) data);
-return 0;
+return put_user(child->thread.last_break, (unsigned long __user *)data);
 case PTRACE_ENABLE_TE:
 if (!MACHINE_HAS_TE)
 return -EIO;
@@ -854,9 +852,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 }
 return 0;
 case PTRACE_GET_LAST_BREAK:
-put_user(child->thread.last_break,
-(unsigned int __user *) data);
-return 0;
+return put_user(child->thread.last_break, (unsigned int __user *)data);
 }
 return compat_ptrace_request(child, request, addr, data);
 }


@@ -64,8 +64,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o += $(PURGATORY_CFLAGS)
-AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2
-AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2
+asflags-remove-y += -g -Wa,-gdwarf-2
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 $(call if_changed,ld)


@@ -1002,7 +1002,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
 trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
 indio->id, trigger_name);
 if (!trig)
-return NULL;
+return ERR_PTR(-ENOMEM);
 trig->dev.parent = indio->dev.parent;
 iio_trigger_set_drvdata(trig, indio);


@@ -606,6 +606,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
 return -EINVAL;
 }
 }
+chip->settings.prox_diode = prox_diode_mask;
 return 0;
 }


@@ -601,6 +601,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
 },
 .driver_data = (void *)(SERIO_QUIRK_NOMUX)
 },
+{
+/* Fujitsu Lifebook A574/H */
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
+},
+.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+},
 {
 /* Gigabyte M912 */
 .matches = {


@@ -412,6 +412,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
 return card;
 err_out:
 host->card = old_card;
+kfree_const(card->dev.kobj.name);
 kfree(card);
 return NULL;
 }
@@ -470,8 +471,10 @@ static void memstick_check(struct work_struct *work)
 put_device(&card->dev);
 host->card = NULL;
 }
-} else
+} else {
+kfree_const(card->dev.kobj.name);
 kfree(card);
+}
 }
 out_power_off:


@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
 */
 case MMC_TIMING_SD_HS:
 case MMC_TIMING_MMC_HS:
-case MMC_TIMING_UHS_SDR12:
-case MMC_TIMING_UHS_SDR25:
 val &= ~SDHCI_CTRL_HISPD;
 }
 }


@@ -215,6 +215,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
 return 0;
 }
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+u16 *value)
+{
+return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+u16 value)
+{
+return -EIO;
+}
+
 static const struct b53_io_ops b53_mmap_ops = {
 .read8 = b53_mmap_read8,
 .read16 = b53_mmap_read16,
@@ -226,6 +238,8 @@ static const struct b53_io_ops b53_mmap_ops = {
 .write32 = b53_mmap_write32,
 .write48 = b53_mmap_write48,
 .write64 = b53_mmap_write64,
+.phy_read16 = b53_mmap_phy_read16,
+.phy_write16 = b53_mmap_phy_write16,
 };
 static int b53_mmap_probe(struct platform_device *pdev)


@@ -5294,31 +5294,6 @@ static void e1000_watchdog_task(struct work_struct *work)
 ew32(TARC(0), tarc0);
 }
-/* disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues
- */
-if (!(adapter->flags & FLAG_TSO_FORCE)) {
-switch (adapter->link_speed) {
-case SPEED_10:
-case SPEED_100:
-e_info("10/100 speed: disabling TSO\n");
-netdev->features &= ~NETIF_F_TSO;
-netdev->features &= ~NETIF_F_TSO6;
-break;
-case SPEED_1000:
-netdev->features |= NETIF_F_TSO;
-netdev->features |= NETIF_F_TSO6;
-break;
-default:
-/* oops */
-break;
-}
-if (hw->mac.type == e1000_pch_spt) {
-netdev->features &= ~NETIF_F_TSO;
-netdev->features &= ~NETIF_F_TSO6;
-}
-}
 /* enable transmits in the hardware, need to do this
  * after setting TARC(0)
  */
@@ -7477,6 +7452,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 NETIF_F_RXCSUM |
 NETIF_F_HW_CSUM);
+/* disable TSO for pcie and 10/100 speeds to avoid
+ * some hardware issues and for i219 to fix transfer
+ * speed being capped at 60%
+ */
+if (!(adapter->flags & FLAG_TSO_FORCE)) {
+switch (adapter->link_speed) {
+case SPEED_10:
+case SPEED_100:
+e_info("10/100 speed: disabling TSO\n");
+netdev->features &= ~NETIF_F_TSO;
+netdev->features &= ~NETIF_F_TSO6;
+break;
+case SPEED_1000:
+netdev->features |= NETIF_F_TSO;
+netdev->features |= NETIF_F_TSO6;
+break;
+default:
+/* oops */
+break;
+}
+if (hw->mac.type == e1000_pch_spt) {
+netdev->features &= ~NETIF_F_TSO;
+netdev->features &= ~NETIF_F_TSO6;
+}
+}
+
 /* Set user-changeable features (subset of all device features) */
 netdev->hw_features = netdev->features;
 netdev->hw_features |= NETIF_F_RXFCS;


@@ -10448,8 +10448,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 pf->hw.aq.asq_last_status));
 }
 /* reinit the misc interrupt */
-if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 ret = i40e_setup_misc_vector(pf);
+if (ret)
+goto end_unlock;
+}
 /* Add a filter to drop all Flow control frames from any VSI from being
  * transmitted. By doing so we stop a malicious VF from sending out
@@ -13458,15 +13461,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 vsi->id = ctxt.vsi_number;
 }
-vsi->active_filters = 0;
-clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 spin_lock_bh(&vsi->mac_filter_hash_lock);
+vsi->active_filters = 0;
 /* If macvlan filters already exist, force them to get loaded */
 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 f->state = I40E_FILTER_NEW;
 f_count++;
 }
 spin_unlock_bh(&vsi->mac_filter_hash_lock);
+clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 if (f_count) {
 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;


@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
 if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
 multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+if (!multi)
+return NULL;
 tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
 }


@@ -26,7 +26,7 @@
 #define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 400
 #define MLXSW_PCI_FW_READY 0xA1844
 #define MLXSW_PCI_FW_READY_MASK 0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC 0x5E


@@ -96,6 +96,8 @@ static int ef100_net_stop(struct net_device *net_dev)
 efx_mcdi_free_vis(efx);
 efx_remove_interrupts(efx);
+efx->state = STATE_NET_DOWN;
+
 return 0;
 }
@@ -172,6 +174,8 @@ static int ef100_net_open(struct net_device *net_dev)
 efx_link_status_changed(efx);
 mutex_unlock(&efx->mac_lock);
+efx->state = STATE_NET_UP;
+
 return 0;
 fail:
@@ -272,7 +276,7 @@ int ef100_register_netdev(struct efx_nic *efx)
 /* Always start with carrier off; PHY events will detect the link */
 netif_carrier_off(net_dev);
-efx->state = STATE_READY;
+efx->state = STATE_NET_DOWN;
 rtnl_unlock();
 efx_init_mcdi_logging(efx);


@@ -105,14 +105,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
 u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
-do { \
-if ((efx->state == STATE_READY) || \
-(efx->state == STATE_RECOVERY) || \
-(efx->state == STATE_DISABLED)) \
-ASSERT_RTNL(); \
-} while (0)
-
 /**************************************************************************
  *
  * Port handling
@@ -377,6 +369,8 @@ static int efx_probe_all(struct efx_nic *efx)
 if (rc)
 goto fail5;
+efx->state = STATE_NET_DOWN;
+
 return 0;
 fail5:
@@ -543,7 +537,9 @@ int efx_net_open(struct net_device *net_dev)
 efx_start_all(efx);
 if (efx->state == STATE_DISABLED || efx->reset_pending)
 netif_device_detach(efx->net_dev);
-efx_selftest_async_start(efx);
+else
+efx->state = STATE_NET_UP;
+
 return 0;
 }
@@ -721,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
 * already requested. If so, the NIC is probably hosed so we
 * abort.
 */
-efx->state = STATE_READY;
-smp_mb(); /* ensure we change state before checking reset_pending */
 if (efx->reset_pending) {
 netif_err(efx, probe, efx->net_dev,
 "aborting probe due to scheduled reset\n");
@@ -750,6 +744,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 efx_associate(efx);
+efx->state = STATE_NET_DOWN;
+
 rtnl_unlock();
 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -851,7 +847,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
 /* Flush reset_work. It can no longer be scheduled since we
  * are not READY.
  */
-BUG_ON(efx->state == STATE_READY);
+WARN_ON(efx_net_active(efx->state));
 efx_flush_reset_workqueue(efx);
 efx_disable_interrupts(efx);
@@ -1196,13 +1192,13 @@ static int efx_pm_freeze(struct device *dev)
 rtnl_lock();
-if (efx->state != STATE_DISABLED) {
-efx->state = STATE_UNINIT;
+if (efx_net_active(efx->state)) {
 efx_device_detach_sync(efx);
 efx_stop_all(efx);
 efx_disable_interrupts(efx);
+
+efx->state = efx_freeze(efx->state);
 }
 rtnl_unlock();
@@ -1217,7 +1213,7 @@ static int efx_pm_thaw(struct device *dev)
 rtnl_lock();
-if (efx->state != STATE_DISABLED) {
+if (efx_frozen(efx->state)) {
 rc = efx_enable_interrupts(efx);
 if (rc)
 goto fail;
@@ -1230,7 +1226,7 @@ static int efx_pm_thaw(struct device *dev)
 efx_device_attach_if_not_resetting(efx);
-efx->state = STATE_READY;
+efx->state = efx_thaw(efx->state);
 efx->type->resume_wol(efx);
 }


@@ -542,6 +542,8 @@ void efx_start_all(struct efx_nic *efx)
 /* Start the hardware monitor if there is one */
 efx_start_monitor(efx);
+efx_selftest_async_start(efx);
+
 /* Link state detection is normally event-driven; we have
  * to poll now because we could have missed a change
  */
@@ -897,7 +899,7 @@ static void efx_reset_work(struct work_struct *data)
 * have changed by now. Now that we have the RTNL lock,
 * it cannot change again.
 */
-if (efx->state == STATE_READY)
+if (efx_net_active(efx->state))
 (void)efx_reset(efx, method);
 rtnl_unlock();
@@ -907,7 +909,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 {
 enum reset_type method;
-if (efx->state == STATE_RECOVERY) {
+if (efx_recovering(efx->state)) {
 netif_dbg(efx, drv, efx->net_dev,
 "recovering: skip scheduling %s reset\n",
 RESET_TYPE(type));
@@ -942,7 +944,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 /* If we're not READY then just leave the flags set as the cue
  * to abort probing or reschedule the reset later.
  */
-if (READ_ONCE(efx->state) != STATE_READY)
+if (!efx_net_active(READ_ONCE(efx->state)))
 return;
 /* efx_process_channel() will no longer read events once a
@@ -1214,7 +1216,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
 rtnl_lock();
 if (efx->state != STATE_DISABLED) {
-efx->state = STATE_RECOVERY;
+efx->state = efx_recover(efx->state);
 efx->reset_pending = 0;
 efx_device_detach_sync(efx);
@@ -1268,7 +1270,7 @@ static void efx_io_resume(struct pci_dev *pdev)
 netif_err(efx, hw, efx->net_dev,
 "efx_reset failed after PCI error (%d)\n", rc);
 } else {
-efx->state = STATE_READY;
+efx->state = efx_recovered(efx->state);
 netif_dbg(efx, hw, efx->net_dev,
 "Done resetting and resuming IO after PCI error.\n");
 }


@@ -45,9 +45,7 @@ int efx_reconfigure_port(struct efx_nic *efx);
 #define EFX_ASSERT_RESET_SERIALISED(efx) \
 do { \
-if ((efx->state == STATE_READY) || \
-(efx->state == STATE_RECOVERY) || \
-(efx->state == STATE_DISABLED)) \
+if (efx->state != STATE_UNINIT) \
 ASSERT_RTNL(); \
 } while (0)
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
 static inline int efx_check_disabled(struct efx_nic *efx)
 {
-if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
 netif_err(efx, drv, efx->net_dev,
 "device is disabled due to earlier errors\n");
 return -EIO;


@@ -137,7 +137,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
 if (!efx_tests)
 goto fail;
-if (efx->state != STATE_READY) {
+if (!efx_net_active(efx->state)) {
 rc = -EBUSY;
 goto out;
 }


@@ -627,12 +627,54 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 enum nic_state {
-STATE_UNINIT = 0, /* device being probed/removed or is frozen */
-STATE_READY = 1, /* hardware ready and netdev registered */
-STATE_DISABLED = 2, /* device disabled due to hardware errors */
-STATE_RECOVERY = 3, /* device recovering from PCI error */
+STATE_UNINIT = 0, /* device being probed/removed */
+STATE_NET_DOWN, /* hardware probed and netdev registered */
+STATE_NET_UP, /* ready for traffic */
+STATE_DISABLED, /* device disabled due to hardware errors */
+
+STATE_RECOVERY = 0x100,/* recovering from PCI error */
+STATE_FROZEN = 0x200, /* frozen by power management */
 };
+
+static inline bool efx_net_active(enum nic_state state)
+{
+return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+WARN_ON(!efx_net_active(state));
+return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+WARN_ON(!efx_frozen(state));
+return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+WARN_ON(!efx_net_active(state));
+return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+WARN_ON(!efx_recovering(state));
+return state & ~STATE_RECOVERY;
+}
+
 /* Forward declaration */
 struct efx_nic;


@@ -646,8 +646,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 int page_off,
 unsigned int *len)
 {
-struct page *page = alloc_page(GFP_ATOMIC);
+int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+struct page *page;
+
+if (page_off + *len + tailroom > PAGE_SIZE)
+return NULL;
+
+page = alloc_page(GFP_ATOMIC);
 if (!page)
 return NULL;
@@ -655,7 +660,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 page_off += *len;
 while (--*num_buf) {
-int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 unsigned int buflen;
 void *buf;
 int off;


@@ -996,10 +996,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 /* No crossing a page as the payload mustn't fragment. */
 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-netdev_err(queue->vif->dev,
-"txreq.offset: %u, size: %u, end: %lu\n",
-txreq.offset, txreq.size,
-(unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+txreq.offset, txreq.size);
 xenvif_fatal_tx_err(queue->vif);
 break;
 }


@@ -1535,22 +1535,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 if (ret)
 goto err_init_connect;
-queue->rd_enabled = true;
 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-nvme_tcp_init_recv_ctx(queue);
-
-write_lock_bh(&queue->sock->sk->sk_callback_lock);
-queue->sock->sk->sk_user_data = queue;
-queue->state_change = queue->sock->sk->sk_state_change;
-queue->data_ready = queue->sock->sk->sk_data_ready;
-queue->write_space = queue->sock->sk->sk_write_space;
-queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-queue->sock->sk->sk_ll_usec = 1;
-#endif
-write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 return 0;
@@ -1569,7 +1554,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 return ret;
 }
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 struct socket *sock = queue->sock;
@@ -1584,7 +1569,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-nvme_tcp_restore_sock_calls(queue);
+nvme_tcp_restore_sock_ops(queue);
 cancel_work_sync(&queue->io_work);
 }
@@ -1599,21 +1584,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 mutex_unlock(&queue->queue_lock);
 }
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+write_lock_bh(&queue->sock->sk->sk_callback_lock);
+queue->sock->sk->sk_user_data = queue;
+queue->state_change = queue->sock->sk->sk_state_change;
+queue->data_ready = queue->sock->sk->sk_data_ready;
+queue->write_space = queue->sock->sk->sk_write_space;
+queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+queue->sock->sk->sk_ll_usec = 1;
+#endif
+write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 int ret;
+
+queue->rd_enabled = true;
+nvme_tcp_init_recv_ctx(queue);
+nvme_tcp_setup_sock_ops(queue);
+
 if (idx)
 ret = nvmf_connect_io_queue(nctrl, idx, false);
 else
 ret = nvmf_connect_admin_queue(nctrl);
 if (!ret) {
-set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 } else {
-if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+__nvme_tcp_stop_queue(queue);
 dev_err(nctrl->device,
 "failed to connect queue: %d ret=%d\n", idx, ret);
 }


@@ -146,6 +146,7 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
 state->enabled = (PWM_ENABLE_MASK & value);
+state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
 }
 static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,


@@ -132,6 +132,7 @@ static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 mutex_unlock(&iqs620_pwm->lock);
 state->period = IQS620_PWM_PERIOD_NS;
+state->polarity = PWM_POLARITY_NORMAL;
 }
 static int iqs620_pwm_notifier(struct notifier_block *notifier,


@@ -168,6 +168,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 duty = state->duty_cycle;
 period = state->period;
+/*
+ * Note this is wrong. The result is an output wave that isn't really
+ * inverted and so is wrongly identified by .get_state as normal.
+ * Fixing this needs some care however as some machines might rely on
+ * this.
+ */
 if (state->polarity == PWM_POLARITY_INVERSED)
 duty = period - duty;
@@ -366,6 +372,7 @@ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 state->period = 0;
 state->duty_cycle = 0;
 }
+state->polarity = PWM_POLARITY_NORMAL;
 }
 static const struct pwm_ops meson_pwm_ops = {


@@ -8,18 +8,19 @@
 // Copyright (c) 2012 Marvell Technology Ltd.
 // Yunfan Zhang <yfzhang@marvell.com>
-#include <linux/module.h>
-#include <linux/param.h>
+#include <linux/bits.h>
 #include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/param.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/regulator/driver.h>
+#include <linux/regulator/fan53555.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/regulator/fan53555.h>
 /* Voltage setting */
 #define FAN53555_VSEL0 0x00


@@ -3248,7 +3248,7 @@ fw_crash_buffer_show(struct device *cdev,
 spin_lock_irqsave(&instance->crashdump_lock, flags);
 buff_offset = instance->fw_crash_buffer_offset;
-if (!instance->crash_dump_buf &&
+if (!instance->crash_dump_buf ||
 !((instance->fw_crash_state == AVAILABLE) ||
 (instance->fw_crash_state == COPYING))) {
 dev_err(&instance->pdev->dev,


@@ -317,11 +317,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 if (result)
 return -EIO;
-/* Sanity check that we got the page back that we asked for */
+/*
+ * Sanity check that we got the page back that we asked for and that
+ * the page size is not 0.
+ */
 if (buffer[1] != page)
 return -EIO;
-return get_unaligned_be16(&buffer[2]) + 4;
+result = get_unaligned_be16(&buffer[2]);
+if (!result)
+return -EIO;
+
+return result + 4;
 }
 /**


@@ -205,7 +205,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
 if (inode && fuse_is_bad(inode))
 goto invalid;
 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
-(flags & (LOOKUP_EXCL | LOOKUP_REVAL))) {
+(flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
 struct fuse_entry_out outarg;
 FUSE_ARGS(args);
 struct fuse_forget_link *forget;
@@ -578,6 +578,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
 struct fuse_entry_out outentry;
 struct fuse_inode *fi;
 struct fuse_file *ff;
+bool trunc = flags & O_TRUNC;
 /* Userspace expects S_IFREG in create mode */
 BUG_ON((mode & S_IFMT) != S_IFREG);
@@ -646,6 +647,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
 } else {
 file->private_data = ff;
 fuse_finish_open(inode, file);
+if (fm->fc->atomic_o_trunc && trunc)
+truncate_pagecache(inode, 0);
+else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+invalidate_inode_pages2(inode->i_mapping);
 }
 return err;


@@ -206,14 +206,10 @@ void fuse_finish_open(struct inode *inode, struct file *file)
 fi->attr_version = atomic64_inc_return(&fc->attr_version);
 i_size_write(inode, 0);
 spin_unlock(&fi->lock);
-truncate_pagecache(inode, 0);
 fuse_invalidate_attr(inode);
 if (fc->writeback_cache)
 file_update_time(file);
-} else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
-invalidate_inode_pages2(inode->i_mapping);
 }
 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
 fuse_link_write_file(file);
 }
@@ -236,30 +232,39 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 if (err)
 return err;
-if (is_wb_truncate || dax_truncate) {
+if (is_wb_truncate || dax_truncate)
 inode_lock(inode);
-fuse_set_nowrite(inode);
-}
 if (dax_truncate) {
 down_write(&get_fuse_inode(inode)->i_mmap_sem);
 err = fuse_dax_break_layouts(inode, 0, 0);
 if (err)
-goto out;
+goto out_inode_unlock;
 }
+if (is_wb_truncate || dax_truncate)
+fuse_set_nowrite(inode);
+
 err = fuse_do_open(fm, get_node_id(inode), file, isdir);
 if (!err)
 fuse_finish_open(inode, file);
-out:
+if (is_wb_truncate || dax_truncate)
+fuse_release_nowrite(inode);
+if (!err) {
+struct fuse_file *ff = file->private_data;
+
+if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
+truncate_pagecache(inode, 0);
+else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+invalidate_inode_pages2(inode->i_mapping);
+}
 if (dax_truncate)
 up_write(&get_fuse_inode(inode)->i_mmap_sem);
-if (is_wb_truncate | dax_truncate) {
-fuse_release_nowrite(inode);
+out_inode_unlock:
+if (is_wb_truncate || dax_truncate)
 inode_unlock(inode);
-}
 return err;
 }
@@ -784,7 +789,7 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
 struct fuse_inode *fi = get_fuse_inode(inode);
 spin_lock(&fi->lock);
-if (attr_ver == fi->attr_version && size < inode->i_size &&
+if (attr_ver >= fi->attr_version && size < inode->i_size &&
 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
 fi->attr_version = atomic64_inc_return(&fc->attr_version);
 i_size_write(inode, size);


@@ -571,6 +571,9 @@ struct fuse_conn {
 /** Maxmum number of pages that can be used in a single request */
 unsigned int max_pages;
+/** Constrain ->max_pages to this value during feature negotiation */
+unsigned int max_pages_limit;
+
 /** Input queue */
 struct fuse_iqueue iq;


@@ -713,6 +713,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
 fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
 fc->user_ns = get_user_ns(user_ns);
 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
+fc->max_pages_limit = FUSE_MAX_MAX_PAGES;
 INIT_LIST_HEAD(&fc->mounts);
 list_add(&fm->fc_entry, &fc->mounts);
@@ -1059,7 +1060,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
 fc->abort_err = 1;
 if (arg->flags & FUSE_MAX_PAGES) {
 fc->max_pages =
-min_t(unsigned int, FUSE_MAX_MAX_PAGES,
+min_t(unsigned int, fc->max_pages_limit,
 max_t(unsigned int, arg->max_pages, 1));
 }
 if (IS_ENABLED(CONFIG_FUSE_DAX) &&
@@ -1617,7 +1618,7 @@ static void fuse_kill_sb_blk(struct super_block *sb)
 struct fuse_mount *fm = get_fuse_mount_super(sb);
 bool last;
-if (fm) {
+if (sb->s_root) {
 last = fuse_mount_remove(fm);
 if (last)
 fuse_conn_destroy(fm);


@@ -18,6 +18,12 @@
 #include <linux/uio.h>
 #include "fuse_i.h"
+/* Used to help calculate the FUSE connection's max_pages limit for a request's
+ * size. Parts of the struct fuse_req are sliced into scattergather lists in
+ * addition to the pages used, so this can help account for that overhead.
+ */
+#define FUSE_HEADER_OVERHEAD 4
+
 /* List of virtio-fs device instances and a lock for the list. Also provides
  * mutual exclusion in device removal and mounting path
  */
@@ -1395,7 +1401,7 @@ static void virtio_kill_sb(struct super_block *sb)
 bool last;
 /* If mount failed, we can still be called without any fc */
-if (fm) {
+if (sb->s_root) {
 last = fuse_mount_remove(fm);
 if (last)
 virtio_fs_conn_destroy(fm);
@@ -1428,9 +1434,10 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
 {
 struct virtio_fs *fs;
 struct super_block *sb;
-struct fuse_conn *fc;
+struct fuse_conn *fc = NULL;
 struct fuse_mount *fm;
-int err;
+unsigned int virtqueue_size;
+int err = -EIO;
 /* This gets a reference on virtio_fs object. This ptr gets installed
  * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
@@ -1442,28 +1449,28 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
 return -EINVAL;
 }
+virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
+if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
+goto out_err;
+
+err = -ENOMEM;
 fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
-if (!fc) {
-mutex_lock(&virtio_fs_mutex);
-virtio_fs_put(fs);
-mutex_unlock(&virtio_fs_mutex);
-return -ENOMEM;
-}
+if (!fc)
+goto out_err;
 fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
-if (!fm) {
-mutex_lock(&virtio_fs_mutex);
-virtio_fs_put(fs);
-mutex_unlock(&virtio_fs_mutex);
-kfree(fc);
-return -ENOMEM;
-}
+if (!fm)
+goto out_err;
 fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
 fc->release = fuse_free_conn;
 fc->delete_stale = true;
 fc->auto_submounts = true;
+/* Tell FUSE to split requests that exceed the virtqueue's size */
+fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
+virtqueue_size - FUSE_HEADER_OVERHEAD);
+
 fsc->s_fs_info = fm;
 sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
 fuse_mount_put(fm);
@@ -1485,6 +1492,13 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
 WARN_ON(fsc->root);
 fsc->root = dget(sb->s_root);
 return 0;
+
+out_err:
+kfree(fc);
+mutex_lock(&virtio_fs_mutex);
+virtio_fs_put(fs);
+mutex_unlock(&virtio_fs_mutex);
+return err;
 }
 static const struct fs_context_operations virtio_fs_context_ops = {


@@ -435,6 +435,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
 return 0;
 }
+/**
+ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
+ * @sci: segment constructor object
+ *
+ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
+ * the current segment summary block.
+ */
+static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
+{
+struct nilfs_segsum_pointer *ssp;
+
+ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
+if (ssp->offset < ssp->bh->b_size)
+memset(ssp->bh->b_data + ssp->offset, 0,
+ssp->bh->b_size - ssp->offset);
+}
+
 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 {
 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -443,6 +460,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 * The current segment is filled up
 * (internal code)
 */
+nilfs_segctor_zeropad_segsum(sci);
 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 return nilfs_segctor_reset_segment_buffer(sci);
 }
@@ -547,6 +565,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 goto retry;
 }
 if (unlikely(required)) {
+nilfs_segctor_zeropad_segsum(sci);
 err = nilfs_segbuf_extend_segsum(segbuf);
 if (unlikely(err))
 goto failed;
@@ -1536,6 +1555,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 sci->sc_stage = prev_stage;
 }
+nilfs_segctor_zeropad_segsum(sci);
 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
 return 0;


@@ -39,33 +39,6 @@ static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
 XFS_I(ioend->io_inode)->i_d.di_size;
 }
-STATIC int
-xfs_setfilesize_trans_alloc(
-struct iomap_ioend *ioend)
-{
-struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
-struct xfs_trans *tp;
-int error;
-
-error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
-if (error)
-return error;
-
-ioend->io_private = tp;
-
-/*
- * We may pass freeze protection with a transaction. So tell lockdep
- * we released it.
- */
-__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
-/*
- * We hand off the transaction to the completion thread now, so
- * clear the flag here.
- */
-xfs_trans_clear_context(tp);
-return 0;
-}
-
 /*
  * Update on-disk file size now that data has been written to disk.
  */
@@ -191,12 +164,10 @@ xfs_end_ioend(
 error = xfs_reflink_end_cow(ip, offset, size);
 else if (ioend->io_type == IOMAP_UNWRITTEN)
 error = xfs_iomap_write_unwritten(ip, offset, size, false);
-else
-ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);
+
+if (!error && xfs_ioend_is_append(ioend))
+error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
 done:
-if (ioend->io_private)
-error = xfs_setfilesize_ioend(ioend, error);
 iomap_finish_ioends(ioend, error);
 memalloc_nofs_restore(nofs_flag);
 }
@@ -246,7 +217,7 @@ xfs_end_io(
 static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
 {
-return ioend->io_private ||
+return xfs_ioend_is_append(ioend) ||
 ioend->io_type == IOMAP_UNWRITTEN ||
 (ioend->io_flags & IOMAP_F_SHARED);
 }
@@ -259,8 +230,6 @@ xfs_end_bio(
 struct xfs_inode *ip = XFS_I(ioend->io_inode);
 unsigned long flags;
-ASSERT(xfs_ioend_needs_workqueue(ioend));
-
 spin_lock_irqsave(&ip->i_ioend_lock, flags);
 if (list_empty(&ip->i_ioend_list))
 WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
@@ -510,14 +479,6 @@ xfs_prepare_ioend(
 ioend->io_offset, ioend->io_size);
 }
-/* Reserve log space if we might write beyond the on-disk inode size. */
-if (!status &&
-((ioend->io_flags & IOMAP_F_SHARED) ||
-ioend->io_type != IOMAP_UNWRITTEN) &&
-xfs_ioend_is_append(ioend) &&
-!ioend->io_private)
-status = xfs_setfilesize_trans_alloc(ioend);
-
 memalloc_nofs_restore(nofs_flag);
 if (xfs_ioend_needs_workqueue(ioend))


@@ -260,6 +260,7 @@ struct nf_bridge_info {
 u8 pkt_otherhost:1;
 u8 in_prerouting:1;
 u8 bridged_dnat:1;
+u8 sabotage_in_done:1;
 __u16 frag_max_size;
 struct net_device *physindev;
@@ -4322,7 +4323,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 skb->nf_trace = 0;
 #endif
 }
@@ -4342,7 +4343,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 dst->_nfct = src->_nfct;
 nf_conntrack_get(skb_nfct(src));
 #endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 if (copy)
 dst->nf_trace = src->nf_trace;
 #endif


@@ -1106,6 +1106,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
 void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
 int inet6_release(struct socket *sock);
 int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
 int inet6_getname(struct socket *sock, struct sockaddr *uaddr,


@@ -268,7 +268,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
 }
 /* net/ipv4/udp.c */
-void udp_destruct_sock(struct sock *sk);
+void udp_destruct_common(struct sock *sk);
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
 void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);


@@ -24,14 +24,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
 return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
 }
-/* Designate sk as UDP-Lite socket */
-static inline int udplite_sk_init(struct sock *sk)
-{
-udp_init_sock(sk);
-udp_sk(sk)->pcflag = UDPLITE_BIT;
-return 0;
-}
-
 /*
  * Checksumming routines
  */


@@ -520,7 +520,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 TP_STRUCT__entry(
 __field(dev_t, dev)
 __field(ino_t, ino)
-__field(nid_t, nid[3])
+__array(nid_t, nid, 3)
 __field(int, depth)
 __field(int, err)
 ),


@@ -1931,6 +1931,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 }
 } else if (opcode == BPF_EXIT) {
 return -ENOTSUPP;
+} else if (BPF_SRC(insn->code) == BPF_X) {
+if (!(*reg_mask & (dreg | sreg)))
+return 0;
+/* dreg <cond> sreg
+ * Both dreg and sreg need precision before
+ * this insn. If only sreg was marked precise
+ * before it would be equally necessary to
+ * propagate it to dreg.
+ */
+*reg_mask |= (sreg | dreg);
+/* else dreg <cond> K
+ * Only dreg still needs precision before
+ * this insn, so for the K-based conditional
+ * there is nothing new to be marked.
+ */
 }
 } else if (class == BPF_LD) {
 if (!(*reg_mask & dreg))


@@ -1018,7 +1018,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
 return;
-WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+uclamp_rq_set(rq, clamp_id, clamp_value);
 }
 static inline
@@ -1203,8 +1203,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
 if (bucket->tasks == 1 || uc_se->value > bucket->value)
 bucket->value = uc_se->value;
-if (uc_se->value > READ_ONCE(uc_rq->value))
-WRITE_ONCE(uc_rq->value, uc_se->value);
+if (uc_se->value > uclamp_rq_get(rq, clamp_id))
+uclamp_rq_set(rq, clamp_id, uc_se->value);
 }
 /*
@@ -1270,7 +1270,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 if (likely(bucket->tasks))
 return;
-rq_clamp = READ_ONCE(uc_rq->value);
+rq_clamp = uclamp_rq_get(rq, clamp_id);
 /*
  * Defensive programming: this should never happen. If it happens,
  * e.g. due to future modification, warn and fixup the expected value.
@@ -1278,7 +1278,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 SCHED_WARN_ON(bucket->value > rq_clamp);
 if (bucket->value >= rq_clamp) {
 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
-WRITE_ONCE(uc_rq->value, bkt_clamp);
+uclamp_rq_set(rq, clamp_id, bkt_clamp);
 }
 }

@@ -3938,14 +3938,16 @@ static inline unsigned long task_util_est(struct task_struct *p)
 }
 #ifdef CONFIG_UCLAMP_TASK
-static inline unsigned long uclamp_task_util(struct task_struct *p)
+static inline unsigned long uclamp_task_util(struct task_struct *p,
+unsigned long uclamp_min,
+unsigned long uclamp_max)
 {
-return clamp(task_util_est(p),
-uclamp_eff_value(p, UCLAMP_MIN),
-uclamp_eff_value(p, UCLAMP_MAX));
+return clamp(task_util_est(p), uclamp_min, uclamp_max);
 }
 #else
-static inline unsigned long uclamp_task_util(struct task_struct *p)
+static inline unsigned long uclamp_task_util(struct task_struct *p,
+unsigned long uclamp_min,
+unsigned long uclamp_max)
 {
 return task_util_est(p);
 }
@@ -4126,12 +4128,16 @@ static inline int util_fits_cpu(unsigned long util,
 * For uclamp_max, we can tolerate a drop in performance level as the
 * goal is to cap the task. So it's okay if it's getting less.
 *
-* In case of capacity inversion, which is not handled yet, we should
-* honour the inverted capacity for both uclamp_min and uclamp_max all
-* the time.
+* In case of capacity inversion we should honour the inverted capacity
+* for both uclamp_min and uclamp_max all the time.
 */
-capacity_orig = capacity_orig_of(cpu);
-capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+capacity_orig = cpu_in_capacity_inversion(cpu);
+if (capacity_orig) {
+capacity_orig_thermal = capacity_orig;
+} else {
+capacity_orig = capacity_orig_of(cpu);
+capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+}
 /*
 * We want to force a task to fit a cpu as implied by uclamp_max.
@@ -4212,10 +4218,12 @@ static inline int util_fits_cpu(unsigned long util,
 return fits;
 }
-static inline int task_fits_capacity(struct task_struct *p,
-unsigned long capacity)
+static inline int task_fits_cpu(struct task_struct *p, int cpu)
 {
-return fits_capacity(uclamp_task_util(p), capacity);
+unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
+unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
+unsigned long util = task_util_est(p);
+return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
 }
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -4231,7 +4239,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 return;
 }
-if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+if (task_fits_cpu(p, cpu_of(rq))) {
 rq->misfit_task_load = 0;
 return;
 }
@@ -5685,13 +5693,15 @@ static inline unsigned long cpu_util(int cpu);
 static inline bool cpu_overutilized(int cpu)
 {
+unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
 int overutilized = -1;
 trace_android_rvh_cpu_overutilized(cpu, &overutilized);
 if (overutilized != -1)
 return overutilized;
-return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
+return !util_fits_cpu(cpu_util(cpu), rq_util_min, rq_util_max, cpu);
 }
 static inline void update_overutilized_status(struct rq *rq)
@@ -6433,21 +6443,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 static int
 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 {
-unsigned long task_util, best_cap = 0;
+unsigned long task_util, util_min, util_max, best_cap = 0;
 int cpu, best_cpu = -1;
 struct cpumask *cpus;
 cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
-task_util = uclamp_task_util(p);
+task_util = task_util_est(p);
+util_min = uclamp_eff_value(p, UCLAMP_MIN);
+util_max = uclamp_eff_value(p, UCLAMP_MAX);
 for_each_cpu_wrap(cpu, cpus, target) {
 unsigned long cpu_cap = capacity_of(cpu);
 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
 continue;
-if (fits_capacity(task_util, cpu_cap))
+if (util_fits_cpu(task_util, util_min, util_max, cpu))
 return cpu;
 if (cpu_cap > best_cap) {
@@ -6459,10 +6471,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 return best_cpu;
 }
-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
+static inline bool asym_fits_cpu(unsigned long util,
+unsigned long util_min,
+unsigned long util_max,
+int cpu)
 {
 if (static_branch_unlikely(&sched_asym_cpucapacity))
-return fits_capacity(task_util, capacity_of(cpu));
+return util_fits_cpu(util, util_min, util_max, cpu);
 return true;
 }
@@ -6473,7 +6488,7 @@ static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 struct sched_domain *sd;
-unsigned long task_util;
+unsigned long task_util, util_min, util_max;
 int i, recent_used_cpu;
 /*
@@ -6482,11 +6497,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 */
 if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 sync_entity_load_avg(&p->se);
-task_util = uclamp_task_util(p);
+task_util = task_util_est(p);
+util_min = uclamp_eff_value(p, UCLAMP_MIN);
+util_max = uclamp_eff_value(p, UCLAMP_MAX);
 }
 if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
-asym_fits_capacity(task_util, target))
+asym_fits_cpu(task_util, util_min, util_max, target))
 return target;
 /*
@@ -6494,7 +6511,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 */
 if (prev != target && cpus_share_cache(prev, target) &&
 (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
-asym_fits_capacity(task_util, prev))
+asym_fits_cpu(task_util, util_min, util_max, prev))
 return prev;
 /*
@@ -6509,7 +6526,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 in_task() &&
 prev == smp_processor_id() &&
 this_rq()->nr_running <= 1 &&
-asym_fits_capacity(task_util, prev)) {
+asym_fits_cpu(task_util, util_min, util_max, prev)) {
 return prev;
 }
@@ -6520,7 +6537,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 cpus_share_cache(recent_used_cpu, target) &&
 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
-asym_fits_capacity(task_util, recent_used_cpu)) {
+asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
 /*
 * Replace recent_used_cpu with prev as it is a potential
 * candidate for the next wake:
@@ -6846,6 +6863,8 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
 {
 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
+unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
+unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
 unsigned long max_spare_cap_ls = 0, target_cap;
@@ -6871,7 +6890,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 cpu = smp_processor_id();
 if (sync && cpu_rq(cpu)->nr_running == 1 &&
 cpumask_test_cpu(cpu, p->cpus_ptr) &&
-task_fits_capacity(p, capacity_of(cpu))) {
+task_fits_cpu(p, cpu)) {
 rcu_read_unlock();
 return cpu;
 }
@@ -6886,7 +6905,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 if (!sd)
 goto fail;
-if (!task_util_est(p))
+if (!uclamp_task_util(p, p_util_min, p_util_max))
 goto unlock;
 latency_sensitive = uclamp_latency_sensitive(p);
@@ -6894,7 +6913,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 target_cap = boosted ? 0 : ULONG_MAX;
 for (; pd; pd = pd->next) {
+unsigned long util_min = p_util_min, util_max = p_util_max;
 unsigned long cur_delta, spare_cap, max_spare_cap = 0;
+unsigned long rq_util_min, rq_util_max;
 unsigned long base_energy_pd;
 int max_spare_cap_cpu = -1;
@@ -6903,6 +6924,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 base_energy += base_energy_pd;
 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
+struct rq *rq = cpu_rq(cpu);
 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 continue;
@@ -6918,8 +6941,21 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 * much capacity we can get out of the CPU; this is
 * aligned with schedutil_cpu_util().
 */
-util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
-if (!fits_capacity(util, cpu_cap))
+if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+/*
+* Open code uclamp_rq_util_with() except for
+* the clamp() part. Ie: apply max aggregation
+* only. util_fits_cpu() logic requires to
+* operate on non clamped util but must use the
+* max-aggregated uclamp_{min, max}.
+*/
+rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+util_min = max(rq_util_min, p_util_min);
+util_max = max(rq_util_max, p_util_max);
+}
+if (!util_fits_cpu(util, util_min, util_max, cpu))
 continue;
 /* Always use prev_cpu as a candidate. */
@@ -8080,7 +8116,7 @@ static int detach_tasks(struct lb_env *env)
 case migrate_misfit:
 /* This is not a misfit task */
-if (task_fits_capacity(p, capacity_of(env->src_cpu)))
+if (task_fits_cpu(p, env->src_cpu))
 goto next;
 env->imbalance = 0;
@@ -8478,17 +8514,83 @@ static unsigned long scale_rt_capacity(int cpu)
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
+unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
 unsigned long capacity = scale_rt_capacity(cpu);
 struct sched_group *sdg = sd->groups;
+struct rq *rq = cpu_rq(cpu);
-cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
+rq->cpu_capacity_orig = capacity_orig;
 if (!capacity)
 capacity = 1;
 trace_android_rvh_update_cpu_capacity(cpu, &capacity);
-cpu_rq(cpu)->cpu_capacity = capacity;
-trace_sched_cpu_capacity_tp(cpu_rq(cpu));
+rq->cpu_capacity = capacity;
+/*
+* Detect if the performance domain is in capacity inversion state.
+*
+* Capacity inversion happens when another perf domain with equal or
+* lower capacity_orig_of() ends up having higher capacity than this
+* domain after subtracting thermal pressure.
+*
+* We only take into account thermal pressure in this detection as it's
+* the only metric that actually results in *real* reduction of
+* capacity due to performance points (OPPs) being dropped/become
+* unreachable due to thermal throttling.
+*
+* We assume:
+* * That all cpus in a perf domain have the same capacity_orig
+* (same uArch).
+* * Thermal pressure will impact all cpus in this perf domain
+* equally.
+*/
+if (sched_energy_enabled()) {
+unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
+struct perf_domain *pd;
+rcu_read_lock();
+pd = rcu_dereference(rq->rd->pd);
+rq->cpu_capacity_inverted = 0;
+for (; pd; pd = pd->next) {
+struct cpumask *pd_span = perf_domain_span(pd);
+unsigned long pd_cap_orig, pd_cap;
+/* We can't be inverted against our own pd */
+if (cpumask_test_cpu(cpu_of(rq), pd_span))
+continue;
+cpu = cpumask_any(pd_span);
+pd_cap_orig = arch_scale_cpu_capacity(cpu);
+if (capacity_orig < pd_cap_orig)
+continue;
+/*
+* handle the case of multiple perf domains have the
+* same capacity_orig but one of them is under higher
+* thermal pressure. We record it as capacity
+* inversion.
+*/
+if (capacity_orig == pd_cap_orig) {
+pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
+if (pd_cap > inv_cap) {
+rq->cpu_capacity_inverted = inv_cap;
+break;
+}
+} else if (pd_cap_orig > inv_cap) {
+rq->cpu_capacity_inverted = inv_cap;
+break;
+}
+}
+rcu_read_unlock();
+}
+trace_sched_cpu_capacity_tp(rq);
 sdg->sgc->capacity = capacity;
 sdg->sgc->min_capacity = capacity;
@@ -9023,6 +9125,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 memset(sgs, 0, sizeof(*sgs));
+/* Assume that task can't fit any CPU of the group */
+if (sd->flags & SD_ASYM_CPUCAPACITY)
+sgs->group_misfit_task_load = 1;
 for_each_cpu(i, sched_group_span(group)) {
 struct rq *rq = cpu_rq(i);
 unsigned int local;
@@ -9042,12 +9148,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 if (!nr_running && idle_cpu_without(i, p))
 sgs->idle_cpus++;
-}
+/* Check if task fits in the CPU */
+if (sd->flags & SD_ASYM_CPUCAPACITY &&
+sgs->group_misfit_task_load &&
+task_fits_cpu(p, i))
+sgs->group_misfit_task_load = 0;
-/* Check if task fits in the group */
-if (sd->flags & SD_ASYM_CPUCAPACITY &&
-!task_fits_capacity(p, group->sgc->max_capacity)) {
-sgs->group_misfit_task_load = 1;
 }
 sgs->group_capacity = group->sgc->capacity;
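To make the inversion condition above concrete: with made-up numbers, a big CPU whose capacity_orig is 1024 but which loses 300 units to thermal pressure ends up below a mid CPU whose capacity_orig is 768, and that is exactly the situation recorded in rq->cpu_capacity_inverted. A stand-alone user-space sketch of that arithmetic (illustration only, not kernel code; the figures are hypothetical):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-domain capacities and thermal pressure. */
	unsigned long big_orig = 1024, big_thermal = 300;
	unsigned long mid_orig = 768, mid_thermal = 0;

	unsigned long big_eff = big_orig - big_thermal;	/* 724 */
	unsigned long mid_eff = mid_orig - mid_thermal;	/* 768 */

	/* The big CPU is inverted: a domain with lower capacity_orig beats it. */
	bool big_inverted = (mid_orig <= big_orig) && (mid_eff > big_eff);

	printf("big=%lu mid=%lu inverted=%d\n", big_eff, mid_eff, big_inverted);
	return 0;
}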

@@ -993,6 +993,7 @@ struct rq {
 unsigned long cpu_capacity;
 unsigned long cpu_capacity_orig;
+unsigned long cpu_capacity_inverted;
 struct callback_head *balance_callback;
@@ -2458,6 +2459,23 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+enum uclamp_id clamp_id)
+{
+return READ_ONCE(rq->uclamp[clamp_id].value);
+}
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+unsigned int value)
+{
+WRITE_ONCE(rq->uclamp[clamp_id].value, value);
+}
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
+}
 /**
 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
 * @rq: The rq to clamp against. Must not be NULL.
@@ -2493,12 +2511,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 * Ignore last runnable task's max clamp, as this task will
 * reset it. Similarly, no need to read the rq's min clamp.
 */
-if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+if (uclamp_rq_is_idle(rq))
 goto out;
 }
-min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
-max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
+max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
 out:
 /*
 * Since CPU's {min,max}_util clamps are MAX aggregated considering
@@ -2529,6 +2547,15 @@ static inline bool uclamp_is_used(void)
 return static_branch_likely(&sched_uclamp_used);
 }
 #else /* CONFIG_UCLAMP_TASK */
+static inline unsigned long uclamp_eff_value(struct task_struct *p,
+enum uclamp_id clamp_id)
+{
+if (clamp_id == UCLAMP_MIN)
+return 0;
+return SCHED_CAPACITY_SCALE;
+}
 static inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 struct task_struct *p)
@@ -2545,6 +2572,25 @@ static inline bool uclamp_is_used(void)
 {
 return false;
 }
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+enum uclamp_id clamp_id)
+{
+if (clamp_id == UCLAMP_MIN)
+return 0;
+return SCHED_CAPACITY_SCALE;
+}
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+unsigned int value)
+{
+}
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+return false;
+}
 #endif /* CONFIG_UCLAMP_TASK */
 #ifdef CONFIG_UCLAMP_TASK_GROUP
@@ -2579,6 +2625,24 @@ static inline unsigned long capacity_orig_of(int cpu)
 {
 return cpu_rq(cpu)->cpu_capacity_orig;
 }
+/*
+* Returns inverted capacity if the CPU is in capacity inversion state.
+* 0 otherwise.
+*
+* Capacity inversion detection only considers thermal impact where actual
+* performance points (OPPs) gets dropped.
+*
+* Capacity inversion state happens when another performance domain that has
+* equal or lower capacity_orig_of() becomes effectively larger than the perf
+* domain this CPU belongs to due to thermal pressure throttling it hard.
+*
+* See comment in update_cpu_capacity().
+*/
+static inline unsigned long cpu_in_capacity_inversion(int cpu)
+{
+return cpu_rq(cpu)->cpu_capacity_inverted;
+}
 #endif
 /**

@@ -644,6 +644,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 struct cred *new;
 int retval;
 kuid_t kruid, keuid, ksuid;
+bool ruid_new, euid_new, suid_new;
 kruid = make_kuid(ns, ruid);
 keuid = make_kuid(ns, euid);
@@ -658,25 +659,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 return -EINVAL;
+old = current_cred();
+/* check for no-op */
+if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
+(euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
+uid_eq(keuid, old->fsuid))) &&
+(suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
+return 0;
+ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
+!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
+euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
+!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
+suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
+!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
+if ((ruid_new || euid_new || suid_new) &&
+!ns_capable_setid(old->user_ns, CAP_SETUID))
+return -EPERM;
 new = prepare_creds();
 if (!new)
 return -ENOMEM;
-old = current_cred();
-retval = -EPERM;
-if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
-if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
-!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
-goto error;
-if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
-!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
-goto error;
-if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
-!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
-goto error;
-}
 if (ruid != (uid_t) -1) {
 new->uid = kruid;
 if (!uid_eq(kruid, old->uid)) {
@@ -736,6 +741,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 struct cred *new;
 int retval;
 kgid_t krgid, kegid, ksgid;
+bool rgid_new, egid_new, sgid_new;
 krgid = make_kgid(ns, rgid);
 kegid = make_kgid(ns, egid);
@@ -748,23 +754,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 return -EINVAL;
+old = current_cred();
+/* check for no-op */
+if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
+(egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
+gid_eq(kegid, old->fsgid))) &&
+(sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
+return 0;
+rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
+!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
+egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
+!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
+sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
+!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
+if ((rgid_new || egid_new || sgid_new) &&
+!ns_capable_setid(old->user_ns, CAP_SETGID))
+return -EPERM;
 new = prepare_creds();
 if (!new)
 return -ENOMEM;
-old = current_cred();
-retval = -EPERM;
-if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
-if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
-!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
-goto error;
-if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
-!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
-goto error;
-if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
-!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
-goto error;
-}
 if (rgid != (gid_t) -1)
 new->gid = krgid;
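The rework above decides up front whether any requested id is genuinely new for the caller and only then demands CAP_SETUID or CAP_SETGID, instead of allocating credentials first and unwinding on -EPERM. A small user-space model of that predicate, with plain integers standing in for kuid_t and hypothetical helper names (illustration only):

#include <stdbool.h>

struct ids { unsigned ruid, euid, suid; };

/* True when the requested id is set and not already held by the caller. */
static bool id_is_new(unsigned req, const struct ids *old)
{
	return req != (unsigned)-1 &&
	       req != old->ruid && req != old->euid && req != old->suid;
}

/* Mirrors the reordered check: privilege is needed only for new ids. */
static int model_setresuid(const struct ids *old, bool has_cap_setuid,
			   unsigned ruid, unsigned euid, unsigned suid)
{
	bool any_new = id_is_new(ruid, old) || id_is_new(euid, old) ||
		       id_is_new(suid, old);

	if (any_new && !has_cap_setuid)
		return -1;	/* -EPERM in the kernel */
	return 0;		/* safe to prepare and commit new creds */
}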

@@ -623,6 +623,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 result = SCAN_PTE_NON_PRESENT;
 goto out;
 }
+if (pte_uffd_wp(pteval)) {
+result = SCAN_PTE_UFFD_WP;
+goto out;
+}
 page = vm_normal_page(vma, address, pteval);
 if (unlikely(!page)) {
 result = SCAN_PAGE_NULL;

@@ -868,12 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
 {
 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
-if (nf_bridge && !nf_bridge->in_prerouting &&
-!netif_is_l3_master(skb->dev) &&
-!netif_is_l3_slave(skb->dev)) {
-nf_bridge_info_free(skb);
-state->okfn(state->net, state->sk, skb);
-return NF_STOLEN;
+if (nf_bridge) {
+if (nf_bridge->sabotage_in_done)
+return NF_ACCEPT;
+if (!nf_bridge->in_prerouting &&
+!netif_is_l3_master(skb->dev) &&
+!netif_is_l3_slave(skb->dev)) {
+nf_bridge->sabotage_in_done = 1;
+state->okfn(state->net, state->sk, skb);
+return NF_STOLEN;
+}
 }
 return NF_ACCEPT;

@@ -283,6 +283,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 const struct dccp_hdr *dh, const unsigned int len);
+void dccp_destruct_common(struct sock *sk);
 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
 void dccp_destroy_sock(struct sock *sk);

@@ -992,6 +992,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 .sockaddr_len = sizeof(struct sockaddr_in6),
 };
+static void dccp_v6_sk_destruct(struct sock *sk)
+{
+dccp_destruct_common(sk);
+inet6_sock_destruct(sk);
+}
 /* NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
@@ -1004,17 +1010,12 @@ static int dccp_v6_init_sock(struct sock *sk)
 if (unlikely(!dccp_v6_ctl_sock_initialized))
 dccp_v6_ctl_sock_initialized = 1;
 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
+sk->sk_destruct = dccp_v6_sk_destruct;
 }
 return err;
 }
-static void dccp_v6_destroy_sock(struct sock *sk)
-{
-dccp_destroy_sock(sk);
-inet6_destroy_sock(sk);
-}
 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
 };
@@ -1037,7 +1038,7 @@ static struct proto dccp_v6_prot = {
 .accept = inet_csk_accept,
 .get_port = inet_csk_get_port,
 .shutdown = dccp_shutdown,
-.destroy = dccp_v6_destroy_sock,
+.destroy = dccp_destroy_sock,
 .orphan_count = &dccp_orphan_count,
 .max_header = MAX_DCCP_HEADER,
 .obj_size = sizeof(struct dccp6_sock),

@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)
 EXPORT_SYMBOL_GPL(dccp_packet_name);
-static void dccp_sk_destruct(struct sock *sk)
+void dccp_destruct_common(struct sock *sk)
 {
 struct dccp_sock *dp = dccp_sk(sk);
 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
 dp->dccps_hc_tx_ccid = NULL;
+}
+EXPORT_SYMBOL_GPL(dccp_destruct_common);
+static void dccp_sk_destruct(struct sock *sk)
+{
+dccp_destruct_common(sk);
 inet_sock_destruct(sk);
 }

@@ -1584,7 +1584,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
-void udp_destruct_sock(struct sock *sk)
+void udp_destruct_common(struct sock *sk)
 {
 /* reclaim completely the forward allocated memory */
 struct udp_sock *up = udp_sk(sk);
@@ -1597,10 +1597,14 @@ void udp_destruct_sock(struct sock *sk)
 kfree_skb(skb);
 }
 udp_rmem_release(sk, total, 0, true);
+}
+EXPORT_SYMBOL_GPL(udp_destruct_common);
+static void udp_destruct_sock(struct sock *sk)
+{
+udp_destruct_common(sk);
 inet_sock_destruct(sk);
 }
-EXPORT_SYMBOL_GPL(udp_destruct_sock);
 int udp_init_sock(struct sock *sk)
 {
@@ -1608,7 +1612,6 @@ int udp_init_sock(struct sock *sk)
 sk->sk_destruct = udp_destruct_sock;
 return 0;
 }
-EXPORT_SYMBOL_GPL(udp_init_sock);
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 {

@@ -17,6 +17,14 @@
 struct udp_table udplite_table __read_mostly;
 EXPORT_SYMBOL(udplite_table);
+/* Designate sk as UDP-Lite socket */
+static int udplite_sk_init(struct sock *sk)
+{
+udp_init_sock(sk);
+udp_sk(sk)->pcflag = UDPLITE_BIT;
+return 0;
+}
 static int udplite_rcv(struct sk_buff *skb)
 {
 return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);

@@ -107,6 +107,13 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 }
+void inet6_sock_destruct(struct sock *sk)
+{
+inet6_cleanup_sock(sk);
+inet_sock_destruct(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_sock_destruct);
 static int inet6_create(struct net *net, struct socket *sock, int protocol,
 int kern)
 {
@@ -199,7 +206,7 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 inet->hdrincl = 1;
 }
-sk->sk_destruct = inet_sock_destruct;
+sk->sk_destruct = inet6_sock_destruct;
 sk->sk_family = PF_INET6;
 sk->sk_protocol = protocol;
@@ -505,6 +512,12 @@ void inet6_destroy_sock(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);
+void inet6_cleanup_sock(struct sock *sk)
+{
+inet6_destroy_sock(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_cleanup_sock);
 /*
 * This does both peername and sockname.
 */

@@ -429,9 +429,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 if (optlen < sizeof(int))
 goto e_inval;
 if (val == PF_INET) {
-struct ipv6_txoptions *opt;
-struct sk_buff *pktopt;
 if (sk->sk_type == SOCK_RAW)
 break;
@@ -462,7 +459,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 break;
 }
-fl6_free_socklist(sk);
 __ipv6_sock_mc_close(sk);
 __ipv6_sock_ac_close(sk);
@@ -497,14 +493,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 sk->sk_socket->ops = &inet_dgram_ops;
 sk->sk_family = PF_INET;
 }
-opt = xchg((__force struct ipv6_txoptions **)&np->opt,
-NULL);
-if (opt) {
-atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
-txopt_put(opt);
-}
-pktopt = xchg(&np->pktoptions, NULL);
-kfree_skb(pktopt);
+/* Disable all options not to allocate memory anymore,
+* but there is still a race. See the lockless path
+* in udpv6_sendmsg() and ipv6_local_rxpmtu().
+*/
+np->rxopt.all = 0;
+inet6_cleanup_sock(sk);
 /*
 * ... and add it to the refcnt debug socks count

@@ -22,11 +22,6 @@
 #include <linux/proc_fs.h>
 #include <net/ping.h>
-static void ping_v6_destroy(struct sock *sk)
-{
-inet6_destroy_sock(sk);
-}
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
 int *addr_len)
@@ -171,7 +166,6 @@ struct proto pingv6_prot = {
 .owner = THIS_MODULE,
 .init = ping_init_sock,
 .close = ping_close,
-.destroy = ping_v6_destroy,
 .connect = ip6_datagram_connect_v6_only,
 .disconnect = __udp_disconnect,
 .setsockopt = ipv6_setsockopt,

@@ -1211,8 +1211,6 @@ static void raw6_destroy(struct sock *sk)
 lock_sock(sk);
 ip6_flush_pending_frames(sk);
 release_sock(sk);
-inet6_destroy_sock(sk);
 }
 static int rawv6_init_sk(struct sock *sk)

@@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
 unsigned char cmpre)
 {
-return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
+return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) +
+IPV6_PFXTAIL_LEN(cmpre);
 }
 void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
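The old calculation counted only the compressed address tail and left out the fixed part of the RPL source routing header, so decompression buffers came up short. A rough worked example, assuming IPV6_PFXTAIL_LEN(x) evaluates to 16 - x bytes and the fixed header is 8 bytes (both figures are assumptions here, not taken from this diff):

#include <stdio.h>

int main(void)
{
	unsigned n = 3, cmpri = 8, cmpre = 8;
	unsigned tail = n * (16 - cmpri) + (16 - cmpre);	/* 32: old result   */
	unsigned fixed = 8;					/* assumed hdr size  */

	printf("old=%u new=%u\n", tail, fixed + tail);		/* old=32 new=40     */
	return 0;
}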

@@ -1936,12 +1936,6 @@ static int tcp_v6_init_sock(struct sock *sk)
 return 0;
 }
-static void tcp_v6_destroy_sock(struct sock *sk)
-{
-tcp_v4_destroy_sock(sk);
-inet6_destroy_sock(sk);
-}
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
@@ -2134,7 +2128,7 @@ struct proto tcpv6_prot = {
 .accept = inet_csk_accept,
 .ioctl = tcp_ioctl,
 .init = tcp_v6_init_sock,
-.destroy = tcp_v6_destroy_sock,
+.destroy = tcp_v4_destroy_sock,
 .shutdown = tcp_shutdown,
 .setsockopt = tcp_setsockopt,
 .getsockopt = tcp_getsockopt,

@@ -54,6 +54,19 @@
 #include <trace/events/skb.h>
 #include "udp_impl.h"
+static void udpv6_destruct_sock(struct sock *sk)
+{
+udp_destruct_common(sk);
+inet6_sock_destruct(sk);
+}
+int udpv6_init_sock(struct sock *sk)
+{
+skb_queue_head_init(&udp_sk(sk)->reader_queue);
+sk->sk_destruct = udpv6_destruct_sock;
+return 0;
+}
 static u32 udp6_ehashfn(const struct net *net,
 const struct in6_addr *laddr,
 const u16 lport,
@@ -1617,8 +1630,6 @@ void udpv6_destroy_sock(struct sock *sk)
 udp_encap_disable();
 }
 }
-inet6_destroy_sock(sk);
 }
 /*
@@ -1702,7 +1713,7 @@ struct proto udpv6_prot = {
 .connect = ip6_datagram_connect,
 .disconnect = udp_disconnect,
 .ioctl = udp_ioctl,
-.init = udp_init_sock,
+.init = udpv6_init_sock,
 .destroy = udpv6_destroy_sock,
 .setsockopt = udpv6_setsockopt,
 .getsockopt = udpv6_getsockopt,

@@ -12,6 +12,7 @@ int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
 int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
 __be32, struct udp_table *);
+int udpv6_init_sock(struct sock *sk);
 int udp_v6_get_port(struct sock *sk, unsigned short snum);
 void udp_v6_rehash(struct sock *sk);

@@ -12,6 +12,13 @@
 #include <linux/proc_fs.h>
 #include "udp_impl.h"
+static int udplitev6_sk_init(struct sock *sk)
+{
+udpv6_init_sock(sk);
+udp_sk(sk)->pcflag = UDPLITE_BIT;
+return 0;
+}
 static int udplitev6_rcv(struct sk_buff *skb)
 {
 return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
@@ -38,7 +45,7 @@ struct proto udplitev6_prot = {
 .connect = ip6_datagram_connect,
 .disconnect = udp_disconnect,
 .ioctl = udp_ioctl,
-.init = udplite_sk_init,
+.init = udplitev6_sk_init,
 .destroy = udpv6_destroy_sock,
 .setsockopt = udpv6_setsockopt,
 .getsockopt = udpv6_getsockopt,

@@ -255,8 +255,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)
 if (tunnel)
 l2tp_tunnel_delete(tunnel);
-inet6_destroy_sock(sk);
 }
 static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)

@@ -2863,12 +2863,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
 static struct proto mptcp_v6_prot;
-static void mptcp_v6_destroy(struct sock *sk)
-{
-mptcp_destroy(sk);
-inet6_destroy_sock(sk);
-}
 static struct inet_protosw mptcp_v6_protosw = {
 .type = SOCK_STREAM,
 .protocol = IPPROTO_MPTCP,
@@ -2884,7 +2878,6 @@ int __init mptcp_proto_v6_init(void)
 mptcp_v6_prot = mptcp_prot;
 strcpy(mptcp_v6_prot.name, "MPTCPv6");
 mptcp_v6_prot.slab = NULL;
-mptcp_v6_prot.destroy = mptcp_v6_destroy;
 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
 err = proto_register(&mptcp_v6_prot, 1);

@@ -421,15 +421,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 } else
 weight = 1;
-if (tb[TCA_QFQ_LMAX]) {
+if (tb[TCA_QFQ_LMAX])
 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
-pr_notice("qfq: invalid max length %u\n", lmax);
-return -EINVAL;
-}
-} else
+else
 lmax = psched_mtu(qdisc_dev(sch));
+if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+pr_notice("qfq: invalid max length %u\n", lmax);
+return -EINVAL;
+}
 inv_w = ONE_FP / weight;
 weight = ONE_FP / inv_w;

@@ -5000,13 +5000,17 @@ static void sctp_destroy_sock(struct sock *sk)
 }
 /* Triggered when there are no references on the socket anymore */
-static void sctp_destruct_sock(struct sock *sk)
+static void sctp_destruct_common(struct sock *sk)
 {
 struct sctp_sock *sp = sctp_sk(sk);
 /* Free up the HMAC transform. */
 crypto_free_shash(sp->hmac);
+}
+static void sctp_destruct_sock(struct sock *sk)
+{
+sctp_destruct_common(sk);
 inet_sock_destruct(sk);
 }
@@ -9200,7 +9204,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 sctp_sk(newsk)->reuse = sp->reuse;
 newsk->sk_shutdown = sk->sk_shutdown;
-newsk->sk_destruct = sctp_destruct_sock;
+newsk->sk_destruct = sk->sk_destruct;
 newsk->sk_family = sk->sk_family;
 newsk->sk_protocol = IPPROTO_SCTP;
 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -9432,11 +9436,20 @@ struct proto sctp_prot = {
 #if IS_ENABLED(CONFIG_IPV6)
-#include <net/transp_v6.h>
-static void sctp_v6_destroy_sock(struct sock *sk)
+static void sctp_v6_destruct_sock(struct sock *sk)
 {
-sctp_destroy_sock(sk);
-inet6_destroy_sock(sk);
+sctp_destruct_common(sk);
+inet6_sock_destruct(sk);
+}
+static int sctp_v6_init_sock(struct sock *sk)
+{
+int ret = sctp_init_sock(sk);
+if (!ret)
+sk->sk_destruct = sctp_v6_destruct_sock;
+return ret;
 }
 struct proto sctpv6_prot = {
@@ -9446,8 +9459,8 @@ struct proto sctpv6_prot = {
 .disconnect = sctp_disconnect,
 .accept = sctp_accept,
 .ioctl = sctp_ioctl,
-.init = sctp_init_sock,
-.destroy = sctp_v6_destroy_sock,
+.init = sctp_v6_init_sock,
+.destroy = sctp_destroy_sock,
 .shutdown = sctp_shutdown,
 .setsockopt = sctp_setsockopt,
 .getsockopt = sctp_getsockopt,

@@ -625,7 +625,7 @@ int main(int argc, char **argv)
 p = strrchr(argv[1], '/');
 p = p ? p + 1 : argv[1];
 grammar_name = strdup(p);
-if (!p) {
+if (!grammar_name) {
 perror(NULL);
 exit(1);
 }

@@ -207,14 +207,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
 tmp_chan = be_chan;
 }
-if (!tmp_chan)
-tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+if (!tmp_chan) {
+tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
+if (IS_ERR(tmp_chan)) {
+dev_err(dev, "failed to request DMA channel for Back-End\n");
+return -EINVAL;
+}
+}
 /*
 * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
 * peripheral, unlike SDMA channel that is allocated dynamically. So no
 * need to configure dma_request and dma_request2, but get dma_chan of
-* Back-End device directly via dma_request_slave_channel.
+* Back-End device directly via dma_request_chan.
 */
 if (!asrc->use_edma) {
 /* Get DMA request of Back-End */
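The error handling added above follows from the API swap: dma_request_slave_channel() returns NULL on failure, while dma_request_chan() returns an ERR_PTR() value that must be tested with IS_ERR(). A condensed sketch of that calling convention (simplified from the driver change above, not a drop-in replacement):

static struct dma_chan *request_be_chan(struct device *dev, bool tx)
{
	struct dma_chan *chan = dma_request_chan(dev, tx ? "tx" : "rx");

	if (IS_ERR(chan)) {
		/* May legitimately be -EPROBE_DEFER rather than a hard error. */
		dev_err(dev, "DMA channel request failed: %ld\n", PTR_ERR(chan));
		return NULL;
	}
	return chan;
}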

@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if __alpha__
+register unsigned long sp asm("$30");
+#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
+register unsigned long sp asm("sp");
+#elif __i386__
+register unsigned long sp asm("esp");
+#elif __loongarch64
+register unsigned long sp asm("$sp");
+#elif __ppc__
+register unsigned long sp asm("r1");
+#elif __s390x__
+register unsigned long sp asm("%15");
+#elif __sh__
+register unsigned long sp asm("r15");
+#elif __x86_64__
+register unsigned long sp asm("rsp");
+#elif __XTENSA__
+register unsigned long sp asm("a1");
+#else
+#error "implement current_stack_pointer equivalent"
+#endif

@@ -19,6 +19,7 @@
 #include <errno.h>
 #include "../kselftest.h"
+#include "current_stack_pointer.h"
 #ifndef SS_AUTODISARM
 #define SS_AUTODISARM (1U << 31)
@@ -40,12 +41,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
 stack_t stk;
 struct stk_data *p;
-#if __s390x__
-register unsigned long sp asm("%15");
-#else
-register unsigned long sp asm("sp");
-#endif
 if (sp < (unsigned long)sstack ||
 sp >= (unsigned long)sstack + SIGSTKSZ) {
 ksft_exit_fail_msg("SP is not on sigaltstack\n");
ksft_exit_fail_msg("SP is not on sigaltstack\n"); ksft_exit_fail_msg("SP is not on sigaltstack\n");