This is the 5.4.255 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmTvUhUACgkQONu9yGCS
 aT4bKA//VvBb7CUEq4FFMv5qig67dKUIqJVfpwLrqaCqVR8B0QonL1M5dcKXywwT
 zFqcQNGmgig9TtbYmrLtcpI/v3J3jilY7/an5dWBEPteyZgpkpAwO3M7MinbtIbj
 qRkU5qN/zojUMqgWUYRenICeiN4EOVQ64/Q9fhbj2yFBeQWzCFb0eoeF059DocTD
 UzN1Ls+cYHvZEDi0VEiapQzYX1JcxMbuWaGDttQLDvjV6FMaExT5mIobDqSF+9MA
 MS9GGj3R/Q+NjOi/AXEMfnWGEYPLsX5hgM3ok2hjyneJiw1J6OqxG1JoPJAnDUEH
 d3u/tlcWQ0j/QP0iNZBvC9aVC9YBndOoaAny5QINoLGQsbeCbZ34cKs80p76xTBa
 Vvl/B2pFu3pGVBk7f37rf/D2v/MTxkDONxwBzG4J6uDViPgpIDK7UExjGDub6gf1
 Ii5HmXvGCNwIk3NnCpdaHUQy3XRI7cz24kvDZsqkalMW6GYwlVNj9gikcW3dfOVY
 Jsdufo9fM5N3jXbru3NW61ne024+NxGRd3SnUsYB/saKfUZAxm0S/O34fzQi3wZx
 VLXFB85DIY5gkYl2VeycDZzmVkFEaDP4vzDR1gCmMTaiQsyQuD5wma6dUGggdF/2
 fvigMgosamWhHHHByASp9RxYRBwTe7vEdFE4+8gbEa7NxMoBcg8=
 =Dhtw
 -----END PGP SIGNATURE-----

Merge 5.4.255 into android11-5.4-stable

Changes in 5.4.255
	mmc: sdhci_f_sdh30: convert to devm_platform_ioremap_resource
	mmc: sdhci-f-sdh30: Replace with sdhci_pltfm
	selftests: forwarding: tc_flower: Relax success criterion
	macsec: Fix traffic counters/statistics
	macsec: use DEV_STATS_INC()
	drm/radeon: Fix integer overflow in radeon_cs_parser_init
	ALSA: emu10k1: roll up loops in DSP setup code for Audigy
	quota: Properly disable quotas when add_dquot_ref() fails
	quota: fix warning in dqgrab()
	HID: add quirk for 03f0:464a HP Elite Presenter Mouse
	ovl: check type and offset of struct vfsmount in ovl_entry
	udf: Fix uninitialized array access for some pathnames
	fs: jfs: Fix UBSAN: array-index-out-of-bounds in dbAllocDmapLev
	MIPS: dec: prom: Address -Warray-bounds warning
	FS: JFS: Fix null-ptr-deref Read in txBegin
	FS: JFS: Check for read-only mounted filesystem in txBegin
	media: v4l2-mem2mem: add lock to protect parameter num_rdy
	media: platform: mediatek: vpu: fix NULL ptr dereference
	usb: chipidea: imx: don't request QoS for imx8ulp
	gfs2: Fix possible data races in gfs2_show_options()
	pcmcia: rsrc_nonstatic: Fix memory leak in nonstatic_release_resource_db()
	Bluetooth: L2CAP: Fix use-after-free
	drm/amdgpu: Fix potential fence use-after-free v2
	ALSA: hda/realtek: Add quirks for Unis H3C Desktop B760 & Q760
	ALSA: hda: fix a possible null-pointer dereference due to data race in snd_hdac_regmap_sync()
	powerpc/kasan: Disable KCOV in KASAN code
	IMA: allow/fix UML builds
	iio: add addac subdirectory
	iio: adc: stx104: Utilize iomap interface
	iio: adc: stx104: Implement and utilize register structures
	iio: stx104: Move to addac subdirectory
	iio: addac: stx104: Fix race condition for stx104_write_raw()
	iio: addac: stx104: Fix race condition when converting analog-to-digital
	iommu/amd: Fix "Guest Virtual APIC Table Root Pointer" configuration in IRTE
	PM-runtime: add tracepoints for usage_count changes
	PM: runtime: Add pm_runtime_get_if_active()
	ALSA: hda: Fix unhandled register update during auto-suspend period
	irqchip/mips-gic: Get rid of the reliance on irq_cpu_online()
	irqchip/mips-gic: Use raw spinlock for gic_lock
	interconnect: Move internal structs into a separate file
	interconnect: Add helpers for enabling/disabling a path
	usb: dwc3: qcom: Add helper functions to enable,disable wake irqs
	USB: dwc3: qcom: fix NULL-deref on suspend
	mmc: bcm2835: fix deferred probing
	mmc: sunxi: fix deferred probing
	leds: trigger: netdev: Recheck NETDEV_LED_MODE_LINKUP on dev rename
	tracing/probes: Have process_fetch_insn() take a void * instead of pt_regs
	tracing/probes: Fix to update dynamic data counter if fetcharg uses it
	net/ncsi: Fix gma flag setting after response
	nfsd4: kill warnings on testing stateids with mismatched clientids
	nfsd: Remove incorrect check in nfsd4_validate_stateid
	virtio-mmio: convert to devm_platform_ioremap_resource
	virtio-mmio: Use to_virtio_mmio_device() to simply code
	virtio-mmio: don't break lifecycle of vm_dev
	i2c: bcm-iproc: Fix bcm_iproc_i2c_isr deadlock issue
	fbdev: mmp: fix value check in mmphw_probe()
	powerpc/rtas_flash: allow user copy to flash block cache objects
	tty: serial: fsl_lpuart: Clear the error flags by writing 1 for lpuart32 platforms
	btrfs: fix BUG_ON condition in btrfs_cancel_balance
	net: xfrm: Fix xfrm_address_filter OOB read
	net: af_key: fix sadb_x_filter validation
	xfrm: interface: rename xfrm_interface.c to xfrm_interface_core.c
	xfrm: fix slab-use-after-free in decode_session6
	ip6_vti: fix slab-use-after-free in decode_session6
	ip_vti: fix potential slab-use-after-free in decode_session6
	xfrm: add NULL check in xfrm_update_ae_params
	selftests: mirror_gre_changes: Tighten up the TTL test match
	ipvs: fix racy memcpy in proc_do_sync_threshold
	netfilter: nft_dynset: disallow object maps
	team: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves
	i40e: fix misleading debug logs
	net: dsa: mv88e6xxx: Wait for EEPROM done before HW reset
	sock: Fix misuse of sk_under_memory_pressure()
	net: do not allow gso_size to be set to GSO_BY_FRAGS
	bus: ti-sysc: Improve reset to work with modules with no sysconfig
	bus: ti-sysc: Flush posted write on enable before reset
	ARM: dts: imx7s: Drop dma-apb interrupt-names
	ARM: dts: imx: Adjust dma-apbh node name
	ARM: dts: imx: Set default tuning step for imx7d usdhc
	ARM: dts: imx: Set default tuning step for imx6sx usdhc
	ASoC: rt5665: add missed regulator_bulk_disable
	ASoC: meson: axg-tdm-formatter: fix channel slot allocation
	serial: 8250: Fix oops for port->pm on uart_change_pm()
	ALSA: usb-audio: Add support for Mythware XA001AU capture and playback interfaces.
	cifs: Release folio lock on fscache read hit.
	mmc: wbsd: fix double mmc_free_host() in wbsd_init()
	mmc: block: Fix in_flight[issue_type] value error
	netfilter: set default timeout to 3 secs for sctp shutdown send and recv state
	af_unix: Fix null-ptr-deref in unix_stream_sendpage().
	virtio-net: set queues after driver_ok
	net: fix the RTO timer retransmitting skb every 1ms if linear option is enabled
	net: xfrm: Amend XFRMA_SEC_CTX nla_policy structure
	mmc: f-sdh30: fix order of function calls in sdhci_f_sdh30_remove
	net: phy: broadcom: stub c45 read/write for 54810
	PCI: acpiphp: Reassign resources on bridge if necessary
	dlm: improve plock logging if interrupted
	dlm: replace usage of found with dedicated list iterator variable
	fs: dlm: add pid to debug log
	fs: dlm: change plock interrupted message to debug again
	fs: dlm: use dlm_plock_info for do_unlock_close
	fs: dlm: fix mismatch of plock results from userspace
	MIPS: cpu-features: Enable octeon_cache by cpu_type
	MIPS: cpu-features: Use boot_cpu_type for CPU type based features
	fbdev: Improve performance of sys_imageblit()
	fbdev: Fix sys_imageblit() for arbitrary image widths
	fbdev: fix potential OOB read in fast_imageblit()
	dm integrity: increase RECALC_SECTORS to improve recalculate speed
	dm integrity: reduce vmalloc space footprint on 32-bit architectures
	ALSA: pcm: Set per-card upper limit of PCM buffer allocations
	ALSA: pcm: Use SG-buffer only when direct DMA is available
	ALSA: pcm: Fix potential data race at PCM memory allocation helpers
	regmap: Account for register length in SMBus I/O limits
	ASoC: fsl_sai: Refine enable/disable TE/RE sequence in trigger()
	ASoC: fsl_sai: Add new added registers and new bit definition
	ASoC: fsl_sai: Disable bit clock with transmitter
	drm/amd/display: do not wait for mpc idle if tg is disabled
	drm/amd/display: check TG is non-null before checking if enabled
	tracing: Fix memleak due to race between current_tracer and trace
	octeontx2-af: SDP: fix receive link config
	sock: annotate data-races around prot->memory_pressure
	dccp: annotate data-races in dccp_poll()
	ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
	net: bgmac: Fix return value check for fixed_phy_register()
	net: bcmgenet: Fix return value check for fixed_phy_register()
	net: validate veth and vxcan peer ifindexes
	igb: Avoid starting unnecessary workqueues
	net/sched: fix a qdisc modification with ambiguous command request
	net: remove bond_slave_has_mac_rcu()
	bonding: fix macvlan over alb bond support
	ibmveth: Use dcbf rather than dcbfl
	NFSv4: Fix dropped lock for racing OPEN and delegation return
	clk: Fix slab-out-of-bounds error in devm_clk_release()
	nfsd: Fix race to FREE_STATEID and cl_revoked
	batman-adv: Trigger events for auto adjusted MTU
	batman-adv: Don't increase MTU when set by user
	batman-adv: Do not get eth header before batadv_check_management_packet
	batman-adv: Fix TT global entry leak when client roamed back
	batman-adv: Fix batadv_v_ogm_aggr_send memory leak
	batman-adv: Hold rtnl lock during MTU update via netlink
	lib/clz_ctz.c: Fix __clzdi2() and __ctzdi2() for 32-bit kernels
	radix tree: remove unused variable
	media: vcodec: Fix potential array out-of-bounds in encoder queue_setup
	PCI: acpiphp: Use pci_assign_unassigned_bridge_resources() only for non-root bus
	drm/display/dp: Fix the DP DSC Receiver cap size
	x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
	mm: allow a controlled amount of unfairness in the page lock
	rtnetlink: Reject negative ifindexes in RTM_NEWLINK
	ALSA: pcm: Fix build error on m68k and others
	Revert "ALSA: pcm: Use SG-buffer only when direct DMA is available"
	interconnect: Do not skip aggregation for disabled paths
	ALSA: pcm: Check for null pointer of pointer substream before dereferencing it
	Documentation/sysctl: document page_lock_unfairness
	irqchip/mips-gic: Don't touch vl_map if a local interrupt is not routable
	scsi: snic: Fix double free in snic_tgt_create()
	scsi: core: raid_class: Remove raid_component_add()
	clk: Fix undefined reference to `clk_rate_exclusive_{get,put}'
	pinctrl: renesas: rza2: Add lock around pinctrl_generic{{add,remove}_group,{add,remove}_function}
	dma-buf/sw_sync: Avoid recursive lock during fence signal
	Linux 5.4.255

Change-Id: I564de3c67511761f8a5d1d21b5373a5cbdf90dca
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit ef75d6901c by Greg Kroah-Hartman, 2023-08-30 16:47:09 +00:00
151 changed files with 1560 additions and 846 deletions

@ -62,6 +62,7 @@ Currently, these files are in /proc/sys/vm:
- overcommit_memory
- overcommit_ratio
- page-cluster
- page_lock_unfairness
- panic_on_oom
- percpu_pagelist_fraction
- stat_interval
@ -757,6 +758,14 @@ extra faults and I/O delays for following faults if they would have been part of
that consecutive pages readahead would have brought in.
page_lock_unfairness
====================
This value determines the number of times that the page lock can be
stolen from under a waiter. After the lock is stolen the number of times
specified in this file (default is 5), the "fair lock handoff" semantics
will apply, and the waiter will only be awakened if the lock can be taken.
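
A minimal userspace sketch of reading and tuning this knob through its
procfs path (writes need privilege; error handling trimmed):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/page_lock_unfairness", "r+");
		int val;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &val) == 1)
			printf("page_lock_unfairness = %d\n", val);
		rewind(f);
		fprintf(f, "5\n");	/* the default: 5 steals before fair handoff */
		fclose(f);
		return 0;
	}
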
panic_on_oom
============

@ -382,6 +382,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
nonzero, increment the counter and return 1; otherwise return 0 without
changing the counter
`int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);`
- return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
runtime PM status is RPM_ACTIVE, and either ign_usage_count is true
or the device's usage_count is non-zero, increment the counter and
return 1; otherwise return 0 without changing the counter
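
A hedged sketch of the resulting call pattern in a driver; my_dev,
my_read_if_powered() and my_hw_read() are illustrative stand-ins, not
part of this API:

	#include <linux/pm_runtime.h>

	static u32 my_hw_read(struct device *my_dev);	/* hypothetical MMIO read */

	static int my_read_if_powered(struct device *my_dev, u32 *out)
	{
		/* Take a usage reference only if the device is already
		 * RPM_ACTIVE; with ign_usage_count == true the reference is
		 * taken even when the usage count is currently zero.
		 */
		int ret = pm_runtime_get_if_active(my_dev, true);

		if (ret <= 0)
			return ret ? ret : -EAGAIN;	/* disabled, or not active */

		*out = my_hw_read(my_dev);

		/* A return of 1 means we hold a reference; drop it when done. */
		pm_runtime_put(my_dev);
		return 0;
	}
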
`void pm_runtime_put_noidle(struct device *dev);`
- decrement the device's usage counter

@ -1101,7 +1101,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/iio/adc/stx104.c
F: drivers/iio/addac/stx104.c
APM DRIVER
M: Jiri Kosina <jikos@kernel.org>

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 254
SUBLEVEL = 255
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -59,7 +59,7 @@
reg = <0x80000000 0x2000>;
};
dma_apbh: dma-apbh@80004000 {
dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx23-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <0 14 20 0

@ -78,7 +78,7 @@
status = "disabled";
};
dma_apbh: dma-apbh@80004000 {
dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx28-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <82 83 84 85

@ -160,7 +160,7 @@
interrupt-parent = <&gpc>;
ranges;
dma_apbh: dma-apbh@110000 {
dma_apbh: dma-controller@110000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x00110000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,

@ -211,7 +211,7 @@
power-domains = <&pd_pu>;
};
dma_apbh: dma-apbh@1804000 {
dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
@ -958,6 +958,8 @@
<&clks IMX6SX_CLK_USDHC1>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-start-tap = <20>;
fsl,tuning-step = <2>;
status = "disabled";
};
@ -970,6 +972,8 @@
<&clks IMX6SX_CLK_USDHC2>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-start-tap = <20>;
fsl,tuning-step = <2>;
status = "disabled";
};
@ -982,6 +986,8 @@
<&clks IMX6SX_CLK_USDHC3>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-start-tap = <20>;
fsl,tuning-step = <2>;
status = "disabled";
};

@ -174,7 +174,7 @@
<0x00a06000 0x2000>;
};
dma_apbh: dma-apbh@1804000 {
dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,

@ -1133,6 +1133,8 @@
<&clks IMX7D_USDHC1_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-step = <2>;
fsl,tuning-start-tap = <20>;
status = "disabled";
};
@ -1145,6 +1147,8 @@
<&clks IMX7D_USDHC2_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-step = <2>;
fsl,tuning-start-tap = <20>;
status = "disabled";
};
@ -1157,6 +1161,8 @@
<&clks IMX7D_USDHC3_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
fsl,tuning-step = <2>;
fsl,tuning-start-tap = <20>;
status = "disabled";
};
@ -1192,14 +1198,13 @@
};
};
dma_apbh: dma-apbh@33000000 {
dma_apbh: dma-controller@33000000 {
compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x33000000 0x2000>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
#dma-cells = <1>;
dma-channels = <4>;
clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;

@ -124,7 +124,24 @@
#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
#define cpu_has_octeon_cache \
({ \
int __res; \
\
switch (boot_cpu_type()) { \
case CPU_CAVIUM_OCTEON: \
case CPU_CAVIUM_OCTEON_PLUS: \
case CPU_CAVIUM_OCTEON2: \
case CPU_CAVIUM_OCTEON3: \
__res = 1; \
break; \
\
default: \
__res = 0; \
} \
\
__res; \
})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
@ -341,7 +358,7 @@
({ \
int __res; \
\
switch (current_cpu_type()) { \
switch (boot_cpu_type()) { \
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \

@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic)
*/
typedef struct {
int pagesize;
unsigned char bitmap[0];
unsigned char bitmap[];
} memmap;

@ -710,9 +710,9 @@ static int __init rtas_flash_init(void)
if (!rtas_validate_flash_data.buf)
return -ENOMEM;
flash_block_cache = kmem_cache_create("rtas_flash_cache",
RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
NULL);
flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
RTAS_BLK_SIZE, RTAS_BLK_SIZE,
0, 0, RTAS_BLK_SIZE, NULL);
if (!flash_block_cache) {
printk(KERN_ERR "%s: failed to create block cache\n",
__func__);

@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n
KCOV_INSTRUMENT := n
obj-$(CONFIG_PPC32) += kasan_init_32.o

@ -805,6 +805,14 @@ void __init fpu__init_system_xstate(void)
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp();
/*
* CPU capabilities initialization runs before FPU init. So
* X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
* functional, set the feature bit so depending code works.
*/
setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",

@ -1048,9 +1048,11 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
if (!atomic_dec_and_test(&dev->power.usage_count)) {
trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@ -1080,9 +1082,11 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
if (!atomic_dec_and_test(&dev->power.usage_count)) {
trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@ -1125,28 +1129,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
* pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
* pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
* If that's not the case and if the device's runtime PM status is RPM_ACTIVE
* and the runtime PM usage counter is nonzero, increment the counter and
* return 1. Otherwise return 0 without changing the counter.
* Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
* ign_usage_count is true or the device's usage_count is non-zero, increment
* the counter and return 1. Otherwise return 0 without changing the counter.
*
* If ign_usage_count is true, the function can be used to prevent suspending
* the device when its runtime PM status is RPM_ACTIVE.
*
* If ign_usage_count is false, the function can be used to prevent suspending
* the device when both its runtime PM status is RPM_ACTIVE and its usage_count
* is non-zero.
*
* The caller is responsible for putting the device's usage count when the
* return value is greater than zero.
*/
int pm_runtime_get_if_in_use(struct device *dev)
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
if (dev->power.disable_depth > 0) {
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
} else if (ign_usage_count) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
@ -1476,6 +1499,8 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else
trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@ -1543,6 +1568,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
trace_rpm_usage_rcuidle(dev, 0);
}
}

@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
static struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
};
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,

@ -1809,7 +1809,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
if (ddata->legacy_mode || sysc_offset < 0 ||
if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
@ -1819,9 +1819,13 @@ static int sysc_reset(struct sysc *ddata)
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
if (sysc_offset >= 0) {
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */
sysc_val = sysc_read_sysconfig(ddata);
}
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,

@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
struct clk **ptr, *clk;
struct devm_clk_state *state;
struct clk *clk;
ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
*ptr = clk;
devres_add(dev, ptr);
state->clk = clk;
devres_add(dev, state);
} else {
devres_free(ptr);
devres_free(state);
}
return clk;

@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
list_del_init(&pt->link);
dma_fence_get(&pt->base);
list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
/*
* A signal callback may release the last reference to this
* fence, causing it to be freed. That operation has to be
* last to avoid a use after free inside this loop, and must
* be after we remove the fence from the timeline in order to
* prevent deadlocking on timeline->lock inside
* timeline_fence_release().
*/
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &signalled, link) {
list_del_init(&pt->link);
dma_fence_put(&pt->base);
}
}
/**

@ -1575,15 +1575,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
if (r > 0 && fence->error)
r = fence->error;
dma_fence_put(fence);
if (r < 0)
return r;
if (r == 0)
break;
if (fence->error)
return fence->error;
}
memset(wait, 0, sizeof(*wait));

@ -2902,6 +2902,8 @@ static void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
if (pipe_ctx->stream_res.tg &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);

@ -271,7 +271,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
unsigned size, i;
u64 size;
unsigned i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;

@ -581,6 +581,7 @@
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
#define USB_VENDOR_ID_HP 0x03f0
#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a

@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },

@ -240,13 +240,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
u32 val;
unsigned long flags;
if (iproc_i2c->idm_base) {
spin_lock(&iproc_i2c->idm_lock);
spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
writel(iproc_i2c->ape_addr_mask,
iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
val = readl(iproc_i2c->base + offset);
spin_unlock(&iproc_i2c->idm_lock);
spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
} else {
val = readl(iproc_i2c->base + offset);
}
@ -257,12 +258,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset, u32 val)
{
unsigned long flags;
if (iproc_i2c->idm_base) {
spin_lock(&iproc_i2c->idm_lock);
spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
writel(iproc_i2c->ape_addr_mask,
iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
writel(val, iproc_i2c->base + offset);
spin_unlock(&iproc_i2c->idm_lock);
spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
} else {
writel(val, iproc_i2c->base + offset);
}

@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
source "drivers/iio/addac/Kconfig"
source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"

@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
obj-y += accel/
obj-y += adc/
obj-y += addac/
obj-y += afe/
obj-y += amplifiers/
obj-y += buffer/

@ -840,22 +840,6 @@ config STMPE_ADC
Say yes here to build support for ST Microelectronics STMPE
built-in ADC block (stmpe811).
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86
select ISA_BUS_API
select GPIOLIB
help
Say yes here to build support for the Apex Embedded Systems STX104
integrated analog PC/104 card.
This driver supports the 16 channels of single-ended (8 channels of
differential) analog inputs, 2 channels of analog output, 4 digital
inputs, and 4 digital outputs provided by the STX104.
The base port addresses for the devices may be configured via the base
array module parameter.
config SUN4I_GPADC
tristate "Support for the Allwinner SoCs GPADC"
depends on IIO

@ -72,7 +72,6 @@ obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o

drivers/iio/addac/Kconfig (new file, 24 lines)

@ -0,0 +1,24 @@
#
# ADC DAC drivers
#
# When adding new entries keep the list in alphabetical order
menu "Analog to digital and digital to analog converters"
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86
select ISA_BUS_API
select GPIOLIB
help
Say yes here to build support for the Apex Embedded Systems STX104
integrated analog PC/104 card.
This driver supports the 16 channels of single-ended (8 channels of
differential) analog inputs, 2 channels of analog output, 4 digital
inputs, and 4 digital outputs provided by the STX104.
The base port addresses for the devices may be configured via the base
array module parameter.
endmenu
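
(The base parameter named in the help text is the module parameter
array declared by the driver; e.g. a single card at a hypothetical port
0x300 would be loaded with "modprobe stx104 base=0x300".)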

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O ADDAC drivers
#
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_STX104) += stx104.o

@ -15,7 +15,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#define STX104_OUT_CHAN(chan) { \
.type = IIO_VOLTAGE, \
@ -44,14 +46,38 @@ static unsigned int num_stx104;
module_param_hw_array(base, uint, ioport, &num_stx104, 0);
MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
/**
* struct stx104_reg - device register structure
* @ssr_ad: Software Strobe Register and ADC Data
* @achan: ADC Channel
* @dio: Digital I/O
* @dac: DAC Channels
* @cir_asr: Clear Interrupts and ADC Status
* @acr: ADC Control
* @pccr_fsh: Pacer Clock Control and FIFO Status MSB
* @acfg: ADC Configuration
*/
struct stx104_reg {
u16 ssr_ad;
u8 achan;
u8 dio;
u16 dac[2];
u8 cir_asr;
u8 acr;
u8 pccr_fsh;
u8 acfg;
};
/**
* struct stx104_iio - IIO device private data structure
* @lock: synchronization lock to prevent I/O race conditions
* @chan_out_states: channels' output states
* @base: base port address of the IIO device
* @reg: I/O address offset for the device registers
*/
struct stx104_iio {
struct mutex lock;
unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
unsigned int base;
struct stx104_reg __iomem *reg;
};
/**
@ -64,7 +90,7 @@ struct stx104_iio {
struct stx104_gpio {
struct gpio_chip chip;
spinlock_t lock;
unsigned int base;
u8 __iomem *base;
unsigned int out_state;
};
@ -72,6 +98,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
struct stx104_iio *const priv = iio_priv(indio_dev);
struct stx104_reg __iomem *const reg = priv->reg;
unsigned int adc_config;
int adbu;
int gain;
@ -79,7 +106,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
/* get gain configuration */
adc_config = inb(priv->base + 11);
adc_config = ioread8(&reg->acfg);
gain = adc_config & 0x3;
*val = 1 << gain;
@ -90,25 +117,31 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
mutex_lock(&priv->lock);
/* select ADC channel */
outb(chan->channel | (chan->channel << 4), priv->base + 2);
iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
/* trigger ADC sample capture and wait for completion */
outb(0, priv->base);
while (inb(priv->base + 8) & BIT(7));
/* trigger ADC sample capture by writing to the 8-bit
* Software Strobe Register and wait for completion
*/
iowrite8(0, &reg->ssr_ad);
while (ioread8(&reg->cir_asr) & BIT(7));
*val = inw(priv->base);
*val = ioread16(&reg->ssr_ad);
mutex_unlock(&priv->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
/* get ADC bipolar/unipolar configuration */
adc_config = inb(priv->base + 11);
adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
*val = -32768 * adbu;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* get ADC bipolar/unipolar and gain configuration */
adc_config = inb(priv->base + 11);
adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
gain = adc_config & 0x3;
@ -130,16 +163,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
/* Only four gain states (x1, x2, x4, x8) */
switch (val) {
case 1:
outb(0, priv->base + 11);
iowrite8(0, &priv->reg->acfg);
break;
case 2:
outb(1, priv->base + 11);
iowrite8(1, &priv->reg->acfg);
break;
case 4:
outb(2, priv->base + 11);
iowrite8(2, &priv->reg->acfg);
break;
case 8:
outb(3, priv->base + 11);
iowrite8(3, &priv->reg->acfg);
break;
default:
return -EINVAL;
@ -152,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 65535)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
outw(val, priv->base + 4 + 2 * chan->channel);
mutex_lock(&priv->lock);
priv->chan_out_states[chan->channel] = val;
iowrite16(val, &priv->reg->dac[chan->channel]);
mutex_unlock(&priv->lock);
return 0;
}
return -EINVAL;
@ -222,7 +258,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (offset >= 4)
return -EINVAL;
return !!(inb(stx104gpio->base) & BIT(offset));
return !!(ioread8(stx104gpio->base) & BIT(offset));
}
static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
@ -230,7 +266,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
*bits = inb(stx104gpio->base);
*bits = ioread8(stx104gpio->base);
return 0;
}
@ -252,7 +288,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
else
stx104gpio->out_state &= ~mask;
outb(stx104gpio->out_state, stx104gpio->base);
iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@ -279,7 +315,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip,
stx104gpio->out_state &= ~*mask;
stx104gpio->out_state |= *mask & *bits;
outb(stx104gpio->out_state, stx104gpio->base);
iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@ -306,11 +342,16 @@ static int stx104_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
priv = iio_priv(indio_dev);
priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT);
if (!priv->reg)
return -ENOMEM;
indio_dev->info = &stx104_info;
indio_dev->modes = INDIO_DIRECT_MODE;
/* determine if differential inputs */
if (inb(base[id] + 8) & BIT(5)) {
if (ioread8(&priv->reg->cir_asr) & BIT(5)) {
indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
indio_dev->channels = stx104_channels_diff;
} else {
@ -321,18 +362,17 @@ static int stx104_probe(struct device *dev, unsigned int id)
indio_dev->name = dev_name(dev);
indio_dev->dev.parent = dev;
priv = iio_priv(indio_dev);
priv->base = base[id];
mutex_init(&priv->lock);
/* configure device for software trigger operation */
outb(0, base[id] + 9);
iowrite8(0, &priv->reg->acr);
/* initialize gain setting to x1 */
outb(0, base[id] + 11);
iowrite8(0, &priv->reg->acfg);
/* initialize DAC output to 0V */
outw(0, base[id] + 4);
outw(0, base[id] + 6);
iowrite16(0, &priv->reg->dac[0]);
iowrite16(0, &priv->reg->dac[1]);
stx104gpio->chip.label = dev_name(dev);
stx104gpio->chip.parent = dev;
@ -347,7 +387,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
stx104gpio->chip.set = stx104_gpio_set;
stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
stx104gpio->base = base[id] + 3;
stx104gpio->base = &priv->reg->dio;
stx104gpio->out_state = 0x0;
spin_lock_init(&stx104gpio->lock);

@ -94,6 +94,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
@ -178,6 +179,7 @@ static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
@ -185,9 +187,17 @@ static int aggregate_requests(struct icc_node *node)
if (p->pre_aggregate)
p->pre_aggregate(node);
hlist_for_each_entry(r, &node->req_list, req_node)
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
hlist_for_each_entry(r, &node->req_list, req_node) {
if (r->enabled) {
avg_bw = r->avg_bw;
peak_bw = r->peak_bw;
} else {
avg_bw = 0;
peak_bw = 0;
}
p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
}
return 0;
}
@ -469,6 +479,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
}
EXPORT_SYMBOL_GPL(icc_set_bw);
static int __icc_enable(struct icc_path *path, bool enable)
{
int i;
if (!path)
return 0;
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].enabled = enable;
mutex_unlock(&icc_lock);
return icc_set_bw(path, path->reqs[0].avg_bw,
path->reqs[0].peak_bw);
}
int icc_enable(struct icc_path *path)
{
return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);
int icc_disable(struct icc_path *path)
{
return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
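
A consumer-side sketch of the new helpers, assuming a path handle
obtained via icc_get() (the IDs are hypothetical): bandwidth votes set
with icc_set_bw() are kept across disable/enable, which only toggles
whether they count during aggregation.

	struct icc_path *path = icc_get(dev, MASTER_ID, SLAVE_ID);

	icc_set_bw(path, 1000, 2000);	/* avg/peak votes, in kBps */
	icc_disable(path);		/* this path now aggregates as 0 */
	icc_enable(path);		/* the stored votes are re-applied */
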
/**
* icc_get() - return a handle for path between two endpoints
* @dev: the device requesting the path

@ -14,6 +14,7 @@
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
* @enabled: indicates whether the path with this request is enabled
* @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
@ -22,6 +23,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
bool enabled;
u32 tag;
u32 avg_bw;
u32 peak_bw;

@ -886,8 +886,8 @@ struct amd_ir_data {
*/
struct irq_cfg *cfg;
int ga_vector;
int ga_root_ptr;
int ga_tag;
u64 ga_root_ptr;
u32 ga_tag;
};
struct amd_irte_ops {

@ -48,7 +48,7 @@ void __iomem *mips_gic_base;
DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
static DEFINE_SPINLOCK(gic_lock);
static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
@ -207,7 +207,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
pol = GIC_POL_FALLING_EDGE;
@ -247,7 +247,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
else
irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
handle_level_irq, NULL);
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@ -265,7 +265,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
return -EINVAL;
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@ -276,7 +276,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
}
@ -354,12 +354,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@ -372,32 +372,45 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
static void gic_all_vpes_irq_cpu_online(void)
{
static const unsigned int local_intrs[] = {
GIC_LOCAL_INT_TIMER,
GIC_LOCAL_INT_PERFCTR,
GIC_LOCAL_INT_FDC,
};
unsigned long flags;
int i;
raw_spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
unsigned int intr = local_intrs[i];
struct gic_all_vpes_chip_data *cd;
unsigned int intr;
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
if (!gic_local_irq_is_routable(intr))
continue;
cd = &gic_all_vpes_chip_data[intr];
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
.name = "MIPS GIC Local",
.irq_mask = gic_mask_local_irq_all_vpes,
.irq_unmask = gic_unmask_local_irq_all_vpes,
.irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@ -421,11 +434,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
data = irq_get_irq_data(virq);
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@ -476,6 +489,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
/*
* If adding support for more per-cpu interrupts, keep the
* array in gic_all_vpes_irq_cpu_online() in sync.
*/
switch (intr) {
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@ -514,12 +531,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
spin_lock_irqsave(&gic_lock, flags);
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
spin_unlock_irqrestore(&gic_lock, flags);
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@ -662,8 +679,8 @@ static int gic_cpu_startup(unsigned int cpu)
/* Clear all local IRQ masks (ie. disable all local interrupts) */
write_gic_vl_rmask(~0);
/* Invoke irq_cpu_online callbacks to enable desired interrupts */
irq_cpu_online();
/* Enable desired interrupts */
gic_all_vpes_irq_cpu_online();
return 0;
}

@ -318,6 +318,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
case NETDEV_CHANGENAME:
if (netif_carrier_ok(dev))
set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
fallthrough;
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);

@ -31,11 +31,11 @@
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)

@ -759,6 +759,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
return -EINVAL;
if (*nplanes) {
if (*nplanes != q_data->fmt->num_planes)
return -EINVAL;
for (i = 0; i < *nplanes; i++)
if (sizes[i] < q_data->sizeimage[i])
return -EINVAL;

@ -529,15 +529,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
int vpu_load_firmware(struct platform_device *pdev)
{
struct mtk_vpu *vpu;
struct device *dev = &pdev->dev;
struct device *dev;
struct vpu_run *run;
int ret;
if (!pdev) {
dev_err(dev, "VPU platform device is invalid\n");
pr_err("VPU platform device is invalid\n");
return -EINVAL;
}
dev = &pdev->dev;
vpu = platform_get_drvdata(pdev);
run = &vpu->run;

@ -2033,14 +2033,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mmc_blk_urgent_bkops(mq, mqrq);
}
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@ -2052,6 +2052,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
{
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
@ -2067,7 +2068,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
else
blk_mq_complete_request(req);
mmc_blk_mq_dec_in_flight(mq, req);
mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)

@ -1408,8 +1408,8 @@ static int bcm2835_probe(struct platform_device *pdev)
host->max_clk = clk_get_rate(clk);
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto err;
}

@ -50,9 +50,16 @@ struct f_sdhost_priv {
bool enable_cmd_dat_delay;
};
static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return sdhci_pltfm_priv(pltfm_host);
}
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
{
struct f_sdhost_priv *priv = sdhci_priv(host);
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctrl = 0;
usleep_range(2500, 3000);
@ -85,7 +92,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
struct f_sdhost_priv *priv = sdhci_priv(host);
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctl;
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
@ -109,31 +116,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
.ops = &sdhci_f_sdh30_ops,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
| SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
.quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
| SDHCI_QUIRK2_TUNING_WORK_AROUND,
};
static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
struct resource *res;
int irq, ctrl = 0, ret = 0;
int ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
struct sdhci_pltfm_host *pltfm_host;
u32 reg = 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
return PTR_ERR(host);
priv = sdhci_priv(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
priv->dev = dev;
host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
priv->enable_cmd_dat_delay = device_property_read_bool(dev,
"fujitsu,cmd-dat-delay-select");
@ -141,19 +149,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
if (ret)
goto err;
platform_set_drvdata(pdev, host);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
}
if (dev_of_node(dev)) {
sdhci_get_of_property(pdev);
@ -208,23 +203,22 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
err_clk:
clk_disable_unprepare(priv->clk_iface);
err:
sdhci_free_host(host);
sdhci_pltfm_free(pdev);
return ret;
}
static int sdhci_f_sdh30_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct f_sdhost_priv *priv = sdhci_priv(host);
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
struct clk *clk_iface = priv->clk_iface;
struct clk *clk = priv->clk;
sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
0xffffffff);
sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(priv->clk_iface);
clk_disable_unprepare(priv->clk);
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
clk_disable_unprepare(clk_iface);
clk_disable_unprepare(clk);
return 0;
}

@ -1314,8 +1314,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return ret;
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto error_disable_mmc;
}

@ -1708,8 +1708,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
wbsd_release_resources(host);
wbsd_free_mmc(dev);
mmc_free_host(mmc);
return ret;
}

@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
/* Don't modify or load balance ARPs that do not originate
* from the bond itself or a VLAN directly above the bond.
*/
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {

@ -175,12 +175,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
err = rtnl_nla_parse_ifla(peer_tb,
nla_data(nla_peer) +
sizeof(struct ifinfomsg),
nla_len(nla_peer) -
sizeof(struct ifinfomsg),
NULL);
err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;

@ -2143,6 +2143,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
/* If the switch has just been reset and not yet completed
* loading EEPROM, the reset may interrupt the I2C transaction
* mid-byte, causing the first EEPROM read after the reset
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
mv88e6xxx_g1_wait_eeprom_done(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);

@ -1447,7 +1447,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
int err;
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}

@ -565,7 +565,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
};
phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phydev || IS_ERR(phydev)) {
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}

@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
unsigned long offset;
for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
}
/* replenish the buffers for a pool. note that we don't need to

@ -210,11 +210,11 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to write
* @data: buffer with words to write to the Shadow RAM
* @words: number of words to read
* @data: buffer with words to read to the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
* Reads a 16 bit words buffer to the Shadow RAM using the admin command.
**/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
@ -234,18 +234,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: offset %d beyond Shadow RAM limit %d\n",
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */
/* We can read only up to 4KB (one sector), in one AQ write */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write fail error: tried to write %d words, limit is %d.\n",
"NVM read fail error: tried to read %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */
/* A single read cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
"NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,

@ -1245,6 +1245,16 @@ void igb_ptp_init(struct igb_adapter *adapter)
return;
}
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
} else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
@ -1256,16 +1266,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
igb_ptp_reset(adapter);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
} else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
}
}

@ -2430,9 +2430,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
nix_find_link_frs(rvu, req, pcifunc);
linkcfg:
nix_find_link_frs(rvu, req, pcifunc);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
if (req->update_minlen)

@ -735,6 +735,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
write_pnet(&port->pnet, newnet);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_migrate_l3s_hook(oldnet, newnet);
break;
}

@ -317,6 +317,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
struct macsec_rx_sa *sa = NULL;
int an;
for (an = 0; an < MACSEC_NUM_AN; an++) {
sa = macsec_rxsa_get(rx_sc->sa[an]);
if (sa)
break;
}
return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@ -561,18 +574,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
struct macsec_dev *macsec = macsec_priv(skb->dev);
struct macsec_secy *secy = &macsec->secy;
bool sci_present = macsec_skb_cb(skb)->has_sci;
return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
txsc_stats->stats.OutOctetsEncrypted += skb->len;
txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
txsc_stats->stats.OutOctetsProtected += skb->len;
txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@ -602,9 +625,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
len = skb->len;
/* packet is encrypted/protected so tx_bytes must be calculated */
len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -760,6 +784,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -800,15 +825,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_dropped);
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
rxsc_stats->stats.InOctetsDecrypted += skb->len;
rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
rxsc_stats->stats.InOctetsValidated += skb->len;
rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -821,6 +848,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
this_cpu_inc(rx_sa->stats->InPktsNotValid);
DEV_STATS_INC(secy->netdev, rx_errors);
return false;
}
@@ -906,9 +935,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1050,6 +1079,7 @@ static void handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
continue;
}
@@ -1161,6 +1191,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}
@@ -1171,11 +1202,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_errors);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1185,6 +1220,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1239,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
goto drop;
}
}
@@ -1230,6 +1268,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,12 +1276,11 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
macsec->secy.netdev->stats.rx_dropped++;
DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
rcu_read_unlock();
@@ -1279,6 +1317,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
DEV_STATS_INC(macsec->secy.netdev, rx_errors);
continue;
}
@@ -1297,7 +1336,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
secy_stats->stats.InPktsUnknownSCI++;
u64_stats_update_end(&secy_stats->syncp);
} else {
macsec->secy.netdev->stats.rx_dropped++;
DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
}
}
@@ -2731,21 +2770,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
if (!secy->operational) {
kfree_skb(skb);
dev->stats.tx_dropped++;
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
dev->stats.tx_dropped++;
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -2957,8 +2996,9 @@ static void macsec_get_stats64(struct net_device *dev,
s->tx_bytes += tmp.tx_bytes;
}
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
}
static int macsec_get_iflink(const struct net_device *dev)

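For context on the macsec hunks above: DEV_STATS_INC() comes from include/linux/netdevice.h, where the core net_device counters gained atomic aliases; upstream the helper expands to an atomic_long_inc(). A minimal sketch, assuming the 5.4 backport matches upstream:

	/* Each core stat has an atomic twin named __<stat>, so hot-path
	 * increments from different contexts can no longer lose updates
	 * the way a plain, non-atomic dev->stats.x++ could.
	 */
	#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)

Readers then snapshot with atomic_long_read(), which is exactly what the macsec_get_stats64() hunk above now does for rx_dropped, tx_dropped and rx_errors.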

@@ -425,6 +425,17 @@ static int bcm5482_read_status(struct phy_device *phydev)
return err;
}
static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
return -EOPNOTSUPP;
}
static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
u16 val)
{
return -EOPNOTSUPP;
}
static int bcm5481_config_aneg(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
@@ -696,6 +707,8 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
.read_mmd = bcm54810_read_mmd,
.write_mmd = bcm54810_write_mmd,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,


@@ -2194,7 +2194,9 @@ static void team_setup(struct net_device *dev)
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;


@@ -1255,10 +1255,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
err = rtnl_nla_parse_ifla(peer_tb,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
nla_len(nla_peer) - sizeof(struct ifinfomsg),
NULL);
err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;


@@ -3268,8 +3268,6 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
_virtnet_set_queues(vi, vi->curr_queue_pairs);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -3282,6 +3280,8 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
_virtnet_set_queues(vi, vi->curr_queue_pairs);
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);


@@ -510,12 +510,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
__pci_bus_size_bridges(dev->subordinate,
&add_list);
if (pci_is_root_bus(bus))
__pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
if (pci_is_root_bus(bus))
__pci_bus_assign_resources(bus, &add_list, NULL);
else
pci_assign_unassigned_bridge_resources(bus->self);
}
acpiphp_sanitize_bus(bus);


@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
q = p->next;
kfree(p);
}
kfree(data);
}


@@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinmux.h>
@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
struct pinctrl_dev *pctl;
struct pinctrl_gpio_range gpio_range;
int npins;
struct mutex mutex; /* serialize adding groups and functions */
};
#define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */
@@ -359,10 +361,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
psel_val[i] = MUX_FUNC(value);
}
mutex_lock(&priv->mutex);
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
if (gsel < 0)
return gsel;
if (gsel < 0) {
ret = gsel;
goto unlock;
}
/*
* Register a single group function where the 'data' is an array PSEL
@@ -391,6 +397,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.function = np->name;
*num_maps = 1;
mutex_unlock(&priv->mutex);
return 0;
remove_function:
@@ -399,6 +407,9 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
unlock:
mutex_unlock(&priv->mutex);
dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
return ret;
@@ -476,6 +487,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
mutex_init(&priv->mutex);
platform_set_drvdata(pdev, priv);
priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *


@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
static void raid_component_release(struct device *dev)
{
struct raid_component *rc =
container_of(dev, struct raid_component, dev);
dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
put_device(rc->dev.parent);
kfree(rc);
}
int raid_component_add(struct raid_template *r,struct device *raid_dev,
struct device *component_dev)
{
struct device *cdev =
attribute_container_find_class_device(&r->raid_attrs.ac,
raid_dev);
struct raid_component *rc;
struct raid_data *rd = dev_get_drvdata(cdev);
int err;
rc = kzalloc(sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
INIT_LIST_HEAD(&rc->node);
device_initialize(&rc->dev);
rc->dev.release = raid_component_release;
rc->dev.parent = get_device(component_dev);
rc->num = rd->component_count++;
dev_set_name(&rc->dev, "component-%d", rc->num);
list_add_tail(&rc->node, &rd->component_list);
rc->dev.class = &raid_class.class;
err = device_add(&rc->dev);
if (err)
goto err_out;
return 0;
err_out:
put_device(&rc->dev);
list_del(&rc->node);
rd->component_count--;
put_device(component_dev);
kfree(rc);
return err;
}
EXPORT_SYMBOL(raid_component_add);
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{


@@ -317,12 +317,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
"Snic Tgt: device_add, with err = %d\n",
ret);
put_device(&tgt->dev);
put_device(&snic->shost->shost_gendev);
spin_lock_irqsave(snic->shost->host_lock, flags);
list_del(&tgt->list);
spin_unlock_irqrestore(snic->shost->host_lock, flags);
kfree(tgt);
put_device(&tgt->dev);
tgt = NULL;
return tgt;

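The snic fix above (and the raid_class removal before it) follow the driver-core rule that once device_initialize() has run, teardown must go through put_device() so the release callback does the freeing; a bare kfree() after a failed device_add() either leaks or double-frees. A minimal sketch of the pattern, with hypothetical names:

	struct foo {
		struct device dev;
	};

	static void foo_release(struct device *dev)
	{
		kfree(container_of(dev, struct foo, dev));
	}

	/* ... */
	device_initialize(&f->dev);
	f->dev.release = foo_release;
	err = device_add(&f->dev);
	if (err) {
		put_device(&f->dev);	/* last ref: foo_release() frees f */
		return err;
	}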

@@ -3140,6 +3140,7 @@ void serial8250_init_port(struct uart_8250_port *up)
struct uart_port *port = &up->port;
spin_lock_init(&port->lock);
port->pm = NULL;
port->ops = &serial8250_pops;
up->cur_iotype = 0xFF;


@@ -1023,8 +1023,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
/* Read DR to clear the error flags */
lpuart32_read(&sport->port, UARTDATA);
/* Clear the error flags */
lpuart32_write(&sport->port, sr, UARTSTAT);
if (sr & UARTSTAT_PE)
sport->port.icount.parity++;


@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
CI_HDRC_PMQOS,
};
static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
};
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
{ .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
{ .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);


@@ -193,55 +193,58 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
/* Only usable in contexts where the role can not change. */
static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
{
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
struct dwc3 *dwc;
/*
* FIXME: Fix this layering violation.
*/
dwc = platform_get_drvdata(qcom->dwc3);
/* Core driver may not have probed yet. */
if (!dwc)
return false;
return dwc->xhci;
}
static void dwc3_qcom_enable_wakeup_irq(int irq)
{
if (!irq)
return;
enable_irq(irq);
enable_irq_wake(irq);
}
static void dwc3_qcom_disable_wakeup_irq(int irq)
{
if (!irq)
return;
disable_irq_wake(irq);
disable_irq_nosync(irq);
}
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
disable_irq_wake(qcom->hs_phy_irq);
disable_irq_nosync(qcom->hs_phy_irq);
}
dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
if (qcom->dp_hs_phy_irq) {
disable_irq_wake(qcom->dp_hs_phy_irq);
disable_irq_nosync(qcom->dp_hs_phy_irq);
}
dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
if (qcom->dm_hs_phy_irq) {
disable_irq_wake(qcom->dm_hs_phy_irq);
disable_irq_nosync(qcom->dm_hs_phy_irq);
}
dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
if (qcom->ss_phy_irq) {
disable_irq_wake(qcom->ss_phy_irq);
disable_irq_nosync(qcom->ss_phy_irq);
}
dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
}
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
enable_irq(qcom->hs_phy_irq);
enable_irq_wake(qcom->hs_phy_irq);
}
dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq);
if (qcom->dp_hs_phy_irq) {
enable_irq(qcom->dp_hs_phy_irq);
enable_irq_wake(qcom->dp_hs_phy_irq);
}
dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq);
if (qcom->dm_hs_phy_irq) {
enable_irq(qcom->dm_hs_phy_irq);
enable_irq_wake(qcom->dm_hs_phy_irq);
}
dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq);
if (qcom->ss_phy_irq) {
enable_irq(qcom->ss_phy_irq);
enable_irq_wake(qcom->ss_phy_irq);
}
dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq);
}
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)


@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
{
u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
u32 bit_mask, end_mask, eorx, shift;
const char *s = image->data, *src;
u32 bit_mask, eorx, shift;
const u8 *s = image->data, *src;
u32 *dst;
const u32 *tab = NULL;
const u32 *tab;
size_t tablen;
u32 colortab[16];
int i, j, k;
switch (bpp) {
case 8:
tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
tablen = 16;
break;
case 16:
tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
tablen = 4;
break;
case 32:
default:
tab = cfb_tab32;
tablen = 2;
break;
default:
return;
}
for (i = ppw-1; i--; ) {
@@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
eorx = fgx ^ bgx;
k = image->width/ppw;
for (i = 0; i < tablen; ++i)
colortab[i] = (tab[i] & eorx) ^ bgx;
for (i = image->height; i--; ) {
dst = dst1;
shift = 8;
src = s;
for (j = k; j--; ) {
/*
* Manually unroll the per-line copying loop for better
* performance. This works until we processed the last
* completely filled source byte (inclusive).
*/
switch (ppw) {
case 4: /* 8 bpp */
for (j = k; j >= 2; j -= 2, ++src) {
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
case 2: /* 16 bpp */
for (j = k; j >= 4; j -= 4, ++src) {
*dst++ = colortab[(*src >> 6) & bit_mask];
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 2) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
case 1: /* 32 bpp */
for (j = k; j >= 8; j -= 8, ++src) {
*dst++ = colortab[(*src >> 7) & bit_mask];
*dst++ = colortab[(*src >> 6) & bit_mask];
*dst++ = colortab[(*src >> 5) & bit_mask];
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 3) & bit_mask];
*dst++ = colortab[(*src >> 2) & bit_mask];
*dst++ = colortab[(*src >> 1) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
}
/*
* For image widths that are not a multiple of 8, there
* are trailing pixels left on the current line. Print
* them as well.
*/
for (; j--; ) {
shift -= ppw;
end_mask = tab[(*src >> shift) & bit_mask];
*dst++ = (end_mask & eorx) ^ bgx;
*dst++ = colortab[(*src >> shift) & bit_mask];
if (!shift) {
shift = 8;
src++;
++src;
}
}
dst1 += p->fix.line_length;
s += spitch;
}

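A worked example of the colortab precomputation above, assuming 8 bpp (ppw == 4, bit_mask == 0xf, tab == cfb_tab8_le or cfb_tab8_be):

	/* The 8 bpp table maps each 4-bit pattern to a 32-bit mask with
	 * 0xff for set bits and 0x00 for clear bits, so
	 *
	 *	colortab[i] = (tab[i] & (fgx ^ bgx)) ^ bgx
	 *
	 * resolves each byte to the (replicated) foreground color where
	 * the pattern bit is 1 and to the background color where it is 0.
	 * A source byte such as 0xb1 is then expanded by the unrolled
	 * loop into two words, colortab[0xb] and colortab[0x1], each
	 * packing four pixels; the _le/_be table variants fix the
	 * pixel-to-byte order.
	 */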

@@ -510,7 +510,9 @@ static int mmphw_probe(struct platform_device *pdev)
ret = -ENOENT;
goto failed;
}
clk_prepare_enable(ctrl->clk);
ret = clk_prepare_enable(ctrl->clk);
if (ret)
goto failed;
/* init global regs */
ctrl_set_default(ctrl);


@@ -542,11 +542,9 @@ static void virtio_mmio_release_dev(struct device *_d)
{
struct virtio_device *vdev =
container_of(_d, struct virtio_device, dev);
struct virtio_mmio_device *vm_dev =
container_of(vdev, struct virtio_mmio_device, vdev);
struct platform_device *pdev = vm_dev->pdev;
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
devm_kfree(&pdev->dev, vm_dev);
kfree(vm_dev);
}
/* Platform device */
@@ -554,19 +552,10 @@ static void virtio_mmio_release_dev(struct device *_d)
static int virtio_mmio_probe(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev;
struct resource *mem;
unsigned long magic;
int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
if (!devm_request_mem_region(&pdev->dev, mem->start,
resource_size(mem), pdev->name))
return -EBUSY;
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
@@ -577,9 +566,9 @@ static int virtio_mmio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&vm_dev->virtqueues);
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (vm_dev->base == NULL)
return -EFAULT;
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vm_dev->base))
return PTR_ERR(vm_dev->base);
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);

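The kzalloc() switch above is a lifetime fix: the virtio_device embedded in vm_dev is reference counted and can outlive the platform device's unbind, at which point devm-managed memory would already have been freed. to_virtio_mmio_device() is the driver's existing container_of() wrapper:

	#define to_virtio_mmio_device(_plat_dev) \
		container_of(_plat_dev, struct virtio_mmio_device, vdev)

Allocating with kzalloc() and freeing in virtio_mmio_release_dev() ties the memory to the struct device refcount instead of to driver unbind.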

@@ -4558,8 +4558,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
}
}
BUG_ON(fs_info->balance_ctl ||
test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
atomic_dec(&fs_info->balance_cancel_req);
mutex_unlock(&fs_info->balance_mutex);
return 0;


@@ -4510,9 +4510,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
io_error:
kunmap(page);
unlock_page(page);
read_complete:
unlock_page(page);
return rc;
}


@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
void dlm_scan_timeout(struct dlm_ls *ls)
{
struct dlm_rsb *r;
struct dlm_lkb *lkb;
struct dlm_lkb *lkb = NULL, *iter;
int do_cancel, do_warn;
s64 wait_us;
@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
do_cancel = 0;
do_warn = 0;
mutex_lock(&ls->ls_timeout_mutex);
list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
wait_us = ktime_to_us(ktime_sub(ktime_get(),
lkb->lkb_timestamp));
iter->lkb_timestamp));
if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
wait_us >= (lkb->lkb_timeout_cs * 10000))
if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
wait_us >= (iter->lkb_timeout_cs * 10000))
do_cancel = 1;
if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
wait_us >= dlm_config.ci_timewarn_cs * 10000)
do_warn = 1;
if (!do_cancel && !do_warn)
continue;
hold_lkb(lkb);
hold_lkb(iter);
lkb = iter;
break;
}
mutex_unlock(&ls->ls_timeout_mutex);
if (!do_cancel && !do_warn)
if (!lkb)
break;
r = lkb->lkb_resource;
@@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
int found = 0;
struct dlm_lkb *lkb = NULL, *iter;
mutex_lock(&ls->ls_waiters_mutex);
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
if (lkb->lkb_flags & DLM_IFL_RESEND) {
hold_lkb(lkb);
found = 1;
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
if (iter->lkb_flags & DLM_IFL_RESEND) {
hold_lkb(iter);
lkb = iter;
break;
}
}
mutex_unlock(&ls->ls_waiters_mutex);
if (!found)
lkb = NULL;
return lkb;
}
@@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int mode, uint32_t flags, void *name, unsigned int namelen,
unsigned long timeout_cs, uint32_t *lkid)
{
struct dlm_lkb *lkb;
struct dlm_lkb *lkb = NULL, *iter;
struct dlm_user_args *ua;
int found_other_mode = 0;
int found = 0;
int rv = 0;
mutex_lock(&ls->ls_orphans_mutex);
list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
if (lkb->lkb_resource->res_length != namelen)
list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
if (iter->lkb_resource->res_length != namelen)
continue;
if (memcmp(lkb->lkb_resource->res_name, name, namelen))
if (memcmp(iter->lkb_resource->res_name, name, namelen))
continue;
if (lkb->lkb_grmode != mode) {
if (iter->lkb_grmode != mode) {
found_other_mode = 1;
continue;
}
found = 1;
list_del_init(&lkb->lkb_ownqueue);
lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
*lkid = lkb->lkb_id;
lkb = iter;
list_del_init(&iter->lkb_ownqueue);
iter->lkb_flags &= ~DLM_IFL_ORPHAN;
*lkid = iter->lkb_id;
break;
}
mutex_unlock(&ls->ls_orphans_mutex);
if (!found && found_other_mode) {
if (!lkb && found_other_mode) {
rv = -EAGAIN;
goto out;
}
if (!found) {
if (!lkb) {
rv = -ENOENT;
goto out;
}


@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
abandoned waiter. So, we have to insert the unlock-close when the
lock call is interrupted. */
static void do_unlock_close(struct dlm_ls *ls, u64 number,
struct file *file, struct file_lock *fl)
static void do_unlock_close(const struct dlm_plock_info *info)
{
struct plock_op *op;
@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
return;
op->info.optype = DLM_PLOCK_OP_UNLOCK;
op->info.pid = fl->fl_pid;
op->info.fsid = ls->ls_global_id;
op->info.number = number;
op->info.pid = info->pid;
op->info.fsid = info->fsid;
op->info.number = info->number;
op->info.start = 0;
op->info.end = OFFSET_MAX;
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
op->info.owner = (__u64) fl->fl_pid;
else
op->info.owner = (__u64)(long) fl->fl_owner;
op->info.owner = info->owner;
op->info.flags |= DLM_PLOCK_FL_CLOSE;
send_op(op);
@@ -161,13 +157,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = wait_event_killable(recv_wq, (op->done != 0));
if (rv == -ERESTARTSYS) {
log_debug(ls, "%s: wait killed %llx", __func__,
(unsigned long long)number);
spin_lock(&ops_lock);
list_del(&op->list);
spin_unlock(&ops_lock);
log_debug(ls, "%s: wait interrupted %x %llx pid %d",
__func__, ls->ls_global_id,
(unsigned long long)number, op->info.pid);
dlm_release_plock_op(op);
do_unlock_close(ls, number, file, fl);
do_unlock_close(&op->info);
goto out;
}
@@ -408,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
list_del(&op->list);
else
list_move(&op->list, &recv_list);
list_move_tail(&op->list, &recv_list);
memcpy(&info, &op->info, sizeof(info));
}
spin_unlock(&ops_lock);
@@ -433,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
loff_t *ppos)
{
struct plock_op *op = NULL, *iter;
struct dlm_plock_info info;
struct plock_op *op;
int found = 0, do_callback = 0;
int do_callback = 0;
if (count != sizeof(info))
return -EINVAL;
@@ -446,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
if (check_version(&info))
return -EINVAL;
/*
* The results for waiting ops (SETLKW) can be returned in any
* order, so match all fields to find the op. The results for
* non-waiting ops are returned in the order that they were sent
* to userspace, so match the result with the first non-waiting op.
*/
spin_lock(&ops_lock);
list_for_each_entry(op, &recv_list, list) {
if (op->info.fsid == info.fsid &&
op->info.number == info.number &&
op->info.owner == info.owner) {
if (info.wait) {
list_for_each_entry(iter, &recv_list, list) {
if (iter->info.fsid == info.fsid &&
iter->info.number == info.number &&
iter->info.owner == info.owner &&
iter->info.pid == info.pid &&
iter->info.start == info.start &&
iter->info.end == info.end &&
iter->info.ex == info.ex &&
iter->info.wait) {
op = iter;
break;
}
}
} else {
list_for_each_entry(iter, &recv_list, list) {
if (!iter->info.wait) {
op = iter;
break;
}
}
}
if (op) {
/* Sanity check that op and info match. */
if (info.wait)
WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
else
WARN_ON(op->info.fsid != info.fsid ||
op->info.number != info.number ||
op->info.owner != info.owner ||
op->info.optype != info.optype);
list_del_init(&op->list);
memcpy(&op->info, &info, sizeof(info));
if (op->data)
do_callback = 1;
else
op->done = 1;
found = 1;
break;
}
}
spin_unlock(&ops_lock);
if (found) {
if (op) {
if (do_callback)
dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
log_print("dev_write no op %x %llx", info.fsid,
(unsigned long long)info.number);
log_print("%s: no op %x %llx", __func__,
info.fsid, (unsigned long long)info.number);
return count;
}


@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
static void recover_lvb(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *high_lkb = NULL;
struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
uint32_t high_seq = 0;
int lock_lvb_exists = 0;
int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;
if (!rsb_flag(r, RSB_NEW_MASTER2) &&
@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
/* we are the new master, so figure out if VALNOTVALID should
be set, and set the rsb lvb from the best lkb available. */
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
big_lock_exists = 1;
if (iter->lkb_grmode > DLM_LOCK_CR) {
big_lkb = iter;
goto setflag;
}
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = lkb;
high_seq = lkb->lkb_lvbseq;
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = iter;
high_seq = iter->lkb_lvbseq;
}
}
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
big_lock_exists = 1;
if (iter->lkb_grmode > DLM_LOCK_CR) {
big_lkb = iter;
goto setflag;
}
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = lkb;
high_seq = lkb->lkb_lvbseq;
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = iter;
high_seq = iter->lkb_lvbseq;
}
}
@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
/* lvb is invalidated if only NL/CR locks remain */
if (!big_lock_exists)
if (!big_lkb)
rsb_set_flag(r, RSB_VALNOTVALID);
if (!r->res_lvbptr) {
@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
}
if (big_lock_exists) {
r->res_lvbseq = lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
if (big_lkb) {
r->res_lvbseq = big_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
} else if (high_lkb) {
r->res_lvbseq = high_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);


@@ -1046,7 +1046,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
int val;
unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
spin_lock(&sdp->sd_tune.gt_spin);
logd_secs = sdp->sd_tune.gt_logd_secs;
quota_quantum = sdp->sd_tune.gt_quota_quantum;
statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
statfs_slow = sdp->sd_tune.gt_statfs_slow;
spin_unlock(&sdp->sd_tune.gt_spin);
if (is_ancestor(root, sdp->sd_master_dir))
seq_puts(s, ",meta");
@@ -1101,17 +1108,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
}
if (args->ar_discard)
seq_puts(s, ",discard");
val = sdp->sd_tune.gt_logd_secs;
if (val != 30)
seq_printf(s, ",commit=%d", val);
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
seq_printf(s, ",statfs_quantum=%d", val);
else if (sdp->sd_tune.gt_statfs_slow)
if (logd_secs != 30)
seq_printf(s, ",commit=%d", logd_secs);
if (statfs_quantum != 30)
seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
else if (statfs_slow)
seq_puts(s, ",statfs_quantum=0");
val = sdp->sd_tune.gt_quota_quantum;
if (val != 60)
seq_printf(s, ",quota_quantum=%d", val);
if (quota_quantum != 60)
seq_printf(s, ",quota_quantum=%d", quota_quantum);
if (args->ar_statfs_percent)
seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) {


@@ -2027,6 +2027,9 @@ dbAllocDmapLev(struct bmap * bmp,
if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
return -ENOSPC;
if (leafidx < 0)
return -EIO;
/* determine the block number within the file system corresponding
* to the leaf at which free space was found.
*/


@@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag)
jfs_info("txBegin: flag = 0x%x", flag);
log = JFS_SBI(sb)->log;
if (!log) {
jfs_error(sb, "read-only filesystem\n");
return 0;
}
TXN_LOCK();
INCREMENT(TxStat.txBegin);


@@ -798,6 +798,11 @@ static int jfs_link(struct dentry *old_dentry,
if (rc)
goto out;
if (isReadOnly(ip)) {
jfs_error(ip->i_sb, "read-only filesystem\n");
return -EROFS;
}
tid = txBegin(ip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);


@@ -6887,8 +6887,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_restart;
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
if (data->arg.new_lock_owner != 0 &&
nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
lsp->ls_state))
goto out_restart;
if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
goto out_restart;
fallthrough;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {


@@ -1096,9 +1096,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
WARN_ON(!list_empty(&dp->dl_recall_lru));
if (clp->cl_minorversion) {
spin_lock(&clp->cl_lock);
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
refcount_inc(&dp->dl_stid.sc_count);
spin_lock(&clp->cl_lock);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}
@@ -5513,15 +5513,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
char addr_str[INET6_ADDRSTRLEN];
rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
sizeof(addr_str));
pr_warn_ratelimited("NFSD: client %s testing state ID "
"with incorrect client ID\n", addr_str);
return status;
}
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)


@@ -28,6 +28,7 @@ struct ovl_sb {
};
struct ovl_layer {
/* ovl_free_fs() relies on @mnt being the first member! */
struct vfsmount *mnt;
/* Trap in ovl inode cache */
struct inode *trap;
@@ -38,6 +39,14 @@ struct ovl_layer {
int fsid;
};
/*
* ovl_free_fs() relies on @mnt being the first member when unmounting
* the private mounts created for each layer. Let's check both the
* offset and type.
*/
static_assert(offsetof(struct ovl_layer, mnt) == 0);
static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
struct ovl_path {
struct ovl_layer *layer;
struct dentry *dentry;

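Schematically, the guarantee being asserted is what lets a layer object be treated as its first member; an illustrative snippet (not the actual ovl_free_fs() body):

	/* Valid only because offsetof(struct ovl_layer, mnt) == 0 and
	 * the member really is a struct vfsmount *: */
	struct vfsmount *mnt = *(struct vfsmount **)layer;

If either property silently changed, the unmount path would dereference garbage; the static_assert() lines turn that into a build failure instead.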

@@ -546,7 +546,7 @@ static void invalidate_dquots(struct super_block *sb, int type)
continue;
/* Wait for dquot users */
if (atomic_read(&dquot->dq_count)) {
dqgrab(dquot);
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
/*
* Once dqput() wakes us up, we know it's time to free
@@ -2415,7 +2415,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
error = add_dquot_ref(sb, type);
if (error)
dquot_disable(sb, type, flags);
dquot_disable(sb, type,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
return error;
out_fmt:


@@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb,
}
if (translate) {
if (str_o_len <= 2 && str_o[0] == '.' &&
if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
(str_o_len == 1 || str_o[1] == '.'))
needsCRC = 1;
if (needsCRC) {


@@ -1060,7 +1060,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3


@@ -172,6 +172,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
/**
* clk_rate_exclusive_get - get exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to get exclusive control over the rate of a
* provider. It prevents any other consumer from executing, even indirectly,
* an operation which could alter the rate of the provider or cause glitches.
*
* If exclusivity is claimed more than once on a clock, even by the same driver,
* the rate effectively gets locked as exclusivity can't be preempted.
*
* Must not be called from within atomic context.
*
* Returns success (0) or negative errno.
*/
int clk_rate_exclusive_get(struct clk *clk);
/**
* clk_rate_exclusive_put - release exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to release the exclusivity they previously got
* from clk_rate_exclusive_get()
*
* The caller must balance the number of clk_rate_exclusive_get() and
* clk_rate_exclusive_put() calls.
*
* Must not be called from within atomic context.
*/
void clk_rate_exclusive_put(struct clk *clk);
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -218,6 +251,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
static inline int clk_rate_exclusive_get(struct clk *clk)
{
return 0;
}
static inline void clk_rate_exclusive_put(struct clk *clk) {}
#endif
/**
@@ -530,38 +570,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
/**
* clk_rate_exclusive_get - get exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to get exclusive control over the rate of a
* provider. It prevents any other consumer from executing, even indirectly,
* an operation which could alter the rate of the provider or cause glitches.
*
* If exclusivity is claimed more than once on a clock, even by the same driver,
* the rate effectively gets locked as exclusivity can't be preempted.
*
* Must not be called from within atomic context.
*
* Returns success (0) or negative errno.
*/
int clk_rate_exclusive_get(struct clk *clk);
/**
* clk_rate_exclusive_put - release exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to release the exclusivity they previously got
* from clk_rate_exclusive_get()
*
* The caller must balance the number of clk_rate_exclusive_get() and
* clk_rate_exclusive_put() calls.
*
* Must not be called from within atomic context.
*/
void clk_rate_exclusive_put(struct clk *clk);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -918,14 +926,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
static inline int clk_rate_exclusive_get(struct clk *clk)
{
return 0;
}
static inline void clk_rate_exclusive_put(struct clk *clk) {}
static inline int clk_enable(struct clk *clk)
{
return 0;

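A hedged consumer-side sketch of the clk rate-exclusivity API whose kernel-doc is relocated above (the rate value is arbitrary):

	struct clk *clk = devm_clk_get(dev, NULL);
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* lock out other consumers */
	if (ret)
		return ret;

	clk_set_rate(clk, 104000000);	/* the holder itself may still set it */
	/* ... window in which the rate must not change or glitch ... */

	clk_rate_exclusive_put(clk);	/* must balance the get */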

@@ -29,6 +29,8 @@ struct icc_path *icc_get(struct device *dev, const int src_id,
const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
void icc_put(struct icc_path *path);
int icc_enable(struct icc_path *path);
int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
@@ -50,6 +52,16 @@ static inline void icc_put(struct icc_path *path)
{
}
static inline int icc_enable(struct icc_path *path)
{
return 0;
}
static inline int icc_disable(struct icc_path *path)
{
return 0;
}
static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
return 0;

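A consumer-side sketch of the new helpers ("cpu-mem" is a hypothetical interconnect path name; bandwidths are in kBps):

	struct icc_path *path = of_icc_get(dev, "cpu-mem");

	if (IS_ERR(path))
		return PTR_ERR(path);

	icc_set_bw(path, 1000, 2000);	/* average / peak vote */

	icc_disable(path);	/* e.g. runtime suspend: drop the vote, keep the value */
	icc_enable(path);	/* resume: re-apply the stored bandwidth */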

@@ -39,6 +39,8 @@ struct user_struct;
struct writeback_control;
struct bdi_writeback;
extern int sysctl_page_lock_unfairness;
void init_mm_internals(void);
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */


@@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -59,6 +59,11 @@ extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return pm_runtime_get_if_active(dev, false);
}
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
@@ -142,6 +147,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
bool ign_usage_count)
{
return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }

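pm_runtime_get_if_active() generalizes pm_runtime_get_if_in_use(): with @ign_usage_count set it takes a reference whenever the device is runtime-active, regardless of the usage counter. A hedged caller sketch in the shape of the ALSA hda fix in this release:

	/* Returns 1 (and bumps the usage count) only if the device is
	 * active; 0 if it is suspended or suspending, -EINVAL if runtime
	 * PM is disabled. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;	/* hardware is (going) down: skip the register sync */

	/* ... safe to access the hardware here ... */

	pm_runtime_put(dev);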

@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
int __must_check raid_component_add(struct raid_template *, struct device *,
struct device *);


@@ -148,6 +148,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (gso_type & SKB_GSO_UDP)
nh_off -= thlen;
/* Kernel has a special handling for GSO_BY_FRAGS. */
if (gso_size == GSO_BY_FRAGS)
return -EINVAL;
/* Too small packets are not really GSO ones. */
if (skb->len - nh_off > gso_size) {
shinfo->gso_size = gso_size;


@@ -20,6 +20,8 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
#define WQ_FLAG_CUSTOM 0x08
#define WQ_FLAG_DONE 0x10
/*
* A single wait-queue entry structure:


@@ -401,7 +401,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
return m2m_ctx->out_q_ctx.num_rdy;
unsigned int num_buf_rdy;
unsigned long flags;
spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
return num_buf_rdy;
}
/**
@@ -413,7 +420,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
return m2m_ctx->cap_q_ctx.num_rdy;
unsigned int num_buf_rdy;
unsigned long flags;
spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
return num_buf_rdy;
}
/**

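Callers are unchanged; the rdy_spinlock is now taken inside the helpers. A sketch of the typical reader this protects, as a mem2mem driver's job_ready() callback (struct my_ctx is hypothetical):

	static int job_ready(void *priv)
	{
		struct my_ctx *ctx = priv;

		/* Buffers may be queued concurrently from interrupt context
		 * under rdy_spinlock; since the helpers take the same lock,
		 * these counts can no longer be read torn or stale. */
		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0 &&
		       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0;
	}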

@@ -686,37 +686,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
const u8 *mac)
static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
return NULL;
}
/* Caller must hold rcu_read_lock() for read */
static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
struct netdev_hw_addr *ha;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return true;
if (netdev_uc_empty(bond->dev))
return false;
netdev_for_each_uc_addr(ha, bond->dev)
if (ether_addr_equal_64bits(mac, ha->addr))
return true;
return false;
}


@@ -164,7 +164,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
struct netlink_ext_ack *exterr);
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);

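This is the helper the veth hunk earlier in this diff switches to. Its body in net/core/rtnetlink.c is essentially the previously open-coded sequence plus a length check up front; a sketch, not the verbatim implementation:

	int rtnl_nla_parse_ifinfomsg(struct nlattr **tb,
				     const struct nlattr *nla_peer,
				     struct netlink_ext_ack *exterr)
	{
		const struct ifinfomsg *ifmp;
		const struct nlattr *attrs;
		size_t len;

		/* The nested attribute must at least hold an ifinfomsg. */
		if (nla_len(nla_peer) < sizeof(*ifmp))
			return -EINVAL;

		ifmp = nla_data(nla_peer);
		attrs = (const struct nlattr *)((const char *)ifmp + sizeof(*ifmp));
		len = nla_len(nla_peer) - sizeof(*ifmp);

		return nla_parse_deprecated(tb, IFLA_MAX, attrs, len,
					    ifla_policy, exterr);
	}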

@@ -1178,6 +1178,7 @@ struct proto {
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
* Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
@@ -1291,6 +1292,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
return sk->sk_prot->memory_pressure != NULL;
}
static inline bool sk_under_global_memory_pressure(const struct sock *sk)
{
return sk->sk_prot->memory_pressure &&
!!READ_ONCE(*sk->sk_prot->memory_pressure);
}
static inline bool sk_under_memory_pressure(const struct sock *sk)
{
if (!sk->sk_prot->memory_pressure)
@@ -1300,7 +1307,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
return !!*sk->sk_prot->memory_pressure;
return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
static inline long
@@ -1354,7 +1361,7 @@ proto_memory_pressure(struct proto *prot)
{
if (!prot->memory_pressure)
return false;
return !!*prot->memory_pressure;
return !!READ_ONCE(*prot->memory_pressure);
}

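The READ_ONCE() conversions above pair with WRITE_ONCE() on the writer side; a minimal sketch of the annotated pattern (enter_slow_path() is a placeholder):

	/* writer, e.g. a protocol's enter_memory_pressure hook */
	WRITE_ONCE(*prot->memory_pressure, 1);

	/* reader: the annotation marks the lockless access as intentional
	 * and stops the compiler from tearing or re-reading the value */
	if (READ_ONCE(*prot->memory_pressure))
		enter_slow_path();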

@@ -119,6 +119,9 @@ struct snd_card {
bool registered; /* card_dev is registered? */
wait_queue_head_t remove_sleep;
size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
struct mutex memory_mutex; /* protection for the above */
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
wait_queue_head_t power_sleep;
