This is the 5.10.178 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmRBD58ACgkQONu9yGCS
 aT5BTxAApbYtClwFI1KGwlvnh9elm2m6NYDZcBleAT8bps1ofI50Bpca0CKgkX8f
 HLzRid8WE5BW6+3tpDJxBwqEGEGG1Z8bgleaM62PxiNU3CRFKtUuDmS2DiVAK30d
 PfdvjhOxlwf4f6e+WHSvGXvqxV9w1DtjqG+Lz1jA37sAAj6IithDuSkNrYcsojFF
 u+zA+17M2KVG8vrTCHZVH/ij9A1w4gWOkhYVCKaC7hKafTsU613YjFTGpqelhvTS
 6AfMSTI15E01Qy6FM5OjqmVM4k8UWIydA1WBV7aHLn3y2MzXeaYza8xsg90Qu2V2
 49F4Yu53WLkuNV0aDOURnaQ7M1m+Sj8IL/MD7G5iLIDwjN3PDwY5IqwKyYueZ/2P
 TdlNTTffCC66MiYjAy/A5gPg4bjxvs7aaQkjgahluzWnXUWSdyUvJDg1XYGdhf1l
 W4E2OcWH0Al6se56255O2eKbvmeOe+IHW22oRoDaAC9+14Lp6KWP9sAh4/zrEcgf
 /x0YxZekOoWVdVtoP4oS1CE3Rj9v4HmtPT2QVltE7Dag7sn3FtGWTQ+SxZ34gmwY
 RYCvoCpBF5SNg3tkW/eIwl+6fRryiT/LS9OsUmz+5g0L6mkK5m6ScleIbAGYq6BZ
 4mu6CwHuSBX0O/EvRgmVpZpPsKsHypVu86krtTlW/+HcKBrXSuY=
 =hM8w
 -----END PGP SIGNATURE-----

Merge 5.10.178 into android12-5.10-lts

Changes in 5.10.178
	gpio: GPIO_REGMAP: select REGMAP instead of depending on it
	Drivers: vmbus: Check for channel allocation before looking up relids
	pwm: cros-ec: Explicitly set .polarity in .get_state()
	pwm: sprd: Explicitly set .polarity in .get_state()
	KVM: s390: pv: fix external interruption loop not always detected
	wifi: mac80211: fix invalid drv_sta_pre_rcu_remove calls for non-uploaded sta
	net: qrtr: combine nameservice into main module
	net: qrtr: Fix a refcount bug in qrtr_recvmsg()
	icmp: guard against too small mtu
	net: don't let netpoll invoke NAPI if in xmit context
	sctp: check send stream number after wait_for_sndbuf
	net: qrtr: Do not do DEL_SERVER broadcast after DEL_CLIENT
	ipv6: Fix an uninit variable access bug in __ip6_make_skb()
	gpio: davinci: Add irq chip flag to skip set wake
	net: ethernet: ti: am65-cpsw: Fix mdio cleanup in probe
	net: stmmac: fix up RX flow hash indirection table when setting channels
	sunrpc: only free unix grouplist after RCU settles
	NFSD: callback request does not use correct credential for AUTH_SYS
	usb: xhci: tegra: fix sleep in atomic call
	xhci: also avoid the XHCI_ZERO_64B_REGS quirk with a passthrough iommu
	USB: serial: cp210x: add Silicon Labs IFS-USB-DATACABLE IDs
	usb: typec: altmodes/displayport: Fix configure initial pin assignment
	USB: serial: option: add Telit FE990 compositions
	USB: serial: option: add Quectel RM500U-CN modem
	iio: adc: ti-ads7950: Set `can_sleep` flag for GPIO chip
	iio: dac: cio-dac: Fix max DAC write value check for 12-bit
	iio: light: cm32181: Unregister second I2C client if present
	tty: serial: sh-sci: Fix transmit end interrupt handler
	tty: serial: sh-sci: Fix Rx on RZ/G2L SCI
	tty: serial: fsl_lpuart: avoid checking for transfer complete when UARTCTRL_SBK is asserted in lpuart32_tx_empty
	nilfs2: fix potential UAF of struct nilfs_sc_info in nilfs_segctor_thread()
	nilfs2: fix sysfs interface lifetime
	dt-bindings: serial: renesas,scif: Fix 4th IRQ for 4-IRQ SCIFs
	ALSA: hda/realtek: Add quirk for Clevo X370SNW
	iio: adc: ad7791: fix IRQ flags
	scsi: iscsi_tcp: Check that sock is valid before iscsi_set_param()
	perf/core: Fix the same task check in perf_event_set_output
	ftrace: Mark get_lock_parent_ip() __always_inline
	ftrace: Fix issue that 'direct->addr' not restored in modify_ftrace_direct()
	can: j1939: j1939_tp_tx_dat_new(): fix out-of-bounds memory access
	can: isotp: isotp_ops: fix poll() to not report false EPOLLOUT events
	tracing: Free error logs of tracing instances
	ASoC: hdac_hdmi: use set_stream() instead of set_tdm_slots()
	drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path
	drm/nouveau/disp: Support more modes by checking with lower bpc
	ring-buffer: Fix race while reader and writer are on the same page
	mm/swap: fix swap_info_struct race between swapoff and get_swap_pages()
	selftests: intel_pstate: ftime() is deprecated
	drm/bridge: lt9611: Fix PLL being unable to lock
	Revert "media: ti: cal: fix possible memory leak in cal_ctx_create()"
	ocfs2: fix freeing uninitialized resource on ocfs2_dlm_shutdown
	bpftool: Print newline before '}' for struct with padding only fields
	Revert "pinctrl: amd: Disable and mask interrupts on resume"
	ALSA: emu10k1: fix capture interrupt handler unlinking
	ALSA: hda/sigmatel: add pin overrides for Intel DP45SG motherboard
	ALSA: i2c/cs8427: fix iec958 mixer control deactivation
	ALSA: firewire-tascam: add missing unwind goto in snd_tscm_stream_start_duplex()
	ALSA: hda/sigmatel: fix S/PDIF out on Intel D*45* motherboards
	Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}
	Bluetooth: Fix race condition in hidp_session_thread
	btrfs: print checksum type and implementation at mount time
	btrfs: fix fast csum implementation detection
	fbmem: Reject FB_ACTIVATE_KD_TEXT from userspace
	mtdblock: tolerate corrected bit-flips
	mtd: rawnand: meson: fix bitmask for length in command word
	mtd: rawnand: stm32_fmc2: remove unsupported EDO mode
	mtd: rawnand: stm32_fmc2: use timings.mode instead of checking tRC_min
	clk: sprd: set max_register according to mapping range
	IB/mlx5: Add support for NDR link speed
	IB/mlx5: Add support for 400G_8X lane speed
	RDMA/cma: Allow UD qp_type to join multicast only
	9p/xen : Fix use after free bug in xen_9pfs_front_remove due to race condition
	niu: Fix missing unwind goto in niu_alloc_channels()
	sysctl: add proc_dou8vec_minmax()
	ipv4: shrink netns_ipv4 with sysctl conversions
	tcp: convert elligible sysctls to u8
	tcp: restrict net.ipv4.tcp_app_win
	drm/armada: Fix a potential double free in an error handling path
	qlcnic: check pci_reset_function result
	net: qrtr: Fix an uninit variable access bug in qrtr_tx_resume()
	sctp: fix a potential overflow in sctp_ifwdtsn_skip
	RDMA/core: Fix GID entry ref leak when create_ah fails
	udp6: fix potential access to stale information
	net: macb: fix a memory corruption in extended buffer descriptor mode
	libbpf: Fix single-line struct definition output in btf_dump
	power: supply: cros_usbpd: reclassify "default case!" as debug
	wifi: mwifiex: mark OF related data as maybe unused
	i2c: imx-lpi2c: clean rx/tx buffers upon new message
	efi: sysfb_efi: Add quirk for Lenovo Yoga Book X91F/L
	drm: panel-orientation-quirks: Add quirk for Lenovo Yoga Book X90F
	verify_pefile: relax wrapper length check
	asymmetric_keys: log on fatal failures in PE/pkcs7
	riscv: add icache flush for nommu sigreturn trampoline
	net: sfp: initialize sfp->i2c_block_size at sfp allocation
	scsi: ses: Handle enclosure with just a primary component gracefully
	x86/PCI: Add quirk for AMD XHCI controller that loses MSI-X state in D3hot
	cgroup/cpuset: Wake up cpuset_attach_wq tasks in cpuset_cancel_attach()
	ubi: Fix failure attaching when vid_hdr offset equals to (sub)page size
	mtd: ubi: wl: Fix a couple of kernel-doc issues
	ubi: Fix deadlock caused by recursively holding work_sem
	powerpc/pseries: rename min_common_depth to primary_domain_index
	powerpc/pseries: Rename TYPE1_AFFINITY to FORM1_AFFINITY
	powerpc/pseries: Consolidate different NUMA distance update code paths
	powerpc/pseries: Add a helper for form1 cpu distance
	powerpc/pseries: Add support for FORM2 associativity
	powerpc/papr_scm: Update the NUMA distance table for the target node
	sched/fair: Move calculate of avg_load to a better location
	sched/fair: Fix imbalance overflow
	x86/rtc: Remove __init for runtime functions
	i2c: ocores: generate stop condition after timeout in polling mode
	watchdog: sbsa_wdog: Make sure the timeout programming is within the limits
	coresight-etm4: Fix for() loop drvdata->nr_addr_cmp range bug
	kbuild: check the minimum assembler version in Kconfig
	kbuild: Switch to 'f' variants of integrated assembler flag
	kbuild: check CONFIG_AS_IS_LLVM instead of LLVM_IAS
	riscv: Handle zicsr/zifencei issues between clang and binutils
	kexec: move locking into do_kexec_load
	kexec: turn all kexec_mutex acquisitions into trylocks
	panic, kexec: make __crash_kexec() NMI safe
	sysctl: Fix data-races in proc_dou8vec_minmax().
	Linux 5.10.178

Change-Id: I34107ee680c7b081bb0c2782483cbb7ec62252ca
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-04-25 16:47:24 +00:00
commit 2d6a4ad08c
127 changed files with 1489 additions and 565 deletions

View File

@@ -74,7 +74,7 @@ properties:
- description: Error interrupt
- description: Receive buffer full interrupt
- description: Transmit buffer empty interrupt
- description: Transmit End interrupt
- description: Break interrupt
- items:
- description: Error interrupt
- description: Receive buffer full interrupt
@@ -89,7 +89,7 @@ properties:
- const: eri
- const: rxi
- const: txi
- const: tei
- const: bri
- items:
- const: eri
- const: rxi

View File

@@ -272,6 +272,8 @@ tcp_app_win - INTEGER
Reserve max(window/2^tcp_app_win, mss) of window for application
buffer. Value 0 is special, it means that nothing is reserved.
Possible values are [0, 31], inclusive.
Default: 31
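As a hedged illustration of the formula above (plain user-space C, not kernel
code; the window and MSS values are invented), the reservation works out as::

  #include <stdio.h>

  /* max(window / 2^tcp_app_win, mss), with 0 meaning "reserve nothing" */
  static unsigned int app_win_reserve(unsigned int window, unsigned int mss,
                                      unsigned int tcp_app_win)
  {
          unsigned int r;

          if (tcp_app_win == 0)
                  return 0;
          r = window >> tcp_app_win;
          return r > mss ? r : mss;
  }

  int main(void)
  {
          /* 64 KiB window, 1460-byte MSS, default tcp_app_win = 31 */
          printf("%u\n", app_win_reserve(65536, 1460, 31));  /* prints 1460 */
          return 0;
  }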
tcp_autocorking - BOOLEAN

View File

@@ -0,0 +1,104 @@
===========================
NUMA resource associativity
===========================
Associativity represents the groupings of the various platform resources into
domains of substantially similar mean performance relative to resources outside
of that domain. Resource subsets of a given domain that exhibit better
performance relative to each other than relative to other resource subsets
are represented as being members of a sub-grouping domain. This performance
characteristic is presented in terms of NUMA node distance within the Linux kernel.
From the platform view, these groups are also referred to as domains.
The PAPR interface currently supports different ways of communicating these resource
grouping details to the OS. These are referred to as Form 0, Form 1 and Form 2
associativity grouping. Form 0 is the oldest format and is now considered deprecated.
The hypervisor indicates the type/form of associativity used via the
"ibm,architecture-vec-5" property. Bit 0 of byte 5 in the "ibm,architecture-vec-5"
property indicates usage of Form 0 or Form 1. A value of 1 indicates the usage of Form 1
associativity. For Form 2 associativity, bit 2 of byte 5 in the "ibm,architecture-vec-5"
property is used.
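As a sketch of that detection (illustrative only; the helper names are invented,
and the MSB-first byte/bit numbering is inferred from the OV5_FORM1_AFFINITY
(0x0580) and OV5_FORM2_AFFINITY (0x0520) encodings seen later in this patch)::

  #include <stdbool.h>
  #include <stdint.h>

  /* vec5 points at the bytes of the "ibm,architecture-vec-5" property */
  static bool vec5_has_form1(const uint8_t *vec5)
  {
          return vec5[5] & 0x80;  /* bit 0 of byte 5, MSB-first */
  }

  static bool vec5_has_form2(const uint8_t *vec5)
  {
          return vec5[5] & 0x20;  /* bit 2 of byte 5, MSB-first */
  }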
Form 0
------
Form 0 associativity supports only two NUMA distances (LOCAL and REMOTE).
Form 1
------
With Form 1, a combination of the ibm,associativity-reference-points and ibm,associativity
device tree properties is used to determine the NUMA distance between resource groups/domains.
The "ibm,associativity" property contains a list of one or more numbers (domainID)
representing the resource's platform grouping domains.
The "ibm,associativity-reference-points" property contains a list of one or more numbers
(domainID index) that represents the 1-based ordinal in the associativity lists.
The list of domainID indexes represents an increasing hierarchy of resource grouping.
For example:
{ primary domainID index, secondary domainID index, tertiary domainID index.. }
The Linux kernel uses the domainID at the primary domainID index as the NUMA node id.
The Linux kernel computes the NUMA distance between two domains by recursively comparing
whether they belong to the same higher-level domains. For a mismatch at every higher
level of the resource group, the kernel doubles the NUMA distance between the
domains being compared.
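A minimal user-space sketch of that doubling rule (this mirrors the idea, not
the kernel's exact code; the arrays hold hypothetical domainIDs at the
reference points, most specific level first)::

  #include <stdio.h>

  #define LOCAL_DISTANCE 10

  static int form1_distance(const int *a, const int *b, int depth)
  {
          int i, distance = LOCAL_DISTANCE;

          for (i = 0; i < depth; i++) {
                  if (a[i] == b[i])
                          break;          /* same domain at this level: stop */
                  distance *= 2;          /* mismatch: double the distance */
          }
          return distance;
  }

  int main(void)
  {
          int node_a[] = { 7, 6, 3 };     /* hypothetical domainIDs */
          int node_b[] = { 9, 6, 3 };     /* differs only at the primary level */

          printf("%d\n", form1_distance(node_a, node_b, 3));  /* 10 * 2 = 20 */
          return 0;
  }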
Form 2
------
The Form 2 associativity format adds separate device tree properties representing NUMA node
distance, thereby making the node distance computation flexible. Form 2 also allows flexible
primary domain numbering. With the NUMA distance computation now detached from the index value
in the "ibm,associativity-reference-points" property, Form 2 allows a large number of primary
domain ids at the same domainID index, representing resource groups of different
performance/latency characteristics.
The hypervisor indicates the usage of Form 2 associativity using bit 2 of byte 5 in the
"ibm,architecture-vec-5" property.
"ibm,numa-lookup-index-table" property contains a list of one or more numbers representing
the domainIDs present in the system. The offset of the domainID in this property is
used as an index while computing numa distance information via "ibm,numa-distance-table".
prop-encoded-array: The number N of the domainIDs encoded as with encode-int, followed by
N domainID encoded as with encode-int
For example:
"ibm,numa-lookup-index-table" = {4, 0, 8, 250, 252}. The offset of domainID 8 (2) is used when
computing the distance of domain 8 from other domains present in the system. For the rest of
this document, this offset will be referred to as the domain distance offset.
"ibm,numa-distance-table" property contains a list of one or more numbers representing the NUMA
distance between resource groups/domains present in the system.
prop-encoded-array: the number N of distance values, encoded as with encode-int, followed by
N distance values encoded as with encode-bytes. The maximum distance value that can be
encoded is 255. The number N must be equal to the square of m, where m is the number of
domainIDs in the numa-lookup-index-table.
For example:
ibm,numa-lookup-index-table = <3 0 8 40>;
ibm,numa-distance-table = <9>, /bits/ 8 < 10 20 80
20 10 160
80 160 10>;
| 0 8 40
--|------------
|
0 | 10 20 80
|
8 | 20 10 160
|
40| 80 160 10
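Tying the two properties together, a small sketch (illustrative C, not kernel
code) that reproduces the table above using the domain distance offsets::

  #include <stdio.h>

  static int lookup_offset(const int *index_table, int m, int domain_id)
  {
          int i;

          for (i = 0; i < m; i++)
                  if (index_table[i] == domain_id)
                          return i;
          return -1;
  }

  int main(void)
  {
          /* the m = 3 domainIDs of ibm,numa-lookup-index-table = <3 0 8 40>; */
          int index_table[] = { 0, 8, 40 };
          int m = 3;
          /* the 9 byte-encoded entries of ibm,numa-distance-table */
          unsigned char dist[] = { 10, 20, 80, 20, 10, 160, 80, 160, 10 };

          int a = lookup_offset(index_table, m, 8);   /* offset 1 */
          int b = lookup_offset(index_table, m, 40);  /* offset 2 */

          printf("distance(8, 40) = %d\n", dist[a * m + b]);  /* 160 */
          return 0;
  }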
A possible "ibm,associativity" property for resources in nodes 0, 8 and 40:
{ 3, 6, 7, 0 }
{ 3, 6, 9, 8 }
{ 3, 6, 7, 40}
With "ibm,associativity-reference-points" { 0x3 }
"ibm,lookup-index-table" helps in having a compact representation of distance matrix.
Since domainID can be sparse, the matrix of distances can also be effectively sparse.
With "ibm,lookup-index-table" we can achieve a compact representation of
distance information.

View File

@@ -704,7 +704,7 @@ ref
no-jd
BIOS setup but without jack-detection
intel
Intel DG45* mobos
Intel D*45* mobos
dell-m6-amic
Dell desktops/laptops with analog mics
dell-m6-dmic

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 177
SUBLEVEL = 178
EXTRAVERSION =
NAME = Dare mighty things
@@ -596,8 +596,10 @@ endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
ifneq ($(LLVM_IAS),1)
CLANG_FLAGS += -no-integrated-as
ifeq ($(LLVM_IAS),1)
CLANG_FLAGS += -fintegrated-as
else
CLANG_FLAGS += -fno-integrated-as
endif
CLANG_FLAGS += -Werror=unknown-warning-option
KBUILD_CFLAGS += $(CLANG_FLAGS)
@@ -870,7 +872,7 @@ else
DEBUG_CFLAGS += -g
endif
ifeq ($(LLVM_IAS),1)
ifdef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -g
else
KBUILD_AFLAGS += -Wa,-gdwarf-2

View File

@@ -44,7 +44,7 @@
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_FORM1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000)
#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
@@ -53,6 +53,7 @@
#define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000)
#define FW_FEATURE_STUFF_TCE ASM_CONST(0x0000008000000000)
#define FW_FEATURE_RPT_INVALIDATE ASM_CONST(0x0000010000000000)
#define FW_FEATURE_FORM2_AFFINITY ASM_CONST(0x0000020000000000)
#ifndef __ASSEMBLY__
@@ -69,11 +70,11 @@ enum {
FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
FW_FEATURE_FORM1_AFFINITY | FW_FEATURE_PRRN |
FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR |
FW_FEATURE_RPT_INVALIDATE,
FW_FEATURE_RPT_INVALIDATE | FW_FEATURE_FORM2_AFFINITY,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR,
FW_FEATURE_POWERNV_ALWAYS = 0,

View File

@@ -147,8 +147,9 @@ extern int of_read_drc_info_cell(struct property **prop,
#define OV5_MSI 0x0201 /* PCIe/MSI support */
#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
#define OV5_XCMO 0x0440 /* Page Coalescing */
#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
#define OV5_FORM1_AFFINITY 0x0580 /* FORM1 NUMA affinity */
#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
#define OV5_FORM2_AFFINITY 0x0520 /* Form2 NUMA affinity */
#define OV5_HP_EVT 0x0604 /* Hot Plug Event support */
#define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */
#define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */

View File

@@ -36,7 +36,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
extern int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
@@ -64,6 +64,7 @@ static inline int early_cpu_to_node(int cpu)
}
int of_drconf_to_nid_single(struct drmem_lmb *lmb);
void update_numa_distance(struct device_node *node);
#else
@@ -83,7 +84,7 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
static inline int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
return 0;
}
@@ -93,6 +94,7 @@ static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
return first_online_node;
}
static inline void update_numa_distance(struct device_node *node) {}
#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)

View File

@@ -1069,7 +1069,8 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
#else
0,
#endif
.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
.associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
OV5_FEAT(OV5_FORM2_AFFINITY),
.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
.micro_checkpoint = 0,
.reserved0 = 0,

View File

@@ -51,14 +51,22 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;
#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;
#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };
/*
* Allocate node_to_cpumask_map based on number of available nodes
@@ -163,7 +171,55 @@ static void unmap_cpu_from_node(unsigned long cpu)
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
static int __associativity_to_nid(const __be32 *associativity,
int max_array_sz)
{
int nid;
/*
* primary_domain_index is 1 based array index.
*/
int index = primary_domain_index - 1;
if (!numa_enabled || index >= max_array_sz)
return NUMA_NO_NODE;
nid = of_read_number(&associativity[index], 1);
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= nr_node_ids)
nid = NUMA_NO_NODE;
return nid;
}
/*
* Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA
* info is found.
*/
static int associativity_to_nid(const __be32 *associativity)
{
int array_sz = of_read_number(associativity, 1);
/* Skip the first element in the associativity array */
return __associativity_to_nid((associativity + 1), array_sz);
}
static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
int dist;
int node1, node2;
node1 = associativity_to_nid(cpu1_assoc);
node2 = associativity_to_nid(cpu2_assoc);
dist = numa_distance_table[node1][node2];
if (dist <= LOCAL_DISTANCE)
return 0;
else if (dist <= REMOTE_DISTANCE)
return 1;
else
return 2;
}
static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
int dist = 0;
@@ -179,6 +235,15 @@ int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
return dist;
}
int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
/* We should not get called with FORM0 */
VM_WARN_ON(affinity_form == FORM0_AFFINITY);
if (affinity_form == FORM1_AFFINITY)
return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
@@ -190,7 +255,9 @@ int __node_distance(int a, int b)
int i;
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
if (affinity_form == FORM2_AFFINITY)
return numa_distance_table[a][b];
else if (affinity_form == FORM0_AFFINITY)
return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
@@ -205,52 +272,6 @@ int __node_distance(int a, int b)
}
EXPORT_SYMBOL(__node_distance);
static void initialize_distance_lookup_table(int nid,
const __be32 *associativity)
{
int i;
if (!form1_affinity)
return;
for (i = 0; i < distance_ref_points_depth; i++) {
const __be32 *entry;
entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
distance_lookup_table[nid][i] = of_read_number(entry, 1);
}
}
/*
* Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA
* info is found.
*/
static int associativity_to_nid(const __be32 *associativity)
{
int nid = NUMA_NO_NODE;
if (!numa_enabled)
goto out;
if (of_read_number(associativity, 1) >= min_common_depth)
nid = of_read_number(&associativity[min_common_depth], 1);
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= nr_node_ids)
nid = NUMA_NO_NODE;
if (nid > 0 &&
of_read_number(associativity, 1) >= distance_ref_points_depth) {
/*
* Skip the length field and send start of associativity array
*/
initialize_distance_lookup_table(nid, associativity + 1);
}
out:
return nid;
}
/* Returns the nid associated with the given device tree node,
* or -1 if not found.
*/
@@ -284,10 +305,159 @@ int of_node_to_nid(struct device_node *device)
}
EXPORT_SYMBOL(of_node_to_nid);
static int __init find_min_common_depth(void)
static void __initialize_form1_numa_distance(const __be32 *associativity,
int max_array_sz)
{
int depth;
int i, nid;
if (affinity_form != FORM1_AFFINITY)
return;
nid = __associativity_to_nid(associativity, max_array_sz);
if (nid != NUMA_NO_NODE) {
for (i = 0; i < distance_ref_points_depth; i++) {
const __be32 *entry;
int index = be32_to_cpu(distance_ref_points[i]) - 1;
/*
* broken hierarchy, return with broken distance table
*/
if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
return;
entry = &associativity[index];
distance_lookup_table[nid][i] = of_read_number(entry, 1);
}
}
}
static void initialize_form1_numa_distance(const __be32 *associativity)
{
int array_sz;
array_sz = of_read_number(associativity, 1);
/* Skip the first element in the associativity array */
__initialize_form1_numa_distance(associativity + 1, array_sz);
}
/*
* Used to update distance information w.r.t newly added node.
*/
void update_numa_distance(struct device_node *node)
{
int nid;
if (affinity_form == FORM0_AFFINITY)
return;
else if (affinity_form == FORM1_AFFINITY) {
const __be32 *associativity;
associativity = of_get_associativity(node);
if (!associativity)
return;
initialize_form1_numa_distance(associativity);
return;
}
/* FORM2 affinity */
nid = of_node_to_nid_single(node);
if (nid == NUMA_NO_NODE)
return;
/*
* With FORM2 we expect NUMA distance of all possible NUMA
* nodes to be provided during boot.
*/
WARN(numa_distance_table[nid][nid] == -1,
"NUMA distance details for node %d not provided\n", nid);
}
EXPORT_SYMBOL_GPL(update_numa_distance);
/*
* ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
* ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
*/
static void initialize_form2_numa_distance_lookup_table(void)
{
int i, j;
struct device_node *root;
const __u8 *numa_dist_table;
const __be32 *numa_lookup_index;
int numa_dist_table_length;
int max_numa_index, distance_index;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
else
root = of_find_node_by_path("/rtas");
if (!root)
root = of_find_node_by_path("/");
numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
max_numa_index = of_read_number(&numa_lookup_index[0], 1);
/* first element of the array is the size and is encode-int */
numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
/* Skip the size which is encoded int */
numa_dist_table += sizeof(__be32);
pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
numa_dist_table_length, max_numa_index);
for (i = 0; i < max_numa_index; i++)
/* +1 skip the max_numa_index in the property */
numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
if (numa_dist_table_length != max_numa_index * max_numa_index) {
WARN(1, "Wrong NUMA distance information\n");
/* consider everybody else just remote. */
for (i = 0; i < max_numa_index; i++) {
for (j = 0; j < max_numa_index; j++) {
int nodeA = numa_id_index_table[i];
int nodeB = numa_id_index_table[j];
if (nodeA == nodeB)
numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
else
numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
}
}
}
distance_index = 0;
for (i = 0; i < max_numa_index; i++) {
for (j = 0; j < max_numa_index; j++) {
int nodeA = numa_id_index_table[i];
int nodeB = numa_id_index_table[j];
numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
}
}
of_node_put(root);
}
static int __init find_primary_domain_index(void)
{
int index;
struct device_node *root;
/*
* Check for which form of affinity.
*/
if (firmware_has_feature(FW_FEATURE_OPAL)) {
affinity_form = FORM1_AFFINITY;
} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
dbg("Using form 2 affinity\n");
affinity_form = FORM2_AFFINITY;
} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
dbg("Using form 1 affinity\n");
affinity_form = FORM1_AFFINITY;
} else
affinity_form = FORM0_AFFINITY;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
@@ -318,25 +488,21 @@ static int __init find_min_common_depth(void)
}
distance_ref_points_depth /= sizeof(int);
if (firmware_has_feature(FW_FEATURE_OPAL) ||
firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
dbg("Using form 1 affinity\n");
form1_affinity = 1;
}
if (form1_affinity) {
depth = of_read_number(distance_ref_points, 1);
} else {
if (affinity_form == FORM0_AFFINITY) {
if (distance_ref_points_depth < 2) {
printk(KERN_WARNING "NUMA: "
"short ibm,associativity-reference-points\n");
"short ibm,associativity-reference-points\n");
goto err;
}
depth = of_read_number(&distance_ref_points[1], 1);
index = of_read_number(&distance_ref_points[1], 1);
} else {
/*
* Both FORM1 and FORM2 affinity find the primary domain details
* at the same offset.
*/
index = of_read_number(distance_ref_points, 1);
}
/*
* Warn and cap if the hardware supports more than
* MAX_DISTANCE_REF_POINTS domains.
@@ -348,7 +514,7 @@ static int __init find_min_common_depth(void)
}
of_node_put(root);
return depth;
return index;
err:
of_node_put(root);
@@ -426,6 +592,38 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
return 0;
}
static int get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
int default_nid = NUMA_NO_NODE;
int nid = default_nid;
int rc, index;
if ((primary_domain_index < 0) || !numa_enabled)
return default_nid;
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
if (primary_domain_index <= aa.array_sz &&
!(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
const __be32 *associativity;
index = lmb->aa_index * aa.array_sz;
associativity = &aa.arrays[index];
nid = __associativity_to_nid(associativity, aa.array_sz);
if (nid > 0 && affinity_form == FORM1_AFFINITY) {
/*
* lookup array associativity entries have
* no length of the array as the first element.
*/
__initialize_form1_numa_distance(associativity, aa.array_sz);
}
}
return nid;
}
/*
* This is like of_node_to_nid_single() for memory represented in the
* ibm,dynamic-reconfiguration-memory node.
@@ -437,35 +635,28 @@ int of_drconf_to_nid_single(struct drmem_lmb *lmb)
int nid = default_nid;
int rc, index;
if ((min_common_depth < 0) || !numa_enabled)
if ((primary_domain_index < 0) || !numa_enabled)
return default_nid;
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
if (min_common_depth <= aa.array_sz &&
if (primary_domain_index <= aa.array_sz &&
!(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
nid = of_read_number(&aa.arrays[index], 1);
const __be32 *associativity;
if (nid == 0xffff || nid >= nr_node_ids)
nid = default_nid;
if (nid > 0) {
index = lmb->aa_index * aa.array_sz;
initialize_distance_lookup_table(nid,
&aa.arrays[index]);
}
index = lmb->aa_index * aa.array_sz;
associativity = &aa.arrays[index];
nid = __associativity_to_nid(associativity, aa.array_sz);
}
return nid;
}
#ifdef CONFIG_PPC_SPLPAR
static int vphn_get_nid(long lcpu)
static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
long rc, hwid;
/*
@@ -485,12 +676,30 @@ static int vphn_get_nid(long lcpu)
rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
if (rc == H_SUCCESS)
return associativity_to_nid(associativity);
return 0;
}
return -1;
}
static int vphn_get_nid(long lcpu)
{
__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
if (!__vphn_get_associativity(lcpu, associativity))
return associativity_to_nid(associativity);
return NUMA_NO_NODE;
}
#else
static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
return -1;
}
static int vphn_get_nid(long unused)
{
return NUMA_NO_NODE;
@@ -685,7 +894,7 @@ static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
size = read_n_cells(n_mem_size_cells, usm);
}
nid = of_drconf_to_nid_single(lmb);
nid = get_nid_and_numa_distance(lmb);
fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
&nid);
node_set_online(nid);
@@ -702,24 +911,31 @@ static int __init parse_numa_properties(void)
struct device_node *memory;
int default_nid = 0;
unsigned long i;
const __be32 *associativity;
if (numa_enabled == 0) {
printk(KERN_WARNING "NUMA disabled by user\n");
return -1;
}
min_common_depth = find_min_common_depth();
primary_domain_index = find_primary_domain_index();
if (min_common_depth < 0) {
if (primary_domain_index < 0) {
/*
* if we fail to parse min_common_depth from device tree
* if we fail to parse primary_domain_index from device tree
* mark the numa disabled, boot with numa disabled.
*/
numa_enabled = false;
return min_common_depth;
return primary_domain_index;
}
dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
dbg("NUMA associativity depth for CPU/Memory: %d\n", primary_domain_index);
/*
* If it is FORM2 initialize the distance table here.
*/
if (affinity_form == FORM2_AFFINITY)
initialize_form2_numa_distance_lookup_table();
/*
* Even though we connect cpus to numa domains later in SMP
@@ -727,18 +943,30 @@ static int __init parse_numa_properties(void)
* each node to be onlined must have NODE_DATA etc backing it.
*/
for_each_present_cpu(i) {
__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
struct device_node *cpu;
int nid = vphn_get_nid(i);
int nid = NUMA_NO_NODE;
/*
* Don't fall back to default_nid yet -- we will plug
* cpus into nodes once the memory scan has discovered
* the topology.
*/
if (nid == NUMA_NO_NODE) {
memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));
if (__vphn_get_associativity(i, vphn_assoc) == 0) {
nid = associativity_to_nid(vphn_assoc);
initialize_form1_numa_distance(vphn_assoc);
} else {
/*
* Don't fall back to default_nid yet -- we will plug
* cpus into nodes once the memory scan has discovered
* the topology.
*/
cpu = of_get_cpu_node(i, NULL);
BUG_ON(!cpu);
nid = of_node_to_nid_single(cpu);
associativity = of_get_associativity(cpu);
if (associativity) {
nid = associativity_to_nid(associativity);
initialize_form1_numa_distance(associativity);
}
of_node_put(cpu);
}
@@ -776,8 +1004,11 @@ static int __init parse_numa_properties(void)
* have associativity properties. If none, then
* everything goes to default_nid.
*/
nid = of_node_to_nid_single(memory);
if (nid < 0)
associativity = of_get_associativity(memory);
if (associativity) {
nid = associativity_to_nid(associativity);
initialize_form1_numa_distance(associativity);
} else
nid = default_nid;
fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
@@ -926,7 +1157,7 @@ static void __init find_possible_nodes(void)
goto out;
}
max_nodes = of_read_number(&domains[min_common_depth], 1);
max_nodes = of_read_number(&domains[primary_domain_index], 1);
pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
for (i = 0; i < max_nodes; i++) {
@@ -935,7 +1166,7 @@ static void __init find_possible_nodes(void)
}
prop_length /= sizeof(int);
if (prop_length > min_common_depth + 2)
if (prop_length > primary_domain_index + 2)
coregroup_enabled = 1;
out:
@@ -1268,7 +1499,7 @@ int cpu_to_coregroup_id(int cpu)
goto out;
index = of_read_number(associativity, 1);
if (index > min_common_depth + 1)
if (index > primary_domain_index + 1)
return of_read_number(&associativity[index - 1], 1);
out:

View File

@@ -119,10 +119,11 @@ struct vec5_fw_feature {
static __initdata struct vec5_fw_feature
vec5_fw_features_table[] = {
{FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
{FW_FEATURE_FORM1_AFFINITY, OV5_FORM1_AFFINITY},
{FW_FEATURE_PRRN, OV5_PRRN},
{FW_FEATURE_DRMEM_V2, OV5_DRMEM_V2},
{FW_FEATURE_DRC_INFO, OV5_DRC_INFO},
{FW_FEATURE_FORM2_AFFINITY, OV5_FORM2_AFFINITY},
};
static void __init fw_vec5_feature_init(const char *vec5, unsigned long len)

View File

@@ -484,6 +484,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return saved_rc;
}
update_numa_distance(dn);
rc = dlpar_online_cpu(dn);
if (rc) {
saved_rc = rc;

View File

@@ -180,6 +180,8 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
return -ENODEV;
}
update_numa_distance(lmb_node);
dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (!dr_node) {
dlpar_free_cc_nodes(lmb_node);

View File

@@ -261,7 +261,7 @@ static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
return -EIO;
return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
return cpu_relative_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}
static int cpu_home_node_dispatch_distance(int disp_cpu)
@@ -281,7 +281,7 @@ static int cpu_home_node_dispatch_distance(int disp_cpu)
if (!disp_cpu_assoc || !vcpu_assoc)
return -EIO;
return cpu_distance(disp_cpu_assoc, vcpu_assoc);
return cpu_relative_distance(disp_cpu_assoc, vcpu_assoc);
}
static void update_vcpu_disp_stat(int disp_cpu)

View File

@@ -1079,6 +1079,13 @@ static int papr_scm_probe(struct platform_device *pdev)
return -ENODEV;
}
/*
* open firmware platform device create won't update the NUMA
* distance table. For PAPR SCM devices we use numa_map_to_online_node()
* to find the nearest online NUMA node and that requires correct
* distance table information.
*/
update_numa_distance(dn);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)

View File

@@ -331,6 +331,28 @@ config RISCV_BASE_PMU
endmenu
config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
def_bool y
# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
depends on AS_IS_GNU && AS_VERSION >= 23800
help
Newer binutils versions default to ISA spec version 20191213 which
moves some instructions from the I extension to the Zicsr and Zifencei
extensions.
config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
def_bool y
depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
depends on CC_IS_CLANG && CLANG_VERSION < 170000
help
Certain versions of clang do not support zicsr and zifencei via -march
but newer versions of binutils require it for the reasons noted in the
help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
option causes an older ISA spec compatible with these older versions
of clang to be passed to GAS, which has the same result as passing zicsr
and zifencei to -march.
config FPU
bool "FPU support"
default y

View File

@@ -40,7 +40,7 @@ ifeq ($(CONFIG_LD_IS_LLD),y)
ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
ifneq ($(LLVM_IAS),1)
ifndef CONFIG_AS_IS_LLVM
KBUILD_CFLAGS += -Wa,-mno-relax
KBUILD_AFLAGS += -Wa,-mno-relax
endif
@@ -53,10 +53,12 @@ riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
# Newer binutils versions default to ISA spec version 20191213 which moves some
# instructions from the I extension to the Zicsr and Zifencei extensions.
toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
KBUILD_CFLAGS += -Wa,-misa-spec=2.2
KBUILD_AFLAGS += -Wa,-misa-spec=2.2
else
riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
endif
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)

View File

@@ -16,6 +16,7 @@
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
extern u32 __user_rt_sigreturn[2];
@@ -178,6 +179,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
long err = 0;
unsigned long __maybe_unused addr;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
@@ -206,7 +208,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
sizeof(frame->sigreturn_code)))
return -EFAULT;
regs->ra = (unsigned long)&frame->sigreturn_code;
addr = (unsigned long)&frame->sigreturn_code;
/* Make sure the two instructions are pushed to icache. */
flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
regs->ra = addr;
#endif /* CONFIG_MMU */
/*

View File

@@ -270,10 +270,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
/**
* handle_external_interrupt - used for external interruption interceptions
*
* This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
* the new PSW does not have external interrupts disabled. In the first case,
* we've got to deliver the interrupt manually, and in the second case, we
* drop to userspace to handle the situation there.
* This interception occurs if:
* - the CPUSTAT_EXT_INT bit was already set when the external interrupt
* occurred. In this case, the interrupt needs to be injected manually to
* preserve interrupt priority.
* - the external new PSW has external interrupts enabled, which will cause an
* interruption loop. We drop to userspace in this case.
*
* The latter case can be detected by inspecting the external mask bit in the
* external new psw.
*
* Under PV, only the latter case can occur, since interrupt priorities are
* handled in the ultravisor.
*/
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
@@ -284,10 +292,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
vcpu->stat.exit_external_interrupt++;
rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
/* We can not handle clock comparator or timer interrupt with bad PSW */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
newpsw = vcpu->arch.sie_block->gpsw;
} else {
rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
}
/*
* Clock comparator or timer interrupt with external interrupt enabled
* will cause interrupt loop. Drop to userspace.
*/
if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
(newpsw.mask & PSW_MASK_EXT))
return -EOPNOTSUPP;

View File

@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"IdeaPad Duet 3 10IGL5"),
},
},
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
},
{},
};

View File

@@ -32,8 +32,8 @@ static int __init iommu_init_noop(void) { return 0; }
static void iommu_shutdown_noop(void) { }
bool __init bool_x86_init_noop(void) { return false; }
void x86_op_int_noop(int cpu) { }
static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
static __init void get_rtc_noop(struct timespec64 *now) { }
static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
static void get_rtc_noop(struct timespec64 *now) { }
static __initconst const struct of_device_id of_cmos_match[] = {
{ .compatible = "motorola,mc146818" },

View File

@@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
#ifdef CONFIG_AMD_NB
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;
if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif

View File

@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
}
if (sinfo->msgdigest_len != sig->digest_size) {
pr_debug("Sig %u: Invalid digest size (%u)\n",
sinfo->index, sinfo->msgdigest_len);
pr_warn("Sig %u: Invalid digest size (%u)\n",
sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
pr_debug("Sig %u: Message digest doesn't match\n",
sinfo->index);
pr_warn("Sig %u: Message digest doesn't match\n",
sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
@@ -488,7 +488,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen)
{
if (pkcs7->data) {
pr_debug("Data already supplied\n");
pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;

View File

@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
break;
default:
pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
pr_debug("Unsigned PE binary\n");
pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
pr_debug("Signature wrapper too short\n");
pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
/* Both pesign and sbsign round up the length of certificate table
* (in optional header data directories) to 8 byte alignment.
/* sbsign rounds up the length of certificate table (in optional
* header data directories) to 8 byte alignment. However, the PE
* specification states that while entries are 8-byte aligned, this is
* not included in their length, and as a result, pesign has not
* rounded up since 0.110.
*/
if (round_up(wrapper.length, 8) != ctx->sig_len) {
pr_debug("Signature wrapper len wrong\n");
if (wrapper.length > ctx->sig_len) {
pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
ctx->sig_len, wrapper.length);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
pr_debug("Signature is not revision 2.0\n");
pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
pr_debug("Signature certificate type is not PKCS\n");
pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
pr_debug("Signature data missing\n");
pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
@@ -194,7 +198,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
return 0;
}
not_pkcs7:
pr_debug("Signature data not PKCS#7\n");
pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
pr_debug("Digest size mismatch (%zx != %x)\n",
digest_size, ctx->digest_len);
pr_warn("Digest size mismatch (%zx != %x)\n",
digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
pr_debug("Digest mismatch\n");
pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");

View File

@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xffff,
.fast_io = true,
};
@@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
struct resource *res;
struct regmap_config reg_config = sprdclk_regmap_config;
if (of_find_property(node, "sprd,syscon", NULL)) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
return PTR_ERR(regmap);
}
} else {
base = devm_platform_ioremap_resource(pdev, 0);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
reg_config.max_register = resource_size(res) - reg_config.reg_stride;
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&sprdclk_regmap_config);
&reg_config);
if (IS_ERR(regmap)) {
pr_err("failed to init regmap\n");
return PTR_ERR(regmap);

View File

@@ -99,7 +99,7 @@ config GPIO_GENERIC
tristate
config GPIO_REGMAP
depends on REGMAP
select REGMAP
tristate
# put drivers in the right section, in alphabetical order

View File

@@ -326,7 +326,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
.flags = IRQCHIP_SET_TYPE_MASKED,
.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)

View File

@@ -102,7 +102,6 @@ static int armada_drm_bind(struct device *dev)
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
kfree(priv);
return ret;
}

View File

@@ -256,6 +256,7 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
{ 0x8126, 0x55 },
{ 0x8127, 0x66 },
{ 0x8128, 0x88 },
{ 0x812a, 0x20 },
};
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));

View File

@@ -284,10 +284,17 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
/* Non exact match to match all versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */

View File

@@ -396,6 +396,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
return 0;
}
static void
nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
{
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
unsigned int max_rate, mode_rate;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
/* we don't support more than 10 anyway */
asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
/* reduce the bpc until it works out */
while (asyh->or.bpc > 6) {
mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
if (mode_rate <= max_rate)
break;
asyh->or.bpc -= 2;
}
break;
default:
break;
}
}
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -414,6 +443,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
if (crtc_state->mode_changed || crtc_state->connectors_changed)
asyh->or.bpc = connector->display_info.bpc;
/* We might have to reduce the bpc */
nv50_outp_atomic_fix_depth(encoder, crtc_state);
return 0;
}

View File

@@ -220,8 +220,6 @@ void nouveau_dp_irq(struct nouveau_drm *drm,
}
/* TODO:
* - Use the minimum possible BPC here, once we add support for the max bpc
* property.
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
@@ -233,7 +231,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
{
const unsigned int min_clock = 25000;
unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
const u8 bpp = connector->display_info.bpc * 3;
/* Check with the minimum bpc always, so we can advertise better modes.
* In particular not doing this causes modes to be dropped on HDR
* displays as we might check with a bpc of 16 even.
*/
const u8 bpp = 6 * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;

View File

@@ -451,6 +451,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (IS_ERR(pages[i])) {
mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
}
}

View File

@@ -305,6 +305,10 @@ void vmbus_disconnect(void)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
if (vmbus_connection.channels == NULL) {
pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
return NULL;
}
if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
return NULL;
return READ_ONCE(vmbus_connection.channels[relid]);

View File

@@ -447,7 +447,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (etm4x_sspcicrn_present(drvdata, i))
etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}

View File

@@ -462,6 +462,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
if (num == 1 && msgs[0].len == 0)
goto stop;
lpi2c_imx->rx_buf = NULL;
lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);

View File

@@ -343,18 +343,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
* ocores_isr(), we just add our polling code around it.
*
* It can run in atomic context
*
* Return: 0 on success, -ETIMEDOUT on timeout
*/
static void ocores_process_polling(struct ocores_i2c *i2c)
static int ocores_process_polling(struct ocores_i2c *i2c)
{
while (1) {
irqreturn_t ret;
int err;
irqreturn_t ret;
int err = 0;
while (1) {
err = ocores_poll_wait(i2c);
if (err) {
i2c->state = STATE_ERROR;
if (err)
break; /* timeout */
}
ret = ocores_isr(-1, i2c);
if (ret == IRQ_NONE)
@@ -365,13 +365,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
break;
}
}
return err;
}
static int ocores_xfer_core(struct ocores_i2c *i2c,
struct i2c_msg *msgs, int num,
bool polling)
{
int ret;
int ret = 0;
u8 ctrl;
ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@@ -389,15 +391,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
if (polling) {
ocores_process_polling(i2c);
ret = ocores_process_polling(i2c);
} else {
ret = wait_event_timeout(i2c->wait,
(i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ);
if (ret == 0) {
ocores_process_timeout(i2c);
return -ETIMEDOUT;
}
if (wait_event_timeout(i2c->wait,
(i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ) == 0)
ret = -ETIMEDOUT;
}
if (ret) {
ocores_process_timeout(i2c);
return ret;
}
return (i2c->state == STATE_DONE) ? num : -EIO;

View File

@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
.irq_flags = IRQF_TRIGGER_LOW,
.irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,

View File

@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.label = dev_name(&st->spi->dev);
st->chip.parent = &st->spi->dev;
st->chip.owner = THIS_MODULE;
st->chip.can_sleep = true;
st->chip.base = -1;
st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
st->chip.get_direction = ti_ads7950_get_direction;

View File

@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
/* DAC can only accept up to a 16-bit value */
if ((unsigned int)val > 65535)
/* DAC can only accept up to a 12-bit value */
if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;

View File

@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
static void cm32181_unregister_dummy_client(void *data)
{
struct i2c_client *client = data;
/* Unregister the dummy client */
i2c_unregister_device(client);
}
static int cm32181_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -458,6 +466,10 @@ static int cm32181_probe(struct i2c_client *client)
client = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(client))
return PTR_ERR(client);
ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
if (ret)
return ret;
}
cm32181 = iio_priv(indio_dev);

View File

@@ -505,22 +505,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
struct ib_sa_mcmember_rec rec;
int ret = 0;
if (id_priv->qkey) {
if (qkey && id_priv->qkey != qkey)
return -EINVAL;
return 0;
}
if (qkey) {
id_priv->qkey = qkey;
return 0;
}
switch (id_priv->id.ps) {
case RDMA_PS_UDP:
case RDMA_PS_IB:
@@ -540,6 +529,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
if (!qkey ||
(id_priv->qkey && (id_priv->qkey != qkey)))
return -EINVAL;
id_priv->qkey = qkey;
return 0;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -1107,7 +1106,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (id_priv->id.qp_type == IB_QPT_UD) {
ret = cma_set_qkey(id_priv, 0);
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
@@ -4312,7 +4311,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
memset(&rep, 0, sizeof rep);
rep.status = status;
if (status == IB_SIDR_SUCCESS) {
ret = cma_set_qkey(id_priv, qkey);
if (qkey)
ret = cma_set_qkey(id_priv, qkey);
else
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
rep.qp_num = id_priv->qp_num;
@ -4516,9 +4518,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
enum ib_gid_type gid_type;
struct net_device *ndev;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
else
if (status)
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
status);
@ -4546,7 +4546,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
}
event->param.ud.qp_num = 0xFFFFFF;
event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
event->param.ud.qkey = id_priv->qkey;
out:
if (ndev)
@ -4565,8 +4565,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
goto out;
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
if (!ret) {
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
}
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
WARN_ON(ret);
@ -4619,9 +4622,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (ret)
return ret;
ret = cma_set_qkey(id_priv, 0);
if (ret)
return ret;
if (!id_priv->qkey) {
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
}
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
rec.qkey = cpu_to_be32(id_priv->qkey);
@ -4709,9 +4714,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
ib.rec.pkey = cpu_to_be16(0xffff);
if (id_priv->id.ps == RDMA_PS_UDP)
ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev)
@ -4737,6 +4739,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (err || !ib.rec.mtu)
return err ?: -EINVAL;
if (!id_priv->qkey)
cma_set_default_qkey(id_priv);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&ib.rec.port_gid);
INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
@ -4762,6 +4767,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
return -EINVAL;
if (id_priv->id.qp_type != IB_QPT_UD)
return -EINVAL;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
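
To recap the refactoring above: default-qkey selection moves into cma_set_default_qkey(), while cma_set_qkey() now only accepts an explicit, non-zero qkey that does not conflict with one already set. A standalone sketch of that validation rule (struct and names simplified for illustration):

#include <stdio.h>

struct id_state { unsigned int qkey; };

/* Explicit qkey: must be non-zero and must not conflict. */
static int set_qkey(struct id_state *id, unsigned int qkey)
{
    if (!qkey || (id->qkey && id->qkey != qkey))
        return -22; /* -EINVAL */
    id->qkey = qkey;
    return 0;
}

int main(void)
{
    struct id_state id = { 0 };

    printf("%d\n", set_qkey(&id, 0x1234)); /* 0: accepted */
    printf("%d\n", set_qkey(&id, 0x1234)); /* 0: same value is fine */
    printf("%d\n", set_qkey(&id, 0x5678)); /* -22: conflicts */
    return 0;
}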


@ -535,6 +535,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ret = device->ops.create_ah(ah, &init_attr, udata);
if (ret) {
if (ah->sgid_attr)
rdma_put_gid_attr(ah->sgid_attr);
kfree(ah);
return ERR_PTR(ret);
}


@ -425,10 +425,26 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_NDR;
break;
case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
default:
return -EINVAL;
}


@ -624,10 +624,8 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
ctx->cport = inst;
ret = cal_ctx_v4l2_init(ctx);
if (ret) {
kfree(ctx);
if (ret)
return NULL;
}
return ctx;
}


@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
return mtd_read(mtd, pos, len, &retlen, buf);
if (!sect_size) {
ret = mtd_read(mtd, pos, len, &retlen, buf);
if (ret && !mtd_is_bitflip(ret))
return ret;
return 0;
}
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
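
The repeated "ret && !mtd_is_bitflip(ret)" pattern treats corrected bitflips (EUCLEAN-style returns) as successful reads rather than hard failures, since the data itself is good. Sketched standalone:

#include <stdio.h>

#define EUCLEAN 117 /* errno value on most architectures */

static int mtd_is_bitflip(int err) { return err == -EUCLEAN; }

/* Pass hard errors through; a corrected bitflip is still good data. */
static int filter_read_status(int ret)
{
    if (ret && !mtd_is_bitflip(ret))
        return ret;
    return 0;
}

int main(void)
{
    printf("%d %d %d\n", filter_read_status(0),
           filter_read_status(-EUCLEAN), filter_read_status(-5)); /* 0 0 -5 */
    return 0;
}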


@ -276,7 +276,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
if (raw) {
len = mtd->writesize + mtd->oobsize;
cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
@ -540,7 +540,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
goto out;
cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@ -564,7 +564,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
return ret;
cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
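
The bug here is plain truncation: a raw access of writesize + oobsize does not fit in 6 bits, so "len & GENMASK(5, 0)" wrapped. A quick demonstration (the page and OOB sizes are example values, not taken from the driver):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
    unsigned int len = 2048 + 64; /* example 2K page + OOB */

    printf("6-bit field:  %u\n", len & GENMASK(5, 0));  /* 0: wrapped */
    printf("14-bit field: %u\n", len & GENMASK(13, 0)); /* 2112: intact */
    return 0;
}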


@ -1525,6 +1525,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
if (conf->timings.mode > 3)
return -EOPNOTSUPP;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;


@ -665,12 +665,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
ubi->vid_hdr_alsize)) {
ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
return -EINVAL;
}
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@ -688,6 +682,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
/*
* The memory allocated for the VID header is ubi->vid_hdr_alsize,
* as described in the comments in io.c.
* Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
* ubi->vid_hdr_alsize, so that all VID header operations
* won't access memory out of bounds.
*/
if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
" + VID header size(%zu) > VID header aligned size(%d).",
ubi->vid_hdr_offset, ubi->vid_hdr_shift,
UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
return -EINVAL;
}
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
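
A worked example of the new bound (the 64-byte header size and the attach parameters below are assumptions for illustration): with min_io_size = 512 and vid_hdr_offset = 2000, the aligned header buffer is 512 bytes but the in-buffer shift is 464, so shift plus header size is 528 and would overrun the buffer, which is exactly what the check rejects.

#include <stdio.h>

#define ALIGN(x, a)  ((((x) + (a) - 1) / (a)) * (a))
#define VID_HDR_SIZE 64 /* assumed on-flash VID header size */

int main(void)
{
    int min_io = 512, vid_hdr_offset = 2000; /* example attach parameters */
    int alsize = ALIGN(VID_HDR_SIZE, min_io); /* 512 */
    int aloffset = (vid_hdr_offset / min_io) * min_io;
    int shift = vid_hdr_offset - aloffset; /* 464 */

    if (shift + VID_HDR_SIZE > alsize)
        printf("rejected: shift(%d) + size(%d) > alsize(%d)\n",
               shift, VID_HDR_SIZE, alsize);
    return 0;
}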


@ -575,6 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
* @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@ -1066,8 +1067,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
* because the WL sub-system is shutting down
*
This function erases a physical eraseblock and performs torture testing if
needed. It also takes care of marking the physical eraseblock bad if
@ -1122,7 +1121,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);


@ -884,6 +884,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_MACB_USE_HWSTAMP
if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
addr &= ~GEM_BIT(DMA_RXVALID);
#endif
return addr;
}


@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
pci_reset_function(dev->pdev);
err = pci_reset_function(dev->pdev);
if (err) {
dev_err(&dev->pdev->dev,
"Adapter reset failed (%d). Please reboot\n",
err);
return err;
}
dev->flags &= ~QLCNIC_NEED_FLR;
}


@ -4940,7 +4940,7 @@ static void stmmac_napi_del(struct net_device *dev)
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0;
int ret = 0, i;
if (netif_running(dev))
stmmac_release(dev);
@ -4949,6 +4949,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->plat->rx_queues_to_use = rx_cnt;
priv->plat->tx_queues_to_use = tx_cnt;
if (!netif_is_rxfh_configured(dev))
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
stmmac_napi_add(dev);
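
ethtool_rxfh_indir_default() spreads entries round-robin over the ring count (index modulo rings, to the best of my reading), so after the queue count shrinks every table entry again points at a valid queue. A miniature version:

#include <stdio.h>

/* Same semantics as ethtool_rxfh_indir_default(), as I understand it. */
static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
    return index % n_rx_rings;
}

int main(void)
{
    unsigned int table[8], rx_cnt = 3, i; /* the real table is larger */

    for (i = 0; i < 8; i++)
        table[i] = indir_default(i, rx_cnt);
    for (i = 0; i < 8; i++)
        printf("%u ", table[i]); /* 0 1 2 0 1 2 0 1 */
    printf("\n");
    return 0;
}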


@ -4503,7 +4503,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
return err;
goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),


@ -2153,7 +2153,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return 0;
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@ -2179,7 +2180,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
of_platform_device_destroy(common->mdio_dev, NULL);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);


@ -207,6 +207,12 @@ static const enum gpiod_flags gpio_flags[] = {
*/
#define SFP_PHY_ADDR 22
/* SFP_EEPROM_BLOCK_SIZE is the size of the data chunk used to read the EEPROM
* at a time. Some SFP modules and also some Linux I2C drivers do not like
* reads longer than 16 bytes.
*/
#define SFP_EEPROM_BLOCK_SIZE 16
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@ -1754,11 +1760,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
/* Some SFP modules and also some Linux I2C drivers do not like reads
* longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
* a time.
*/
sfp->i2c_block_size = 16;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@ -2385,6 +2387,7 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
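
With the constant also seeding sfp->i2c_block_size at allocation time, even the earliest EEPROM accesses are chunked. The access pattern itself looks like this sketch, where read_block() is a hypothetical stand-in for the real I2C transfer:

#include <stddef.h>
#include <string.h>

#define SFP_EEPROM_BLOCK_SIZE 16

/* Hypothetical stand-in for the real I2C block transfer. */
static int read_block(size_t off, unsigned char *buf, size_t len)
{
    (void)off;
    memset(buf, 0xff, len);
    return 0;
}

/* Never ask the bus for more than SFP_EEPROM_BLOCK_SIZE at once. */
static int eeprom_read(size_t off, unsigned char *buf, size_t len)
{
    while (len) {
        size_t chunk = len < SFP_EEPROM_BLOCK_SIZE ?
                       len : SFP_EEPROM_BLOCK_SIZE;
        int ret = read_block(off, buf, chunk);

        if (ret)
            return ret;
        off += chunk;
        buf += chunk;
        len -= chunk;
    }
    return 0;
}

int main(void)
{
    unsigned char id[96];

    return eeprom_read(0, id, sizeof(id));
}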


@ -183,7 +183,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
.can_ext_scan = true,
};
static const struct of_device_id mwifiex_pcie_of_match_table[] = {
static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
{ }


@ -484,7 +484,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8997" },


@ -764,34 +764,32 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
const struct pin_desc *pd;
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
BIT(WAKE_CNTRL_OFF_S4);
pd = pin_desc_get(gpio_dev->pctrl, pin);
if (!pd)
return;
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + pin * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + pin * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
if (!pd)
continue;
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
int i;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
for (i = 0; i < desc->npins; i++)
amd_gpio_irq_init_pin(gpio_dev, i);
pin_reg = readl(gpio_dev->base + i * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
}
#ifdef CONFIG_PM_SLEEP
@ -844,10 +842,8 @@ static int amd_gpio_resume(struct device *dev)
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
if (!amd_gpio_should_save(gpio_dev, pin)) {
amd_gpio_irq_init_pin(gpio_dev, pin);
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
}
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;


@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_current_max = 0;
break;
default:
dev_err(dev, "Port %d: default case!\n", port->port_number);
dev_dbg(dev, "Port %d: default case!\n", port->port_number);
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}


@ -154,6 +154,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->enabled = (ret > 0);
state->period = EC_PWM_MAX_DUTY;
state->polarity = PWM_POLARITY_NORMAL;
/*
* Note that "disabled" and "duty cycle == 0" are treated the same. If


@ -109,6 +109,7 @@ static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
duty = val & SPRD_PWM_DUTY_MSK;
tmp = (prescale + 1) * NSEC_PER_SEC * duty;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
state->polarity = PWM_POLARITY_NORMAL;
/* Disable PWM clocks if the PWM channel is not in enable state. */
if (!state->enabled)


@ -721,13 +721,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
iscsi_set_param(cls_conn, param, buf, buflen);
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
mutex_lock(&tcp_sw_conn->sock_lock);
if (!tcp_sw_conn->sock) {
mutex_unlock(&tcp_sw_conn->sock_lock);
return -ENOTCONN;
}
iscsi_set_param(cls_conn, param, buf, buflen);
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
mutex_unlock(&tcp_sw_conn->sock_lock);


@ -503,9 +503,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
int i;
struct ses_component *scomp;
if (!edev->component[0].scratch)
return 0;
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
@ -596,8 +593,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
components++,
type_ptr[0],
name);
else
else if (components < edev->components)
ecomp = &edev->component[components++];
else
ecomp = ERR_PTR(-EINVAL);
if (!IS_ERR(ecomp)) {
if (addl_desc_ptr) {
@ -728,11 +727,6 @@ static int ses_intf_add(struct device *cdev,
components += type_ptr[1];
}
if (components == 0) {
sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
goto err_free;
}
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@ -774,9 +768,11 @@ static int ses_intf_add(struct device *cdev,
buf = NULL;
}
page2_not_supported:
scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
if (!scomp)
goto err_free;
if (components > 0) {
scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
if (!scomp)
goto err_free;
}
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
components, &ses_enclosure_callbacks);


@ -809,11 +809,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
struct lpuart_port, port);
unsigned long stat = lpuart32_read(port, UARTSTAT);
unsigned long sfifo = lpuart32_read(port, UARTFIFO);
unsigned long ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
/*
* LPUART Transmission Complete Flag may never be set while queuing a break
* character, so avoid checking for transmission complete when UARTCTRL_SBK
* is asserted.
*/
if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
return TIOCSER_TEMT;
return 0;
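
The decision reduces to a small truth check, sketched below; the bit positions are placeholders, not the real LPUART register layout:

#include <stdio.h>

#define STAT_TC      (1u << 0) /* placeholder bit positions */
#define FIFO_TXEMPT  (1u << 1)
#define CTRL_SBK     (1u << 2)
#define TIOCSER_TEMT 0x01

static int tx_empty(unsigned int stat, unsigned int sfifo, unsigned int ctrl)
{
    /* While a break is queued, TC may never set; report empty anyway. */
    if ((stat & STAT_TC && sfifo & FIFO_TXEMPT) || ctrl & CTRL_SBK)
        return TIOCSER_TEMT;
    return 0;
}

int main(void)
{
    printf("%d\n", tx_empty(0, 0, CTRL_SBK));          /* 1 */
    printf("%d\n", tx_empty(STAT_TC, FIFO_TXEMPT, 0)); /* 1 */
    printf("%d\n", tx_empty(STAT_TC, 0, 0));           /* 0 */
    return 0;
}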


@ -31,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/major.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
@ -2923,6 +2924,13 @@ static int sci_init_single(struct platform_device *dev,
sci_port->irqs[i] = platform_get_irq(dev, i);
}
/*
* The fourth interrupt on SCI port is transmit end interrupt, so
* shuffle the interrupts.
*/
if (p->type == PORT_SCI)
swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
* interrupt resource is specified as there is only one interrupt ID.
@ -2988,7 +2996,7 @@ static int sci_init_single(struct platform_device *dev,
port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
port->fifosize = sci_port->params->fifosize;
if (port->type == PORT_SCI) {
if (port->type == PORT_SCI && !dev->dev.of_node) {
if (sci_port->reg_size >= 0x20)
port->regshift = 2;
else


@ -1175,6 +1175,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
mutex_unlock(&tegra->lock);
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@ -1243,9 +1246,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
}
tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
tegra->padctl,
tegra->otg_usb2_port);
tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;


@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
@ -223,6 +224,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct iommu_domain *domain;
int err, i;
u64 val;
u32 intrs;
@ -241,7 +243,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
domain = iommu_get_domain_for_dev(dev);
if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
domain->type == IOMMU_DOMAIN_IDENTITY)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");


@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */


@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),


@ -101,8 +101,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
/* Default to pin assign C if available */
if (pin_assign & BIT(DP_PIN_ASSIGN_C))
pin_assign = BIT(DP_PIN_ASSIGN_C);
}
if (!pin_assign)
return -EINVAL;
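
Selection sketch for the DP-only branch: mask down to the DP-only assignments, then collapse to pin assignment C whenever it is on offer. The bit numbering below follows the usual A..F ordering but is my assumption, not copied from the header:

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define DP_PIN_ASSIGN_C 2 /* assuming A=0, B=1, C=2, ... */
#define DP_PIN_ASSIGN_E 4
#define DP_ONLY_MASK    (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_E))

int main(void)
{
    unsigned int pin_assign = BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_E);

    pin_assign &= DP_ONLY_MASK;
    if (pin_assign & BIT(DP_PIN_ASSIGN_C)) /* default to pin assign C */
        pin_assign = BIT(DP_PIN_ASSIGN_C);
    printf("0x%x\n", pin_assign); /* 0x4 */
    return 0;
}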


@ -1117,6 +1117,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
/* only for kernel-internal use */
var.activate &= ~FB_ACTIVATE_KD_TEXT;
console_lock();
lock_fb_info(info);
ret = fbcon_modechange_possible(info, &var);


@ -121,6 +121,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout;
timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
if (action)
writel(gwdt->clk * timeout,


@ -2246,6 +2246,23 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
/*
* Check if the checksum implementation is a fast accelerated one.
* As-is this is a bit of a hack and should be replaced once the csum
* implementations provide that information themselves.
*/
switch (csum_type) {
case BTRFS_CSUM_TYPE_CRC32:
if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
break;
default:
break;
}
btrfs_info(fs_info, "using %s (%s) checksum algorithm",
btrfs_super_csum_name(csum_type),
crypto_shash_driver_name(csum_shash));
return 0;
}


@ -1692,8 +1692,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
btrfs_sb(s)->bdev_holder = fs_type;
if (!strstr(crc32c_impl(), "generic"))
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
error = btrfs_fill_super(s, fs_devices, data);
}
if (!error)


@ -875,8 +875,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
if (!kcred)
return NULL;
kcred->uid = ses->se_cb_sec.uid;
kcred->gid = ses->se_cb_sec.gid;
kcred->fsuid = ses->se_cb_sec.uid;
kcred->fsgid = ses->se_cb_sec.gid;
return kcred;
}
}


@ -2614,11 +2614,10 @@ static int nilfs_segctor_thread(void *arg)
goto loop;
end_thread:
spin_unlock(&sci->sc_state_lock);
/* end sync. */
sci->sc_task = NULL;
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
}


@ -483,6 +483,7 @@ static void nilfs_put_super(struct super_block *sb)
up_write(&nilfs->ns_sem);
}
nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);
@ -1106,6 +1107,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
nilfs_put_root(fsroot);
failed_unload:
nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);


@ -87,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs)
{
might_sleep();
if (nilfs_init(nilfs)) {
nilfs_sysfs_delete_device_group(nilfs);
brelse(nilfs->ns_sbh[0]);
brelse(nilfs->ns_sbh[1]);
}
@ -305,6 +304,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
}
err = nilfs_sysfs_create_device_group(sb);
if (unlikely(err))
goto sysfs_error;
if (valid_fs)
goto skip_recovery;
@ -366,6 +369,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
failed_unload:
nilfs_sysfs_delete_device_group(nilfs);
sysfs_error:
iput(nilfs->ns_cpfile);
iput(nilfs->ns_sufile);
iput(nilfs->ns_dat);
@ -697,10 +703,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
if (err)
goto failed_sbh;
err = nilfs_sysfs_create_device_group(sb);
if (err)
goto failed_sbh;
set_nilfs_init(nilfs);
err = 0;
out:


@ -3396,10 +3396,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;
if (osb->cconn) {
ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;
ocfs2_dlm_shutdown_debug(osb);
ocfs2_dlm_shutdown_debug(osb);
}
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,


@ -1923,8 +1923,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
!ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
if (osb->cconn)
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
debugfs_remove_recursive(osb->osb_debug_root);


@ -1106,6 +1106,11 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table)
err |= sysctl_err(path, table, "array not allowed");
}
if (table->proc_handler == proc_dou8vec_minmax) {
if (table->maxlen != sizeof(u8))
err |= sysctl_err(path, table, "array not allowed");
}
return err;
}
@ -1121,6 +1126,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
(table->proc_handler == proc_douintvec) ||
(table->proc_handler == proc_douintvec_minmax) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dou8vec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
(table->proc_handler == proc_dointvec_ms_jiffies) ||


@ -811,7 +811,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
static inline unsigned long get_lock_parent_ip(void)
static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;


@ -380,8 +380,8 @@ extern note_buf_t __percpu *crash_notes;
extern bool kexec_in_progress;
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
ssize_t crash_get_memory_size(void);
void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);


@ -53,6 +53,8 @@ int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
loff_t *);


@ -85,41 +85,41 @@ struct netns_ipv4 {
struct xt_table *nat_table;
#endif
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
int sysctl_icmp_ignore_bogus_error_responses;
u8 sysctl_icmp_echo_ignore_all;
u8 sysctl_icmp_echo_ignore_broadcasts;
u8 sysctl_icmp_ignore_bogus_error_responses;
u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
int sysctl_icmp_errors_use_inbound_ifaddr;
struct local_ports ip_local_ports;
int sysctl_tcp_ecn;
int sysctl_tcp_ecn_fallback;
u8 sysctl_tcp_ecn;
u8 sysctl_tcp_ecn_fallback;
int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
u8 sysctl_ip_default_ttl;
u8 sysctl_ip_no_pmtu_disc;
u8 sysctl_ip_fwd_use_pmtu;
int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
int sysctl_ip_autobind_reuse;
u8 sysctl_ip_nonlocal_bind;
u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
u8 sysctl_ip_dynaddr;
u8 sysctl_ip_early_demux;
#ifdef CONFIG_NET_L3_MASTER_DEV
int sysctl_raw_l3mdev_accept;
u8 sysctl_raw_l3mdev_accept;
#endif
int sysctl_tcp_early_demux;
int sysctl_udp_early_demux;
int sysctl_nexthop_compat_mode;
u8 sysctl_nexthop_compat_mode;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
u8 sysctl_fwmark_reflect;
u8 sysctl_tcp_fwmark_accept;
#ifdef CONFIG_NET_L3_MASTER_DEV
int sysctl_tcp_l3mdev_accept;
u8 sysctl_tcp_l3mdev_accept;
#endif
int sysctl_tcp_mtu_probing;
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
int sysctl_tcp_min_snd_mss;
@ -127,46 +127,47 @@ struct netns_ipv4 {
u32 sysctl_tcp_probe_interval;
int sysctl_tcp_keepalive_time;
int sysctl_tcp_keepalive_probes;
int sysctl_tcp_keepalive_intvl;
u8 sysctl_tcp_keepalive_probes;
int sysctl_tcp_syn_retries;
int sysctl_tcp_synack_retries;
int sysctl_tcp_syncookies;
u8 sysctl_tcp_syn_retries;
u8 sysctl_tcp_synack_retries;
u8 sysctl_tcp_syncookies;
int sysctl_tcp_reordering;
int sysctl_tcp_retries1;
int sysctl_tcp_retries2;
int sysctl_tcp_orphan_retries;
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
int sysctl_tcp_tw_reuse;
int sysctl_tcp_sack;
int sysctl_tcp_window_scaling;
int sysctl_tcp_timestamps;
int sysctl_tcp_early_retrans;
int sysctl_tcp_recovery;
int sysctl_tcp_thin_linear_timeouts;
int sysctl_tcp_slow_start_after_idle;
int sysctl_tcp_retrans_collapse;
int sysctl_tcp_stdurg;
int sysctl_tcp_rfc1337;
int sysctl_tcp_abort_on_overflow;
int sysctl_tcp_fack;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
u8 sysctl_tcp_early_retrans;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
u8 sysctl_tcp_retrans_collapse;
u8 sysctl_tcp_stdurg;
u8 sysctl_tcp_rfc1337;
u8 sysctl_tcp_abort_on_overflow;
u8 sysctl_tcp_fack; /* obsolete */
int sysctl_tcp_max_reordering;
int sysctl_tcp_dsack;
int sysctl_tcp_app_win;
int sysctl_tcp_adv_win_scale;
int sysctl_tcp_frto;
int sysctl_tcp_nometrics_save;
int sysctl_tcp_no_ssthresh_metrics_save;
int sysctl_tcp_moderate_rcvbuf;
int sysctl_tcp_tso_win_divisor;
int sysctl_tcp_workaround_signed_windows;
u8 sysctl_tcp_dsack;
u8 sysctl_tcp_app_win;
u8 sysctl_tcp_frto;
u8 sysctl_tcp_nometrics_save;
u8 sysctl_tcp_no_ssthresh_metrics_save;
u8 sysctl_tcp_moderate_rcvbuf;
u8 sysctl_tcp_tso_win_divisor;
u8 sysctl_tcp_workaround_signed_windows;
int sysctl_tcp_limit_output_bytes;
int sysctl_tcp_challenge_ack_limit;
int sysctl_tcp_min_tso_segs;
int sysctl_tcp_min_rtt_wlen;
int sysctl_tcp_autocorking;
u8 sysctl_tcp_min_tso_segs;
u8 sysctl_tcp_autocorking;
u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
@ -184,7 +185,6 @@ struct netns_ipv4 {
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
int sysctl_tcp_reflect_tos;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;


@ -47,6 +47,18 @@ config CLANG_VERSION
int
default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
config AS_IS_GNU
def_bool $(success,test "$(as-name)" = GNU)
config AS_IS_LLVM
def_bool $(success,test "$(as-name)" = LLVM)
config AS_VERSION
int
# Use clang version if this is the integrated assembler
default CLANG_VERSION if AS_IS_LLVM
default $(as-version)
config LLD_VERSION
int
default $(shell,$(srctree)/scripts/lld-version.sh $(LD))


@ -2214,11 +2214,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
css_cs(css)->attach_in_progress--;
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
}


@ -11577,7 +11577,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
/*
* If its not a per-cpu rb, it must be the same task.
*/
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
goto out;
/*


@ -110,6 +110,14 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
unsigned long i;
int ret;
/*
* Because we write directly to the reserved memory region when loading
* crash kernels we need a serialization here to prevent multiple crash
* kernels from attempting to load simultaneously.
*/
if (!kexec_trylock())
return -EBUSY;
if (flags & KEXEC_ON_CRASH) {
dest_image = &kexec_crash_image;
if (kexec_crash_image)
@ -121,7 +129,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
if (nr_segments == 0) {
/* Uninstall image */
kimage_free(xchg(dest_image, NULL));
return 0;
ret = 0;
goto out_unlock;
}
if (flags & KEXEC_ON_CRASH) {
/*
@ -134,7 +143,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
if (ret)
return ret;
goto out_unlock;
if (flags & KEXEC_PRESERVE_CONTEXT)
image->preserve_context = 1;
@ -171,6 +180,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
arch_kexec_protect_crashkres();
kimage_free(image);
out_unlock:
kexec_unlock();
return ret;
}
@ -247,21 +258,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
return -EINVAL;
/* Because we write directly to the reserved memory
* region when loading crash kernels we need a mutex here to
* prevent multiple crash kernels from attempting to load
* simultaneously, and to prevent a crash kernel from loading
* over the top of an in-use crash kernel.
*
* KISS: always take the mutex.
*/
if (!mutex_trylock(&kexec_mutex))
return -EBUSY;
result = do_kexec_load(entry, nr_segments, segments, flags);
mutex_unlock(&kexec_mutex);
return result;
}
@ -301,21 +299,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
return -EFAULT;
}
/* Because we write directly to the reserved memory
* region when loading crash kernels we need a mutex here to
* prevent multiple crash kernels from attempting to load
* simultaneously, and to prevent a crash kernel from loading
* over the top of an in-use crash kernel.
*
* KISS: always take the mutex.
*/
if (!mutex_trylock(&kexec_mutex))
return -EBUSY;
result = do_kexec_load(entry, nr_segments, ksegments, flags);
mutex_unlock(&kexec_mutex);
return result;
}
#endif


@ -45,7 +45,7 @@
#include <crypto/sha.h>
#include "kexec_internal.h"
DEFINE_MUTEX(kexec_mutex);
atomic_t __kexec_lock = ATOMIC_INIT(0);
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;
@ -943,7 +943,7 @@ int kexec_load_disabled;
*/
void __noclone __crash_kexec(struct pt_regs *regs)
{
/* Take the kexec_mutex here to prevent sys_kexec_load
/* Take the kexec_lock here to prevent sys_kexec_load
* running on one cpu from replacing the crash kernel
* we are using after a panic on a different cpu.
*
@ -951,7 +951,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
* of memory the xchg(&kexec_crash_image) would be
* sufficient. But since I reuse the memory...
*/
if (mutex_trylock(&kexec_mutex)) {
if (kexec_trylock()) {
if (kexec_crash_image) {
struct pt_regs fixed_regs;
@ -960,7 +960,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
mutex_unlock(&kexec_mutex);
kexec_unlock();
}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
@ -989,14 +989,17 @@ void crash_kexec(struct pt_regs *regs)
}
}
size_t crash_get_memory_size(void)
ssize_t crash_get_memory_size(void)
{
size_t size = 0;
ssize_t size = 0;
if (!kexec_trylock())
return -EBUSY;
mutex_lock(&kexec_mutex);
if (crashk_res.end != crashk_res.start)
size = resource_size(&crashk_res);
mutex_unlock(&kexec_mutex);
kexec_unlock();
return size;
}
@ -1016,7 +1019,8 @@ int crash_shrink_memory(unsigned long new_size)
unsigned long old_size;
struct resource *ram_res;
mutex_lock(&kexec_mutex);
if (!kexec_trylock())
return -EBUSY;
if (kexec_crash_image) {
ret = -ENOENT;
@ -1054,7 +1058,7 @@ int crash_shrink_memory(unsigned long new_size)
insert_resource(&iomem_resource, ram_res);
unlock:
mutex_unlock(&kexec_mutex);
kexec_unlock();
return ret;
}
@ -1126,7 +1130,7 @@ int kernel_kexec(void)
{
int error = 0;
if (!mutex_trylock(&kexec_mutex))
if (!kexec_trylock())
return -EBUSY;
if (!kexec_image) {
error = -EINVAL;
@ -1201,7 +1205,7 @@ int kernel_kexec(void)
#endif
Unlock:
mutex_unlock(&kexec_mutex);
kexec_unlock();
return error;
}


@ -343,7 +343,7 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
image = NULL;
if (!mutex_trylock(&kexec_mutex))
if (!kexec_trylock())
return -EBUSY;
dest_image = &kexec_image;
@ -415,7 +415,7 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
arch_kexec_protect_crashkres();
mutex_unlock(&kexec_mutex);
kexec_unlock();
kimage_free(image);
return ret;
}


@ -15,7 +15,20 @@ int kimage_is_destination_range(struct kimage *image,
int machine_kexec_post_load(struct kimage *image);
extern struct mutex kexec_mutex;
/*
* Whatever is used to serialize accesses to the kexec_crash_image needs to be
* NMI safe, as __crash_kexec() can happen during nmi_panic(), so here we use a
* "simple" atomic variable that is acquired with a cmpxchg().
*/
extern atomic_t __kexec_lock;
static inline bool kexec_trylock(void)
{
return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0;
}
static inline void kexec_unlock(void)
{
atomic_set_release(&__kexec_lock, 0);
}
#ifdef CONFIG_KEXEC_FILE
#include <linux/purgatory.h>
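
The same acquire/release trylock can be expressed with C11 atomics; this is an analog of the scheme above for illustration, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int kexec_lock;

/* cmpxchg-acquire: only the first caller wins, and nothing sleeps,
 * which is what makes the scheme safe from NMI context. */
static int kexec_trylock_demo(void)
{
    int expected = 0;

    return atomic_compare_exchange_strong_explicit(&kexec_lock, &expected, 1,
            memory_order_acquire, memory_order_relaxed);
}

static void kexec_unlock_demo(void)
{
    atomic_store_explicit(&kexec_lock, 0, memory_order_release);
}

int main(void)
{
    if (kexec_trylock_demo()) {
        printf("retry while held: %d\n", kexec_trylock_demo()); /* 0 */
        kexec_unlock_demo();
    }
    return 0;
}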


@ -106,7 +106,12 @@ KERNEL_ATTR_RO(kexec_crash_loaded);
static ssize_t kexec_crash_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%zu\n", crash_get_memory_size());
ssize_t size = crash_get_memory_size();
if (size < 0)
return size;
return sprintf(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
struct kobj_attribute *attr,


@ -9481,8 +9481,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
local->group_capacity;
sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
sds->total_capacity;
/*
* If the local group is more loaded than the selected
* busiest group don't try to pull any tasks.
@ -9491,6 +9489,19 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
env->imbalance = 0;
return;
}
sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
sds->total_capacity;
/*
* If the local group is more loaded than the average system
* load, don't try to pull any tasks.
*/
if (local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return;
}
}
/*


@ -1064,6 +1064,65 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
do_proc_douintvec_minmax_conv, &param);
}
/**
* proc_dou8vec_minmax - read a vector of unsigned chars with min/max values
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
*
* Reads/writes up to table->maxlen/sizeof(u8) unsigned char
* values from/to the user buffer, treated as an ASCII string. Negative
* strings are not allowed.
*
* This routine will ensure the values are within the range specified by
* table->extra1 (min) and table->extra2 (max).
*
* Returns 0 on success or an error on write when the range check fails.
*/
int proc_dou8vec_minmax(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table tmp;
unsigned int min = 0, max = 255U, val;
u8 *data = table->data;
struct do_proc_douintvec_minmax_conv_param param = {
.min = &min,
.max = &max,
};
int res;
/* Do not support arrays yet. */
if (table->maxlen != sizeof(u8))
return -EINVAL;
if (table->extra1) {
min = *(unsigned int *) table->extra1;
if (min > 255U)
return -EINVAL;
}
if (table->extra2) {
max = *(unsigned int *) table->extra2;
if (max > 255U)
return -EINVAL;
}
tmp = *table;
tmp.maxlen = sizeof(val);
tmp.data = &val;
val = READ_ONCE(*data);
res = do_proc_douintvec(&tmp, write, buffer, lenp, ppos,
do_proc_douintvec_minmax_conv, &param);
if (res)
return res;
if (write)
WRITE_ONCE(*data, val);
return 0;
}
EXPORT_SYMBOL_GPL(proc_dou8vec_minmax);
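
A user-space reduction of the handler's contract: a single u8 only, both bounds must themselves fit in a byte, and the parsed value must land inside [min, max]. The helper below is illustrative, not the sysctl machinery:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_u8_minmax(const char *buf, unsigned int min,
                           unsigned int max, unsigned char *out)
{
    char *end;
    unsigned long v;

    if (min > 255 || max > 255) /* mirrors the extra1/extra2 checks */
        return -EINVAL;
    v = strtoul(buf, &end, 10);
    if (end == buf || v < min || v > max)
        return -EINVAL;
    *out = (unsigned char)v;
    return 0;
}

int main(void)
{
    unsigned char v = 0;

    printf("%d\n", parse_u8_minmax("200", 0, 255, &v)); /* 0 */
    printf("%d\n", parse_u8_minmax("300", 0, 255, &v)); /* -EINVAL */
    return 0;
}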
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
unsigned int *valp,
int write, void *data)
@ -1615,6 +1674,12 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
return -ENOSYS;
}
int proc_dou8vec_minmax(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{


@ -5390,12 +5390,15 @@ int modify_ftrace_direct(unsigned long ip,
ret = 0;
}
if (unlikely(ret && new_direct)) {
direct->count++;
list_del_rcu(&new_direct->next);
synchronize_rcu_tasks();
kfree(new_direct);
ftrace_direct_func_count--;
if (ret) {
direct->addr = old_addr;
if (unlikely(new_direct)) {
direct->count++;
list_del_rcu(&new_direct->next);
synchronize_rcu_tasks();
kfree(new_direct);
ftrace_direct_func_count--;
}
}
out_unlock:


@ -2962,6 +2962,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
if (RB_WARN_ON(cpu_buffer,
rb_is_reader_page(cpu_buffer->tail_page)))
return;
/*
* No need for a memory barrier here, as the update
* of the tail_page did it for this page.
*/
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@ -2971,6 +2975,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
/* Make sure the readers see the content of what is committed. */
smp_wmb();
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
RB_WARN_ON(cpu_buffer,
@ -4390,7 +4396,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Make sure we see any padding after the write update
* (see rb_reset_tail())
* (see rb_reset_tail()).
*
* In addition, a writer may be writing on the reader page
* if the page has not been fully filled, so the read barrier
* is also needed to make sure we see the content of what is
* committed by the writer (see rb_set_commit_to_write()).
*/
smp_rmb();
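
The smp_wmb() added on the commit path pairs with this reader-side smp_rmb(): a classic publish/consume ordering. In C11 terms it is the release/acquire fence idiom sketched below (a single-threaded toy, for illustration only):

#include <stdatomic.h>
#include <stdio.h>

static int page_data;          /* what the writer commits */
static atomic_int commit_flag; /* what the reader polls */

static void writer(void)
{
    page_data = 42;
    atomic_thread_fence(memory_order_release); /* smp_wmb() analog */
    atomic_store_explicit(&commit_flag, 1, memory_order_relaxed);
}

static int reader(void)
{
    if (!atomic_load_explicit(&commit_flag, memory_order_relaxed))
        return -1; /* nothing committed yet */
    atomic_thread_fence(memory_order_acquire); /* smp_rmb() analog */
    return page_data;
}

int main(void)
{
    writer();
    printf("%d\n", reader()); /* 42 */
    return 0;
}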


@ -8896,6 +8896,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
free_trace_buffers(tr);
clear_tracing_err_log(tr);
for (i = 0; i < tr->nr_topts; i++) {
kfree(tr->topts[i].topts);


@ -668,6 +668,7 @@ static void __del_from_avail_list(struct swap_info_struct *p)
{
int nid;
assert_spin_locked(&p->lock);
for_each_node(nid)
plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}
@ -2630,8 +2631,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
del_from_avail_list(p);
spin_lock(&p->lock);
del_from_avail_list(p);
if (p->prio < 0) {
struct swap_info_struct *si = p;
int nid;

Some files were not shown because too many files have changed in this diff.