This is the 5.10.62 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEx2IsACgkQONu9yGCS
 aT6bihAA1pqL2x40IIk7nmu3XW/lNJxt4WLRjT7kGehDYE5gNz5VTQDaccb/+PiM
 18XjLN0RcSkhiai8410A/SNoHBvPSMWrNvf093NpFcGm0WcvsZVyNUfRgdXfs1N8
 FM1DAgA5eVowL+CthZyJIIRX6VllLAg1b3Z61frNO/t1nOUsS26enoxMSeaziqjz
 BuvajftaRGtJBMA3iNB1iKR3EEHRqfuWMT4swfQGDshdHlzXbdDOs0KV4bTYRo+f
 76n1bvftiMH4vRONIR3T+ZeJWeaL6IfW01v7qYx+06hBa6TtkwDc+Ofw5zxWFtj1
 7abou5U46gHIp4Ce8ANJzmnvd7iOssFk7+6y8Z7lZgUQCdwHFLP70VX9Jd3bRlsK
 UIwG0qiIG0jdLgUvjgritDWIcxuSniWIbecLr1xfHIzJAIvVr6bjRNyOz3gXgaa/
 h16oHZRhprC3+4baUQPjPiSb54z+y0Xrx8x9ZSvgHe4gQmctjzoKMVr9Fy7NSpQv
 MVVNi9GvqRI+o+Tu7/N3yxShdXvuRvOOFgeOcyvA7BigrxMb+LvOsPcISFIBva6a
 6+vKuRY79TMwEbDJkURnSNqkVyvMuqKhH5OGiKVHkQrp3AkltpDyDuap+6i2oar4
 ba20HaVstcSdCfx8iCmiy/l0g16vOyNC8cVVdB4WoX1fU6aiv9Y=
 =g+cC
 -----END PGP SIGNATURE-----

Merge 5.10.62 into android12-5.10-lts

Changes in 5.10.62
	net: qrtr: fix another OOB Read in qrtr_endpoint_post
	bpf: Fix ringbuf helper function compatibility
	bpf: Fix NULL pointer dereference in bpf_get_local_storage() helper
	ASoC: rt5682: Adjust headset volume button threshold
	ASoC: component: Remove misplaced prefix handling in pin control functions
	ARC: Fix CONFIG_STACKDEPOT
	netfilter: conntrack: collect all entries in one cycle
	once: Fix panic when module unload
	blk-iocost: fix lockdep warning on blkcg->lock
	ovl: fix uninitialized pointer read in ovl_lookup_real_one()
	net: mscc: Fix non-GPL export of regmap APIs
	can: usb: esd_usb2: esd_usb2_rx_event(): fix the interchange of the CAN RX and TX error counters
	ceph: correctly handle releasing an embedded cap flush
	riscv: Ensure the value of FP registers in the core dump file is up to date
	Revert "btrfs: compression: don't try to compress if we don't have enough pages"
	drm/amdgpu: Cancel delayed work when GFXOFF is disabled
	Revert "USB: serial: ch341: fix character loss at high transfer rates"
	USB: serial: option: add new VID/PID to support Fibocom FG150
	usb: renesas-xhci: Prefer firmware loading on unknown ROM state
	usb: dwc3: gadget: Fix dwc3_calc_trbs_left()
	usb: dwc3: gadget: Stop EP0 transfers during pullup disable
	scsi: core: Fix hang of freezing queue between blocking and running device
	RDMA/bnxt_re: Add missing spin lock initialization
	IB/hfi1: Fix possible null-pointer dereference in _extend_sdma_tx_descs()
	RDMA/bnxt_re: Remove unpaired rtnl unlock in bnxt_re_dev_init()
	ice: do not abort devlink info if board identifier can't be found
	net: usb: pegasus: fixes of set_register(s) return value evaluation;
	igc: fix page fault when thunderbolt is unplugged
	igc: Use num_tx_queues when iterating over tx_ring queue
	e1000e: Fix the max snoop/no-snoop latency for 10M
	e1000e: Do not take care about recovery NVM checksum
	RDMA/efa: Free IRQ vectors on error flow
	ip_gre: add validation for csum_start
	xgene-v2: Fix a resource leak in the error handling path of 'xge_probe()'
	net: marvell: fix MVNETA_TX_IN_PRGRS bit number
	ucounts: Increase ucounts reference counter before the security hook
	net/sched: ets: fix crash when flipping from 'strict' to 'quantum'
	ipv6: use siphash in rt6_exception_hash()
	ipv4: use siphash instead of Jenkins in fnhe_hashfun()
	cxgb4: dont touch blocked freelist bitmap after free
	rtnetlink: Return correct error on changing device netns
	net: hns3: clear hardware resource when loading driver
	net: hns3: add waiting time before cmdq memory is released
	net: hns3: fix duplicate node in VLAN list
	net: hns3: fix get wrong pfc_en when query PFC configuration
	Revert "mmc: sdhci-iproc: Set SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN on BCM2711"
	net: stmmac: add mutex lock to protect est parameters
	net: stmmac: fix kernel panic due to NULL pointer dereference of plat->est
	drm/i915: Fix syncmap memory leak
	usb: gadget: u_audio: fix race condition on endpoint stop
	dt-bindings: sifive-l2-cache: Fix 'select' matching
	perf/x86/intel/uncore: Fix integer overflow on 23 bit left shift of a u32
	clk: renesas: rcar-usb2-clock-sel: Fix kernel NULL pointer dereference
	iwlwifi: pnvm: accept multiple HW-type TLVs
	opp: remove WARN when no valid OPPs remain
	cpufreq: blocklist Qualcomm sm8150 in cpufreq-dt-platdev
	virtio: Improve vq->broken access to avoid any compiler optimization
	virtio_pci: Support surprise removal of virtio pci device
	virtio_vdpa: reject invalid vq indices
	vringh: Use wiov->used to check for read/write desc order
	tools/virtio: fix build
	qed: qed ll2 race condition fixes
	qed: Fix null-pointer dereference in qed_rdma_create_qp()
	Revert "drm/amd/pm: fix workload mismatch on vega10"
	drm/amd/pm: change the workload type for some cards
	blk-mq: don't grab rq's refcount in blk_mq_check_expired()
	drm: Copy drm_wait_vblank to user before returning
	drm/nouveau/disp: power down unused DP links during init
	drm/nouveau/kms/nv50: workaround EFI GOP window channel format differences
	net/rds: dma_map_sg is entitled to merge entries
	btrfs: fix race between marking inode needs to be logged and log syncing
	pipe: avoid unnecessary EPOLLET wakeups under normal loads
	pipe: do FASYNC notifications for every pipe IO, not just state changes
	mtd: spinand: Fix incorrect parameters for on-die ECC
	tipc: call tipc_wait_for_connect only when dlen is not 0
	vt_kdsetmode: extend console locking
	Bluetooth: btusb: check conditions before enabling USB ALT 3 for WBS
	riscv: Fixup wrong ftrace remove cflag
	riscv: Fixup patch_text panic in ftrace
	perf env: Fix memory leak of bpf_prog_info_linear member
	perf symbol-elf: Fix memory leak by freeing sdt_note.args
	perf record: Fix memory leak in vDSO found using ASAN
	perf tools: Fix arm64 build error with gcc-11
	perf annotate: Fix jump parsing for C++ code.
	powerpc/perf: Invoke per-CPU variable access with disabled interrupts
	srcu: Provide internal interface to start a Tree SRCU grace period
	srcu: Provide polling interfaces for Tree SRCU grace periods
	srcu: Provide internal interface to start a Tiny SRCU grace period
	srcu: Make Tiny SRCU use multi-bit grace-period counter
	srcu: Provide polling interfaces for Tiny SRCU grace periods
	tracepoint: Use rcu get state and cond sync for static call updates
	usb: typec: ucsi: acpi: Always decode connector change information
	usb: typec: ucsi: Work around PPM losing change information
	usb: typec: ucsi: Clear pending after acking connector change
	net: dsa: mt7530: fix VLAN traffic leaks again
	lkdtm: Enable DOUBLE_FAULT on all architectures
	arm64: dts: qcom: msm8994-angler: Fix gpio-reserved-ranges 85-88
	btrfs: fix NULL pointer dereference when deleting device by invalid id
	kthread: Fix PF_KTHREAD vs to_kthread() race
	Revert "floppy: reintroduce O_NDELAY fix"
	Revert "parisc: Add assembly implementations for memset, strlen, strcpy, strncpy and strcat"
	net: don't unconditionally copy_from_user a struct ifreq for socket ioctls
	audit: move put_tree() to avoid trim_trees refcount underflow and UAF
	bpf: Fix potentially incorrect results with bpf_get_local_storage()
	Linux 5.10.62

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5a9bf4b2c254ae21a10f838494cae1c3fa016be3
Contained in commit 674d2ac211 (Greg Kroah-Hartman, 2021-09-03 10:51:56 +02:00)
133 changed files with 1107 additions and 626 deletions

---
@@ -24,9 +24,9 @@ allOf:
select:
properties:
compatible:
items:
- enum:
- sifive,fu540-c000-ccache
contains:
enum:
- sifive,fu540-c000-ccache
required:
- compatible
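
For context: the old 'items' form only matched a compatible list consisting of
exactly that one string, so nodes that also carry an additional fallback
compatible were never validated against this schema; 'contains' matches the
string anywhere in the list.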

---
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 61
SUBLEVEL = 62
EXTRAVERSION =
NAME = Dare mighty things

---
@@ -88,6 +88,8 @@ SECTIONS
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
}

---
@@ -32,3 +32,7 @@ serial@f991e000 {
};
};
};
&tlmm {
gpio-reserved-ranges = <85 4>;
};

---
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
#define __HAVE_ARCH_MEMCPY
void * memcpy(void * dest,const void *src,size_t count);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#endif

---
@@ -17,10 +17,6 @@
#include <linux/string.h>
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);

---
@@ -3,7 +3,7 @@
# Makefile for parisc-specific library files
#
lib-y := lusercopy.o bitops.o checksum.o io.o memcpy.o \
ucmpdi2.o delay.o string.o
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o

--- arch/parisc/lib/memset.c (new file, +72 lines)
@@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/types.h>
#include <asm/string.h>

#define OPSIZ (BITS_PER_LONG/8)
typedef unsigned long op_t;

void *
memset (void *dstpp, int sc, size_t len)
{
  unsigned int c = sc;
  long int dstp = (long int) dstpp;

  if (len >= 8)
    {
      size_t xlen;
      op_t cccc;

      cccc = (unsigned char) c;
      cccc |= cccc << 8;
      cccc |= cccc << 16;
      if (OPSIZ > 4)
        /* Do the shift in two steps to avoid warning if long has 32 bits. */
        cccc |= (cccc << 16) << 16;

      /* There are at least some bytes to set.
         No need to test for LEN == 0 in this alignment loop. */
      while (dstp % OPSIZ != 0)
        {
          ((unsigned char *) dstp)[0] = c;
          dstp += 1;
          len -= 1;
        }

      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
      xlen = len / (OPSIZ * 8);
      while (xlen > 0)
        {
          ((op_t *) dstp)[0] = cccc;
          ((op_t *) dstp)[1] = cccc;
          ((op_t *) dstp)[2] = cccc;
          ((op_t *) dstp)[3] = cccc;
          ((op_t *) dstp)[4] = cccc;
          ((op_t *) dstp)[5] = cccc;
          ((op_t *) dstp)[6] = cccc;
          ((op_t *) dstp)[7] = cccc;
          dstp += 8 * OPSIZ;
          xlen -= 1;
        }
      len %= OPSIZ * 8;

      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
      xlen = len / OPSIZ;
      while (xlen > 0)
        {
          ((op_t *) dstp)[0] = cccc;
          dstp += OPSIZ;
          xlen -= 1;
        }
      len %= OPSIZ;
    }

  /* Write the last few bytes. */
  while (len > 0)
    {
      ((unsigned char *) dstp)[0] = c;
      dstp += 1;
      len -= 1;
    }

  return dstpp;
}
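
Worked example of the byte replication above: for sc = 0xAB on a 64-bit kernel
(OPSIZ = 8), cccc grows 0xAB, 0xABAB, 0xABABABAB, 0xABABABABABABABAB, so each
op_t store fills eight bytes at once.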

---
@@ -1,136 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC assembly string functions
*
* Copyright (C) 2019 Helge Deller <deller@gmx.de>
*/
#include <asm/assembly.h>
#include <linux/linkage.h>
.section .text.hot
.level PA_ASM_LEVEL
t0 = r20
t1 = r21
t2 = r22
ENTRY_CFI(strlen, frame=0,no_calls)
or,COND(<>) arg0,r0,ret0
b,l,n .Lstrlen_null_ptr,r0
depwi 0,31,2,ret0
cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_not_aligned:
uaddcm arg0,ret0,t1
shladd t1,3,r0,t1
mtsar t1
depwi -1,%sar,32,t0
uxor,nbz r0,t0,r0
.Lstrlen_loop:
b,l,n .Lstrlen_end_loop,r0
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_end_loop:
extrw,u,<> t0,7,8,r0
addib,tr,n -3,ret0,.Lstrlen_out
extrw,u,<> t0,15,8,r0
addib,tr,n -2,ret0,.Lstrlen_out
extrw,u,<> t0,23,8,r0
addi -1,ret0,ret0
.Lstrlen_out:
bv r0(rp)
uaddcm ret0,arg0,ret0
.Lstrlen_null_ptr:
bv,n r0(rp)
ENDPROC_CFI(strlen)
ENTRY_CFI(strcpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 0(arg0),ret0
ldo 1(arg1),t1
cmpb,= r0,t0,2f
ldo 1(arg0),t2
1: ldb 0(t1),arg1
stb arg1,0(t2)
ldo 1(t1),t1
cmpb,<> r0,arg1,1b
ldo 1(t2),t2
2: bv,n r0(rp)
ENDPROC_CFI(strcpy)
ENTRY_CFI(strncpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 1(arg1),t1
ldo 0(arg0),ret0
cmpb,= r0,t0,2f
ldo 1(arg0),arg1
1: ldo -1(arg2),arg2
cmpb,COND(=),n r0,arg2,2f
ldb 0(t1),arg0
stb arg0,0(arg1)
ldo 1(t1),t1
cmpb,<> r0,arg0,1b
ldo 1(arg1),arg1
2: bv,n r0(rp)
ENDPROC_CFI(strncpy)
ENTRY_CFI(strcat, frame=0,no_calls)
ldb 0(arg0),t0
cmpb,= t0,r0,2f
ldo 0(arg0),ret0
ldo 1(arg0),arg0
1: ldb 0(arg0),t1
cmpb,<>,n r0,t1,1b
ldo 1(arg0),arg0
2: ldb 0(arg1),t2
stb t2,0(arg0)
ldo 1(arg0),arg0
ldb 0(arg1),t0
cmpb,<> r0,t0,2b
ldo 1(arg1),arg1
bv,n r0(rp)
ENDPROC_CFI(strcat)
ENTRY_CFI(memset, frame=0,no_calls)
copy arg0,ret0
cmpb,COND(=) r0,arg0,4f
copy arg0,t2
cmpb,COND(=) r0,arg2,4f
ldo -1(arg2),arg3
subi -1,arg3,t0
subi 0,t0,t1
cmpiclr,COND(>=) 0,t1,arg2
ldo -1(t1),arg2
extru arg2,31,2,arg0
2: stb arg1,0(t2)
ldo 1(t2),t2
addib,>= -1,arg0,2b
ldo -1(arg3),arg3
cmpiclr,COND(<=) 4,arg2,r0
b,l,n 4f,r0
#ifdef CONFIG_64BIT
depd,* r0,63,2,arg2
#else
depw r0,31,2,arg2
#endif
ldo 1(t2),t2
3: stb arg1,-1(t2)
stb arg1,0(t2)
stb arg1,1(t2)
stb arg1,2(t2)
addib,COND(>) -4,arg2,3b
ldo 4(t2),t2
4: bv,n r0(rp)
ENDPROC_CFI(memset)
.end

---
@@ -1884,7 +1884,7 @@ static bool is_event_blacklisted(u64 ev)
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
unsigned long flags, irq_flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
@@ -1992,7 +1992,9 @@ static int power_pmu_event_init(struct perf_event *event)
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
local_irq_save(irq_flags);
cpuhw = this_cpu_ptr(&cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
@@ -2003,13 +2005,13 @@
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
local_irq_restore(irq_flags);
return -EOPNOTSUPP;
}
cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);
local_irq_restore(irq_flags);
if (err)
return -EINVAL;

---
@@ -4,8 +4,9 @@
#
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_patch.o = -pg
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
extra-y += head.o

---
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/switch_to.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
{
struct __riscv_d_ext_state *fstate = &target->thread.fstate;
if (target == current)
fstate_save(current, task_pt_regs(current));
membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
membuf_store(&to, fstate->fcsr);
return membuf_zero(&to, 4); // explicitly pad

---
@@ -2,7 +2,8 @@
CFLAGS_init.o := -mcmodel=medany
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_init.o = -pg
CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE)
endif
KCOV_INSTRUMENT_init.o := n

---
@@ -4669,7 +4669,7 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
return;
pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
pci_read_config_dword(pdev, mem_offset, &pci_dword);
addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
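
The (resource_size_t) cast above matters because the shift is otherwise done
in 32 bits, discarding the high bits before the result is widened. A
standalone sketch of the bug, with an illustrative value (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pci_dword = 0x600;                  /* illustrative register value */
	uint64_t wrong = pci_dword << 23;            /* 32-bit shift: high bits lost */
	uint64_t right = (uint64_t)pci_dword << 23;  /* widen first, then shift */

	/* prints wrong=0x0 right=0x300000000 */
	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}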

---
@@ -3039,19 +3039,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
return -EINVAL;
spin_lock(&blkcg->lock);
spin_lock_irq(&blkcg->lock);
iocc->dfl_weight = v * WEIGHT_ONE;
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct ioc_gq *iocg = blkg_to_iocg(blkg);
if (iocg) {
spin_lock_irq(&iocg->ioc->lock);
spin_lock(&iocg->ioc->lock);
ioc_now(iocg->ioc, &now);
weight_updated(iocg, &now);
spin_unlock_irq(&iocg->ioc->lock);
spin_unlock(&iocg->ioc->lock);
}
}
spin_unlock(&blkcg->lock);
spin_unlock_irq(&blkcg->lock);
return nbytes;
}
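
The locking pattern behind this hunk, as a minimal sketch (hypothetical locks,
not the block-layer structures): disable interrupts once at the outermost lock
and take nested locks with the plain variants, since IRQs are already off:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);   /* plays the role of blkcg->lock */
static DEFINE_SPINLOCK(inner_lock);   /* plays the role of iocg->ioc->lock */

static void update_nested(void)
{
	spin_lock_irq(&outer_lock);   /* outermost lock disables IRQs */
	spin_lock(&inner_lock);       /* IRQs already off: plain lock suffices */
	/* ... update state guarded by both locks ... */
	spin_unlock(&inner_lock);
	spin_unlock_irq(&outer_lock); /* IRQs back on when the last lock drops */
}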

---
@@ -945,34 +945,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
unsigned long *next = priv;
/*
* Just do a quick check if it is expired before locking the request in
* so we're not unnecessarilly synchronizing across CPUs.
*/
if (!blk_mq_req_expired(rq, next))
return true;
/*
* We have reason to believe the request may be expired. Take a
* reference on the request to lock this request lifetime into its
* currently allocated context to prevent it from being reallocated in
* the event the completion by-passes this timeout handler.
*
* If the reference was already released, then the driver beat the
* timeout handler to posting a natural completion.
*/
if (!refcount_inc_not_zero(&rq->ref))
return true;
/*
* The request is now locked and cannot be reallocated underneath the
* timeout handler's processing. Re-verify this exact request is truly
* expired; if it is not expired, then the request was completed and
* reallocated as a new request.
* blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
* be reallocated underneath the timeout handler's processing, then
* the expire check is reliable. If the request is not expired, then
* it was completed and reallocated as a new request after returning
* from blk_mq_check_expired().
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
blk_mq_put_rq_ref(rq);
return true;
}

---
@@ -4120,23 +4120,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
if (mode & (FMODE_READ|FMODE_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
if (bdev_check_media_change(bdev))
floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
if (bdev_check_media_change(bdev))
floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;

---
@@ -486,6 +486,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_HW_RESET_ACTIVE 12
#define BTUSB_TX_WAIT_VND_EVT 13
#define BTUSB_WAKEUP_DISABLE 14
#define BTUSB_USE_ALT3_FOR_WBS 15
struct btusb_data {
struct hci_dev *hdev;
@@ -1718,16 +1719,20 @@ static void btusb_work(struct work_struct *work)
/* Bluetooth USB spec recommends alt 6 (63 bytes), but
* many adapters do not support it. Alt 1 appears to
* work for all adapters that do not have alt 6, and
* which work with WBS at all.
* which work with WBS at all. Some devices prefer
* alt 3 (HCI payload >= 60 Bytes let air packet
* data satisfy 60 bytes), requiring
* MTU >= 3 (packets) * 25 (size) - 3 (headers) = 72
* see also Core spec 5, vol 4, B 2.1.1 & Table 2.1.
*/
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
/* Because mSBC frames do not need to be aligned to the
* SCO packet boundary. If support the Alt 3, use the
* Alt 3 for HCI payload >= 60 Bytes let air packet
* data satisfy 60 bytes.
*/
if (new_alts == 1 && btusb_find_altsetting(data, 3))
if (btusb_find_altsetting(data, 6))
new_alts = 6;
else if (btusb_find_altsetting(data, 3) &&
hdev->sco_mtu >= 72 &&
test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags))
new_alts = 3;
else
new_alts = 1;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -4170,6 +4175,7 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
}
if (!reset)

---
@@ -190,7 +190,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
init.num_parents = 0;
priv->hw.init = &init;
ret = devm_clk_hw_register(NULL, &priv->hw);
ret = devm_clk_hw_register(dev, &priv->hw);
if (ret)
goto pm_put;

---
@@ -136,6 +136,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },

---
@@ -2619,12 +2619,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
mutex_lock(&adev->gfx.gfx_off_mutex);
if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
}
mutex_unlock(&adev->gfx.gfx_off_mutex);
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
}
/**

---
@@ -556,24 +556,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->gfx.gfx_off_mutex);
if (!enable)
adev->gfx.gfx_off_req_count++;
else if (adev->gfx.gfx_off_req_count > 0)
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
goto unlock;
adev->gfx.gfx_off_req_count--;
if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else if (!enable && adev->gfx.gfx_off_state) {
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else {
if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.funcs->init_spm_golden) {
dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
amdgpu_gfx_init_spm_golden(adev);
if (adev->gfx.gfx_off_state &&
!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {
dev_dbg(adev->dev,
"GFXOFF is disabled, re-init SPM golden settings\n");
amdgpu_gfx_init_spm_golden(adev);
}
}
}
adev->gfx.gfx_off_req_count++;
}
unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}

---
@@ -5122,6 +5122,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}
static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (adev->pdev->device == 0x6860);
}
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -5158,9 +5165,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
}
out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
if (vega10_get_power_profile_mode_quirks(hwmgr))
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << power_profile_mode,
NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);
hwmgr->power_profile_mode = power_profile_mode;
return 0;

---
@@ -865,8 +865,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -875,7 +873,7 @@
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
return err;
}
#if defined(CONFIG_X86)

---
@@ -296,6 +296,14 @@ static void intel_timeline_fini(struct intel_timeline *timeline)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
/*
* A small race exists between intel_gt_retire_requests_timeout and
* intel_timeline_exit which could result in the syncmap not getting
* free'd. Rather than work to hard to seal this race, simply cleanup
* the syncmap on fini.
*/
i915_syncmap_free(&timeline->sync);
}
struct intel_timeline *

---
@@ -2202,6 +2202,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
/* Finish updating head(s)...
*
* NVD is rather picky about both where window assignments can change,
* *and* about certain core and window channel states matching.
*
* The EFI GOP driver on newer GPUs configures window channels with a
* different output format to what we do, and the core channel update
* in the assign_windows case above would result in a state mismatch.
*
* Delay some of the head update until after that point to workaround
* the issue. This only affects the initial modeset.
*
* TODO: handle this better when adding flexible window mapping
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
asyh->set.mask, asyh->clr.mask);
if (asyh->set.mask) {
nv50_head_flush_set_wndw(head, asyh);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
}
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);

---
@@ -49,11 +49,8 @@ nv50_head_flush_clr(struct nv50_head *head,
}
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -61,6 +58,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.load);
head->func->olut_set(head, asyh);
}
}
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);

---
@@ -21,6 +21,7 @@ struct nv50_head {
struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
void nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush);

---
@@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}
static void
void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);

---
@@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000

---
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
/* The EFI GOP driver on Ampere can leave unused DP links routed,
* which we don't expect. The DisableLT IED script *should* get
* us back to where we need to be.
*/
if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
nvkm_dp_disable(outp, ior);
return;
}

---
@@ -1691,6 +1691,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
if (nq)
nq->budget++;
atomic_inc(&rdev->srq_count);
spin_lock_init(&srq->lock);
return 0;

---
@@ -1410,7 +1410,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
memset(&rattr, 0, sizeof(rattr));
rc = bnxt_re_register_netdev(rdev);
if (rc) {
rtnl_unlock();
ibdev_err(&rdev->ibdev,
"Failed to register with netedev: %#x\n", rc);
return -EINVAL;

---
@@ -377,6 +377,7 @@ static int efa_enable_msix(struct efa_dev *dev)
}
if (irq_num != msix_vecs) {
efa_disable_msix(dev);
dev_err(&dev->pdev->dev,
"Allocated %d MSI-X (out of %d requested)\n",
irq_num, msix_vecs);

---
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int i;
struct sdma_desc *descp;
/* Handle last descriptor */
if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if (unlikely(tx->num_desc == MAX_DESC))
goto enomem;
tx->descp = kmalloc_array(
MAX_DESC,
sizeof(struct sdma_desc),
GFP_ATOMIC);
if (!tx->descp)
descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
if (!descp)
goto enomem;
tx->descp = descp;
/* reserve last descriptor for coalescing */
tx->desc_limit = MAX_DESC - 1;

---
@@ -173,9 +173,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
#ifdef CONFIG_X86_32
CRASHTYPE(DOUBLE_FAULT),
#endif
};

---
@@ -295,8 +295,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.ops = &sdhci_iproc_bcm2711_ops,
};

---
@@ -419,7 +419,7 @@ static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
* fixed, so let's return the maximum possible value so that
* wear-leveling layers move the data immediately.
*/
return nanddev_get_ecc_conf(nand)->strength;
return nanddev_get_ecc_requirements(nand)->strength;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
@@ -1090,8 +1090,8 @@ static int spinand_init(struct spinand_device *spinand)
mtd->oobavail = ret;
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
return 0;

---
@@ -84,11 +84,11 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return nanddev_get_ecc_requirements(nand)->strength;
if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
if (WARN_ON(eccsr > nanddev_get_ecc_requirements(nand)->strength ||
!eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return nanddev_get_ecc_requirements(nand)->strength;
return eccsr;

---
@@ -90,12 +90,12 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->spimem, &op))
return nanddev_get_ecc_conf(nand)->strength;
return nanddev_get_ecc_requirements(nand)->strength;
mbf >>= 4;
if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
return nanddev_get_ecc_conf(nand)->strength;
if (WARN_ON(mbf > nanddev_get_ecc_requirements(nand)->strength || !mbf))
return nanddev_get_ecc_requirements(nand)->strength;
return mbf;

---
@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
if (id == ESD_EV_CAN_ERROR_EXT) {
u8 state = msg->msg.rx.data[0];
u8 ecc = msg->msg.rx.data[1];
u8 txerr = msg->msg.rx.data[2];
u8 rxerr = msg->msg.rx.data[3];
u8 rxerr = msg->msg.rx.data[2];
u8 txerr = msg->msg.rx.data[3];
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) {

---
@@ -1161,11 +1161,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
* And the other port's port matrix cannot be broken when the
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
!dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)

---
@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
goto err;
goto err_mdio_remove;
}
return 0;
err_mdio_remove:
xge_mdio_remove(ndev);
err:
free_netdev(ndev);

---
@@ -5072,6 +5072,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
ret = -ENOMEM;
goto bye;
}
bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -6792,13 +6793,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_memwin(adapter);
err = adap_init0(adapter, 0);
#ifdef CONFIG_DEBUG_FS
bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
setup_memwin_rdma(adapter);
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |

---
@@ -521,9 +521,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* wait to ensure that the firmware completes the possible left
* over commands.
*/
msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
hclge_cmd_uninit_regs(&hdev->hw);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);

---
@@ -9,6 +9,7 @@
#include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev;
@@ -262,6 +263,9 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* clear hardware resource command */
HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,

---
@@ -283,21 +283,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
int ret;
u8 i;
memset(pfc, 0, sizeof(*pfc));
pfc->pfc_cap = hdev->pfc_max;
prio_tc = hdev->tm_info.prio_tc;
pfc_map = hdev->tm_info.hw_pfc_map;
/* Pfc setting is based on TC */
for (i = 0; i < hdev->tm_info.num_tc; i++) {
for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
pfc->pfc_en |= BIT(j);
}
}
pfc->pfc_en = hdev->tm_info.pfc_en;
ret = hclge_pfc_tx_stats_get(hdev, requests);
if (ret)

---
@@ -8792,7 +8792,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan;
struct hclge_vport_vlan_cfg *vlan, *tmp;
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
if (vlan->vlan_id == vlan_id)
return;
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
@@ -10030,6 +10034,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
}
}
static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* This new command is only supported by new firmware, it will
* fail with older firmware. Error value -EOPNOTSUPP can only be
* returned by older firmware running this command, to keep code
* backward compatible we will override this value and return
* success.
*/
if (ret && ret != -EOPNOTSUPP) {
dev_err(&hdev->pdev->dev,
"failed to clear hw resource, ret = %d\n", ret);
return ret;
}
return 0;
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -10067,6 +10093,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_cmd_uninit;
ret = hclge_clear_hw_resource(hdev);
if (ret)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
if (ret)
goto err_cmd_uninit;

---
@@ -472,12 +472,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* wait to ensure that the firmware completes the possible left
* over commands.
*/
msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
hclgevf_cmd_uninit_regs(&hdev->hw);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}

---
@@ -8,6 +8,7 @@
#include "hnae3.h"
#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1

---
@@ -1006,6 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
u16 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
@@ -1059,7 +1061,17 @@
E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
if (lat_enc > max_ltr_enc)
lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((lat_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));
max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((max_ltr_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
}
@@ -4122,13 +4134,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
e_dbg("NVM Checksum Invalid\n");
if (hw->mac.type < e1000_pch_cnp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
}
}
return e1000e_validate_nvm_checksum_generic(hw);

---
@@ -274,8 +274,11 @@
/* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8
#define E1000_LTRV_VALUE_MASK 0x000003FF
#define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5
#define E1000_LTRV_SCALE_SHIFT 10
#define E1000_LTRV_SCALE_MASK 0x00001C00
#define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30)
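
A worked example of the decode that the ich8lan.c hunk introduces, using the
masks above (standalone sketch, illustrative values only):

#include <stdint.h>
#include <stdio.h>

#define LTRV_VALUE_MASK   0x000003FF
#define LTRV_SCALE_MASK   0x00001C00
#define LTRV_SCALE_SHIFT  10
#define LTRV_SCALE_FACTOR 5   /* each scale step multiplies by 2^5 = 32 */

static uint32_t ltr_decode_ns(uint16_t enc)
{
	uint16_t value = enc & LTRV_VALUE_MASK;
	uint16_t scale = (enc & LTRV_SCALE_MASK) >> LTRV_SCALE_SHIFT;

	/* illustrative: assumes scale <= 6 so the shift fits in 32 bits */
	return value * (1U << (LTRV_SCALE_FACTOR * scale));
}

int main(void)
{
	/* scale 2 (x1024) and value 3 decode to 3072 ns; comparing raw
	 * encodings instead of decoded values is what the fix corrects. */
	printf("%u ns\n", ltr_decode_ns((2 << LTRV_SCALE_SHIFT) | 3));
	return 0;
}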

---
@@ -23,7 +23,9 @@ static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
status = ice_read_pba_string(hw, (u8 *)buf, len);
if (status)
return -EIO;
/* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
ice_stat_str(status));
return 0;
}

---
@@ -138,6 +138,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl_ext;
if (!pci_device_is_present(adapter->pdev))
return;
/* Let firmware take over control of h/w */
ctrl_ext = rd32(IGC_CTRL_EXT);
wr32(IGC_CTRL_EXT,
@@ -3782,26 +3785,29 @@ void igc_down(struct igc_adapter *adapter)
igc_ptp_suspend(adapter);
/* disable receives in the hardware */
rctl = rd32(IGC_RCTL);
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* flush and sleep below */
if (pci_device_is_present(adapter->pdev)) {
/* disable receives in the hardware */
rctl = rd32(IGC_RCTL);
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* flush and sleep below */
}
/* set trans_start so we don't get spurious watchdogs during reset */
netif_trans_update(netdev);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
/* disable transmits in the hardware */
tctl = rd32(IGC_TCTL);
tctl &= ~IGC_TCTL_EN;
wr32(IGC_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
usleep_range(10000, 20000);
if (pci_device_is_present(adapter->pdev)) {
/* disable transmits in the hardware */
tctl = rd32(IGC_TCTL);
tctl &= ~IGC_TCTL_EN;
wr32(IGC_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
usleep_range(10000, 20000);
igc_irq_disable(adapter);
igc_irq_disable(adapter);
}
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
@@ -4755,7 +4761,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
for (i = 0; i < adapter->num_tx_queues; i++) {
if (e->gate_mask & BIT(i))
queue_uses[i]++;
@@ -4812,7 +4818,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
end_time += e->interval;
for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i)))

---
@@ -557,7 +557,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
igc_ptp_time_save(adapter);
if (pci_device_is_present(adapter->pdev))
igc_ptp_time_save(adapter);
}
/**

---
@@ -103,7 +103,7 @@
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_IN_PRGRS BIT(0)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
/* Only exists on Armada XP and Armada 370 */

---
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
ocelot->map[target][reg & REG_MASK] + offset, &val);
return val;
}
EXPORT_SYMBOL(__ocelot_read_ix);
EXPORT_SYMBOL_GPL(__ocelot_read_ix);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
{
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
regmap_write(ocelot->targets[target],
ocelot->map[target][reg & REG_MASK] + offset, val);
}
EXPORT_SYMBOL(__ocelot_write_ix);
EXPORT_SYMBOL_GPL(__ocelot_write_ix);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
ocelot->map[target][reg & REG_MASK] + offset,
mask, val);
}
EXPORT_SYMBOL(__ocelot_rmw_ix);
EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
{
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
return val;
}
EXPORT_SYMBOL(ocelot_port_readl);
EXPORT_SYMBOL_GPL(ocelot_port_readl);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
{
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
}
EXPORT_SYMBOL(ocelot_port_writel);
EXPORT_SYMBOL_GPL(ocelot_port_writel);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
EXPORT_SYMBOL(ocelot_port_rmwl);
EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
return 0;
}
EXPORT_SYMBOL(ocelot_regfields_init);
EXPORT_SYMBOL_GPL(ocelot_regfields_init);
static struct regmap_config ocelot_regmap_config = {
.reg_bits = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
EXPORT_SYMBOL(ocelot_regmap_init);
EXPORT_SYMBOL_GPL(ocelot_regmap_init);

---
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags;
int rc = -EINVAL;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0;
int rc = 0;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_rx->lock, flags);
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
spin_unlock_irqrestore(&p_rx->lock, flags);
return 0;
}
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
@@ -1725,6 +1743,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn)
return -EINVAL;
p_rx = &p_ll2_conn->rx_queue;
if (!p_rx->set_prod_addr)
return -EIO;
spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))

---
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev,
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}

---
@@ -689,14 +689,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -727,6 +731,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
@@ -751,19 +756,23 @@
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
if (fpe && !priv->dma_cap.fpesel)
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
}
ret = stmmac_fpe_configure(priv, priv->ioaddr,
priv->plat->tx_queues_to_use,
priv->plat->rx_queues_to_use, fpe);
if (ret && fpe) {
mutex_unlock(&priv->plat->est->lock);
netdev_err(priv->dev, "failed to enable Frame Preemption\n");
return ret;
}
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -773,9 +782,14 @@
return 0;
disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
if (priv->plat->est) {
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
return ret;
}

---
@@ -471,7 +471,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
write_mii_word(pegasus, 0, 0x1b, &auxmode);
}
return 0;
return ret;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
@@ -861,7 +861,7 @@ static int pegasus_open(struct net_device *net)
if (!pegasus->rx_skb)
goto exit;
res = set_registers(pegasus, EthID, 6, net->dev_addr);
set_registers(pegasus, EthID, 6, net->dev_addr);
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),

---
@@ -38,6 +38,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
bool hw_match = false;
u32 size = 0;
int ret;
@@ -84,6 +85,9 @@
break;
}
if (hw_match)
break;
mac_type = le16_to_cpup((__le16 *)data);
rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
@@ -91,15 +95,9 @@
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id);
if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
IWL_DEBUG_FW(trans,
"HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
ret = -ENOENT;
goto out;
}
if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
struct iwl_pnvm_section *section = (void *)data;
@@ -150,6 +148,15 @@
}
done:
if (!hw_match) {
IWL_DEBUG_FW(trans,
"HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
CSR_HW_REV_TYPE(trans->hw_rev),
CSR_HW_RFID_TYPE(trans->hw_rf_id));
ret = -ENOENT;
goto out;
}
if (!size) {
IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
ret = -ENOENT;

---
@@ -870,8 +870,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
}
}
/* There should be one of more OPP defined */
if (WARN_ON(!count)) {
/* There should be one or more OPPs defined */
if (!count) {
dev_err(dev, "%s: no supported OPPs", __func__);
ret = -ENOENT;
goto remove_static_opp;
}

---
@@ -808,12 +808,15 @@ store_state_field(struct device *dev, struct device_attribute *attr,
ret = scsi_device_set_state(sdev, state);
/*
* If the device state changes to SDEV_RUNNING, we need to
* rescan the device to revalidate it, and run the queue to
* avoid I/O hang.
* run the queue to avoid I/O hang, and rescan the device
* to revalidate it. Running the queue first is necessary
* because another thread may be waiting inside
* blk_mq_freeze_queue_wait() and because that call may be
* waiting for pending I/O to finish.
*/
if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true);
scsi_rescan_device(dev);
}
mutex_unlock(&sdev->state_mutex);

---
@@ -246,6 +246,8 @@ int vt_waitactive(int n)
*
* XXX It should at least call into the driver, fbdev's definitely need to
* restore their engine state. --BenH
*
* Called with the console lock held.
*/
static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
{
@ -262,7 +264,6 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
return -EINVAL;
}
/* FIXME: this needs the console lock extending */
if (vc->vc_mode == mode)
return 0;
@ -271,12 +272,10 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
return 0;
/* explicitly blank/unblank the screen if switching modes */
console_lock();
if (mode == KD_TEXT)
do_unblank_screen(1);
else
do_blank_screen(1);
console_unlock();
return 0;
}
@ -378,7 +377,10 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!perm)
return -EPERM;
return vt_kdsetmode(vc, arg);
console_lock();
ret = vt_kdsetmode(vc, arg);
console_unlock();
return ret;
case KDGETMODE:
return put_user(vc->vc_mode, (int __user *)arg);

---
@@ -1123,19 +1123,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
u8 trbs_left;
/*
* If enqueue & dequeue are equal than it is either full or empty.
*
* One way to know for sure is if the TRB right before us has HWO bit
* set or not. If it has, then we're definitely full and can't fit any
* more transfers in our ring.
* If the enqueue & dequeue are equal then the TRB ring is either full
* or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
* pending to be processed by the driver.
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
/*
* If there is any request remained in the started_list at
* this point, that means there is no TRB available.
*/
if (!list_empty(&dep->started_list))
return 0;
return DWC3_TRB_NUM - 1;
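
The general problem this hunk addresses, sketched generically (hypothetical
ring, not the dwc3 structures): when the enqueue and dequeue indices are
equal a ring is either completely full or completely empty, so an auxiliary
signal is needed to tell the two apart; the fix uses the endpoint's
started-request list as that signal instead of the HWO bit:

#include <stdbool.h>

struct ring {
	unsigned int enq;    /* producer index */
	unsigned int deq;    /* consumer index */
	bool have_pending;   /* e.g. a non-empty started-request list */
};

/* Free slots in a ring of size entries (one slot is sacrificed). */
static unsigned int ring_space(const struct ring *r, unsigned int size)
{
	if (r->enq == r->deq)
		return r->have_pending ? 0 : size - 1;  /* full vs. empty */
	if (r->enq > r->deq)
		return size - 1 - (r->enq - r->deq);
	return r->deq - r->enq - 1;
}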

---
@@ -312,8 +312,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
if (!prm->ep_enabled)
return;
prm->ep_enabled = false;
audio_dev = uac->audio_dev;
params = &audio_dev->params;
@@ -331,11 +329,12 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
}
}
prm->ep_enabled = false;
if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
int u_audio_start_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;

---
@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
return 0;
case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
return 0;
dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
return -ENOENT;
case RENESAS_ROM_STATUS_ERROR: /* Error State */
default: /* All other states are marked as "Reserved states" */
@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
u8 fw_state;
int err;
/* Check if device has ROM and loaded, if so skip everything */
err = renesas_check_rom(pdev);
if (err) { /* we have rom */
err = renesas_check_rom_state(pdev);
if (!err)
return err;
}
/*
* Test if the device is actually needing the firmware. As most
* BIOSes will initialize the device for us. If the device is
@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
(struct xhci_driver_data *)id->driver_data;
const char *fw_name = driver_data->firmware;
const struct firmware *fw;
bool has_rom;
int err;
/* Check if device has ROM and loaded, if so skip everything */
has_rom = renesas_check_rom(pdev);
if (has_rom) {
err = renesas_check_rom_state(pdev);
if (!err)
return 0;
else if (err != -ENOENT)
has_rom = false;
}
err = renesas_fw_check_running(pdev);
/* Continue ahead, if the firmware is already running. */
if (err == 0)
return 0;
/* no firmware interface available */
if (err != 1)
return err;
return has_rom ? 0 : err;
pci_dev_get(pdev);
err = request_firmware(&fw, fw_name, &pdev->dev);
err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
pci_dev_put(pdev);
if (err) {
dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
if (has_rom) {
dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
fw_name);
return 0;
}
dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
fw_name, err);
return err;
}
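
The load-or-fall-back flow above, reduced to a sketch (hypothetical helper,
not the xhci-pci-renesas code):

#include <linux/firmware.h>
#include <linux/device.h>

/* Try to load external firmware; if it is absent but the device carries
 * usable ROM contents, fall back to the ROM instead of failing the probe. */
static int load_fw_or_rom(struct device *dev, const char *name, bool has_rom)
{
	const struct firmware *fw;
	int err;

	err = firmware_request_nowarn(&fw, name, dev);  /* no scary log on miss */
	if (err)
		return has_rom ? 0 : err;

	/* ... download fw->data / fw->size to the device here ... */
	release_firmware(fw);
	return 0;
}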

---
@@ -853,7 +853,6 @@ static struct usb_serial_driver ch341_device = {
.owner = THIS_MODULE,
.name = "ch341-uart",
},
.bulk_in_size = 512,
.id_table = id_table,
.num_ports = 1,
.open = ch341_open,

---
@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */


@ -341,7 +341,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov;
else {
iov = riov;
if (unlikely(wiov && wiov->i)) {
if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;


@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev);
/*
* Device is marked broken on surprise removal so that virtio upper
* layers can abort any ongoing operation.
*/
if (!pci_device_is_present(pci_dev))
virtio_break_device(&vp_dev->vdev);
pci_disable_sriov(pci_dev);
unregister_virtio_device(&vp_dev->vdev);


@ -2262,7 +2262,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->broken;
return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
@ -2276,7 +2276,9 @@ void virtio_break_device(struct virtio_device *dev)
list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
vq->broken = true;
/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
WRITE_ONCE(vq->broken, true);
}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
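
virtio_break_device() can run from virtio_pci_remove() on surprise removal while other contexts poll virtqueue_is_broken(), so the hunk above makes the flag update tear-free with a WRITE_ONCE()/READ_ONCE() pair. In portable C, the closest analog is a relaxed atomic, which like the kernel macros guarantees tear-free access but imposes no ordering; a minimal sketch, with illustrative names:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct vq {
		atomic_bool broken; /* stand-in for vq->broken plus *_ONCE() */
	};

	/* Reader side, as in virtqueue_is_broken() after the fix. */
	static bool vq_is_broken(struct vq *vq)
	{
		return atomic_load_explicit(&vq->broken, memory_order_relaxed);
	}

	/* Writer side, as in virtio_break_device(). */
	static void vq_break(struct vq *vq)
	{
		atomic_store_explicit(&vq->broken, true, memory_order_relaxed);
	}

	int main(void)
	{
		struct vq vq;

		atomic_init(&vq.broken, false);
		vq_break(&vq);
		printf("broken: %d\n", vq_is_broken(&vq)); /* 1 */
		return 0;
	}

Relaxed ordering suffices here because, as with READ_ONCE()/WRITE_ONCE(), only a tear-free load and store are required, not ordering against other memory operations.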


@ -149,6 +149,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!name)
return NULL;
if (index >= vdpa->nvqs)
return ERR_PTR(-ENOENT);
/* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);


@ -308,6 +308,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
mod);
}
/*
* Called after every buffered, direct I/O or memory-mapped write.
*
* This is to ensure that if we write to a file that was previously fsynced in
* the current transaction, then try to fsync it again in the same transaction,
* we will know that there were changes in the file and that it needs to be
* logged.
*/
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
spin_lock(&inode->lock);
inode->last_sub_trans = inode->root->log_transid;
spin_unlock(&inode->lock);
}
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
int ret = 0;


@ -1862,7 +1862,6 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos;
u64 end_pos;
ssize_t num_written = 0;
@ -2006,14 +2005,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
inode_unlock(inode);
/*
* We also have to set last_sub_trans to the current log transid,
* otherwise subsequent syncs to a file that's been synced in this
* transaction will appear to have already occurred.
*/
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_sub_trans = root->log_transid;
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
if (num_written > 0)
num_written = generic_write_sync(iocb, num_written);


@ -547,7 +547,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
if (inode_need_compress(BTRFS_I(inode), start, end)) {
WARN_ON(pages);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
@ -8449,9 +8449,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
set_page_dirty(page);
SetPageUptodate(page);
BTRFS_I(inode)->last_trans = fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);


@ -171,7 +171,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
spin_lock(&inode->lock);
inode->last_trans = trans->transaction->transid;
inode->last_sub_trans = inode->root->log_transid;
inode->last_log_commit = inode->root->last_log_commit;
inode->last_log_commit = inode->last_sub_trans - 1;
spin_unlock(&inode->lock);
}


@ -2059,7 +2059,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (IS_ERR(device)) {
if (PTR_ERR(device) == -ENOENT &&
strcmp(device_path, "missing") == 0)
device_path && strcmp(device_path, "missing") == 0)
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
else
ret = PTR_ERR(device);


@ -1752,7 +1752,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
struct ceph_cap_flush *cf;
cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
cf->is_capsnap = false;
return cf;
}
void ceph_free_cap_flush(struct ceph_cap_flush *cf)
@ -1787,7 +1791,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
prev->wake = true;
wake = false;
}
list_del(&cf->g_list);
list_del_init(&cf->g_list);
return wake;
}
@ -1802,7 +1806,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
prev->wake = true;
wake = false;
}
list_del(&cf->i_list);
list_del_init(&cf->i_list);
return wake;
}
@ -2422,7 +2426,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
if (!cf->caps) {
if (cf->is_capsnap) {
last_snap_flush = cf->tid;
break;
}
@ -2441,7 +2445,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
first_tid = cf->tid + 1;
if (cf->caps) {
if (!cf->is_capsnap) {
struct cap_msg_args arg;
dout("kick_flushing_caps %p cap %p tid %llu %s\n",
@ -3564,7 +3568,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
cleaned = cf->caps;
/* Is this a capsnap? */
if (cf->caps == 0)
if (cf->is_capsnap)
continue;
if (cf->tid <= flush_tid) {
@ -3637,8 +3641,9 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
while (!list_empty(&to_remove)) {
cf = list_first_entry(&to_remove,
struct ceph_cap_flush, i_list);
list_del(&cf->i_list);
ceph_free_cap_flush(cf);
list_del_init(&cf->i_list);
if (!cf->is_capsnap)
ceph_free_cap_flush(cf);
}
if (wake_ci)


@ -1618,7 +1618,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_lock(&mdsc->cap_dirty_lock);
list_for_each_entry(cf, &to_remove, i_list)
list_del(&cf->g_list);
list_del_init(&cf->g_list);
if (!list_empty(&ci->i_dirty_item)) {
pr_warn_ratelimited(
@ -1670,8 +1670,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_cap_flush *cf;
cf = list_first_entry(&to_remove,
struct ceph_cap_flush, i_list);
list_del(&cf->i_list);
ceph_free_cap_flush(cf);
list_del_init(&cf->i_list);
if (!cf->is_capsnap)
ceph_free_cap_flush(cf);
}
wake_up_all(&ci->i_cap_wq);


@ -487,6 +487,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
return;
}
capsnap->cap_flush.is_capsnap = true;
INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);


@ -181,8 +181,9 @@ struct ceph_cap {
struct ceph_cap_flush {
u64 tid;
int caps; /* 0 means capsnap */
int caps;
bool wake; /* wake up flush waiters when finished? */
bool is_capsnap; /* true means capsnap */
struct list_head g_list; // global
struct list_head i_list; // per inode
};
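
The ceph hunks above replace the `caps == 0` sentinel with an explicit is_capsnap flag chiefly so teardown paths can tell heap-allocated cap flushes from the one embedded in a capsnap, which must never be freed on its own (the switch to list_del_init() likewise makes repeated detaching harmless). A small userspace sketch of that ownership rule; the struct names are illustrative, not the ceph types:

	#include <stdbool.h>
	#include <stdlib.h>

	struct flush {
		bool embedded;        /* like is_capsnap: lifetime owned elsewhere */
	};

	struct snap {
		struct flush flush;   /* embedded member, freed with the snap */
	};

	static void release_flush(struct flush *f)
	{
		if (!f->embedded)
			free(f);      /* heap-allocated flushes only */
	}

	int main(void)
	{
		struct flush *heap_f = calloc(1, sizeof(*heap_f));
		struct snap *s = calloc(1, sizeof(*s));

		s->flush.embedded = true;
		release_flush(heap_f);    /* freed */
		release_flush(&s->flush); /* skipped: owned by the snap */
		free(s);
		return 0;
	}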


@ -390,6 +390,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
*/
take_dentry_name_snapshot(&name, real);
this = lookup_one_len(name.name.name, connected, name.name.len);
release_dentry_name_snapshot(&name);
err = PTR_ERR(this);
if (IS_ERR(this)) {
goto fail;
@ -404,7 +405,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
}
out:
release_dentry_name_snapshot(&name);
dput(parent);
inode_unlock(dir);
return this;


@ -363,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
* _very_ unlikely case that the pipe was full, but we got
* no data.
*/
if (unlikely(was_full)) {
if (unlikely(was_full))
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
/*
* But because we didn't read anything, at this point we can
@ -385,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
wake_next_reader = false;
__pipe_unlock(pipe);
if (was_full) {
if (was_full)
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (wake_next_reader)
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
if (ret > 0)
file_accessed(filp);
return ret;
@ -444,9 +442,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
#endif
/*
* Epoll nonsensically wants a wakeup whether the pipe
* was already empty or not.
*
* If it wasn't empty we try to merge new data into
* the last buffer.
*
@ -455,9 +450,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* spanning multiple pages.
*/
head = pipe->head;
was_empty = true;
was_empty = pipe_empty(head, pipe->tail);
chars = total_len & (PAGE_SIZE-1);
if (chars && !pipe_empty(head, pipe->tail)) {
if (chars && !was_empty) {
unsigned int mask = pipe->ring_size - 1;
struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
@ -568,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* become empty while we dropped the lock.
*/
__pipe_unlock(pipe);
if (was_empty) {
if (was_empty)
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
__pipe_lock(pipe);
was_empty = pipe_empty(pipe->head, pipe->tail);
@ -590,11 +584,13 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* This is particularly important for small writes, because of
* how (for example) the GNU make jobserver uses small writes to
* wake up pending jobs
*
* Epoll nonsensically wants a wakeup whether the pipe
* was already empty or not.
*/
if (was_empty) {
if (was_empty || pipe->poll_usage)
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
if (wake_next_writer)
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@ -654,6 +650,9 @@ pipe_poll(struct file *filp, poll_table *wait)
struct pipe_inode_info *pipe = filp->private_data;
unsigned int head, tail;
/* Epoll has some historically nasty semantics; this enables them */
pipe->poll_usage = 1;
/*
* Reading pipe state only -- no need for acquiring the semaphore.
*
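
The fs/pipe.c changes trade conditional wakeups for unconditional kill_fasync() calls and remember, via poll_usage, whether the pipe has ever been polled, so epoll waiters get their historically expected wakeup on every write. The user-visible contract can be checked from userspace; this Linux-only demo exercises only the interface, not the kernel internals:

	#include <stdio.h>
	#include <sys/epoll.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		int ep = epoll_create1(0);
		struct epoll_event ev = { .events = EPOLLIN };

		if (ep < 0 || pipe(fds))
			return 1;
		ev.data.fd = fds[0];
		epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &ev);

		if (write(fds[1], "x", 1) != 1)        /* must wake the watcher */
			return 1;
		int n = epoll_wait(ep, &ev, 1, 1000);
		printf("epoll_wait returned %d (%s)\n", n,
		       n == 1 && (ev.events & EPOLLIN) ? "EPOLLIN" : "no event");
		return 0;
	}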


@ -20,14 +20,25 @@ struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#define BPF_CGROUP_STORAGE_NEST_MAX 8
struct bpf_cgroup_storage_info {
struct task_struct *task;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
/* On each CPU, permit at most BPF_CGROUP_STORAGE_NEST_MAX tasks
* to use bpf cgroup storage simultaneously.
*/
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@ -156,13 +167,42 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
*storage[MAX_BPF_CGROUP_STORAGE_TYPE])
static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
*storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
enum bpf_cgroup_storage_type stype;
int i, err = 0;
for_each_cgroup_storage_type(stype)
this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
preempt_disable();
for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
continue;
this_cpu_write(bpf_cgroup_storage_info[i].task, current);
for_each_cgroup_storage_type(stype)
this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
storage[stype]);
goto out;
}
err = -EBUSY;
WARN_ON_ONCE(1);
out:
preempt_enable();
return err;
}
static inline void bpf_cgroup_storage_unset(void)
{
int i;
for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
continue;
this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
return;
}
}
struct bpf_cgroup_storage *
@ -410,8 +450,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
static inline void bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
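
The per-CPU storage rework above replaces a single pointer with BPF_CGROUP_STORAGE_NEST_MAX slots, each tagged with the owning task, so nested program invocations on one CPU no longer clobber each other. A userspace sketch of the claim/lookup/release scheme; the reverse walk on lookup and release mirrors the kernel's, so the innermost (most recent) claim wins. All names here are illustrative:

	#include <stddef.h>
	#include <stdio.h>

	#define NEST_MAX 8 /* like BPF_CGROUP_STORAGE_NEST_MAX */

	struct slot {
		const void *task;   /* NULL means free */
		void *storage;
	};

	static struct slot slots[NEST_MAX];

	static int storage_set(const void *task, void *storage)
	{
		for (int i = 0; i < NEST_MAX; i++) {
			if (slots[i].task)
				continue;
			slots[i].task = task;
			slots[i].storage = storage;
			return 0;
		}
		return -1;          /* all slots busy: like the -EBUSY path */
	}

	static void storage_unset(const void *task)
	{
		for (int i = NEST_MAX - 1; i >= 0; i--) {
			if (slots[i].task != task)
				continue;
			slots[i].task = NULL;
			return;
		}
	}

	static void *storage_get(const void *task)
	{
		for (int i = NEST_MAX - 1; i >= 0; i--)
			if (slots[i].task == task)
				return slots[i].storage;
		return NULL;        /* caller must tolerate a miss */
	}

	int main(void)
	{
		int task_a, task_b;

		storage_set(&task_a, "A's storage");
		storage_set(&task_b, "B's storage");
		printf("%s\n", (char *)storage_get(&task_b)); /* B's storage */
		storage_unset(&task_b);
		storage_unset(&task_a);
		return 0;
	}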


@ -1097,9 +1097,14 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
goto _out; \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
if (set_cg_storage) \
bpf_cgroup_storage_set(_item->cgroup_storage); \
_ret &= func(_prog, ctx); \
if (!set_cg_storage) { \
_ret &= func(_prog, ctx); \
} else { \
if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
break; \
_ret &= func(_prog, ctx); \
bpf_cgroup_storage_unset(); \
} \
_item++; \
} \
_out: \
@ -1143,8 +1148,10 @@ _out: \
_array = rcu_dereference(array); \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
bpf_cgroup_storage_set(_item->cgroup_storage); \
if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
break; \
ret = func(_prog, ctx); \
bpf_cgroup_storage_unset(); \
_ret &= (ret & 1); \
_cn |= (ret & 2); \
_item++; \


@ -3931,6 +3931,10 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf *, int);
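
is_socket_ioctl_cmd() works because every socket ioctl encodes SOCK_IOC_TYPE (0x89) in the type byte that _IOC_TYPE() extracts. This can be verified from userspace on Linux:

	#include <linux/ioctl.h>
	#include <linux/sockios.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static int is_socket_ioctl_cmd(unsigned int cmd)
	{
		return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
	}

	int main(void)
	{
		printf("SIOCGIFNAME: %d\n", is_socket_ioctl_cmd(SIOCGIFNAME)); /* 1 */
		printf("TIOCSTI:     %d\n", is_socket_ioctl_cmd(TIOCSTI));     /* 0 */
		return 0;
	}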


@ -7,7 +7,7 @@
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags);
unsigned long *flags, struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
&___flags); \
&___flags, THIS_MODULE); \
} \
} \
___ret; \


@ -48,6 +48,7 @@ struct pipe_buffer {
* @files: number of struct file referring this pipe (protected by ->i_lock)
* @r_counter: reader counter
* @w_counter: writer counter
* @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
@ -70,6 +71,7 @@ struct pipe_inode_info {
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
unsigned int poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;


@ -33,6 +33,8 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))
#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);


@ -60,6 +60,9 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
#ifdef CONFIG_DEBUG_LOCK_ALLOC


@ -15,7 +15,8 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
short srcu_idx; /* Current reader array element. */
unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
struct swait_queue_head srcu_wq;
@ -59,7 +60,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
idx = READ_ONCE(ssp->srcu_idx);
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx;
}
@ -80,7 +81,7 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
{
int idx;
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx,
READ_ONCE(ssp->srcu_lock_nesting[!idx]),


@ -112,6 +112,7 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
struct mutex lock;
int enable;
u32 btr_offset[2];
u32 btr[2];


@ -593,7 +593,6 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
spin_lock(&hash_lock);
}
spin_unlock(&hash_lock);
put_tree(victim);
}
/*
@ -602,6 +601,7 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
static void prune_one(struct audit_tree *victim)
{
prune_tree_chunks(victim, false);
put_tree(victim);
}
/* trim the uncommitted chunks from tree */


@ -372,8 +372,8 @@ const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
};
#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
@ -382,10 +382,17 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
* verifier checks that its value is correct.
*/
enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
struct bpf_cgroup_storage *storage;
struct bpf_cgroup_storage *storage = NULL;
void *ptr;
int i;
storage = this_cpu_read(bpf_cgroup_storage[stype]);
for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
continue;
storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
break;
}
if (stype == BPF_CGROUP_STORAGE_SHARED)
ptr = &READ_ONCE(storage->buf)->data[0];


@ -9,10 +9,11 @@
#include <linux/slab.h>
#include <uapi/linux/btf.h>
DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#ifdef CONFIG_CGROUP_BPF
DEFINE_PER_CPU(struct bpf_cgroup_storage_info,
bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
#include "../cgroup/cgroup-internal.h"
#define LOCAL_STORAGE_CREATE_FLAG_MASK \


@ -4693,8 +4693,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_RINGBUF:
if (func_id != BPF_FUNC_ringbuf_output &&
func_id != BPF_FUNC_ringbuf_reserve &&
func_id != BPF_FUNC_ringbuf_submit &&
func_id != BPF_FUNC_ringbuf_discard &&
func_id != BPF_FUNC_ringbuf_query)
goto error;
break;
@ -4798,6 +4796,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_ringbuf_output:
case BPF_FUNC_ringbuf_reserve:
case BPF_FUNC_ringbuf_query:
if (map->map_type != BPF_MAP_TYPE_RINGBUF)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;


@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
return (__force void *)k->set_child_tid;
}
/*
* Variant of to_kthread() that doesn't assume @p is a kthread.
*
* Per construction; when:
*
* (p->flags & PF_KTHREAD) && p->set_child_tid
*
* the task is both a kthread and struct kthread is persistent. However,
* PF_KTHREAD on its own is not persistent: kernel_thread() can exec()
* (see umh.c and begin_new_exec()).
*/
static inline struct kthread *__to_kthread(struct task_struct *p)
{
void *kthread = (__force void *)p->set_child_tid;
if (kthread && !(p->flags & PF_KTHREAD))
kthread = NULL;
return kthread;
}
void free_kthread_struct(struct task_struct *k)
{
struct kthread *kthread;
@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
*/
void *kthread_func(struct task_struct *task)
{
if (task->flags & PF_KTHREAD)
return to_kthread(task)->threadfn;
struct kthread *kthread = __to_kthread(task);
if (kthread)
return kthread->threadfn;
return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);
@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
*/
void *kthread_probe_data(struct task_struct *task)
{
struct kthread *kthread = to_kthread(task);
struct kthread *kthread = __to_kthread(task);
void *data = NULL;
copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
if (kthread)
copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
return data;
}
@ -515,9 +536,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
bool kthread_is_per_cpu(struct task_struct *k)
bool kthread_is_per_cpu(struct task_struct *p)
{
struct kthread *kthread = to_kthread(k);
struct kthread *kthread = __to_kthread(p);
if (!kthread)
return false;
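
__to_kthread() exists because PF_KTHREAD can vanish under the caller: a kernel thread may exec() a user program, after which set_child_tid no longer points at a struct kthread. The guarded accessor re-checks the flag and degrades to NULL instead of dereferencing stale state. A userspace sketch of the pattern; struct task and the field reuse are illustrative:

	#include <stdio.h>

	#define PF_KTHREAD 0x00200000u

	struct task {
		unsigned int flags;
		void *priv;          /* stand-in for the set_child_tid reuse */
	};

	/* Only valid when the caller already knows @t is a kthread. */
	static void *to_priv(struct task *t)
	{
		return t->priv;
	}

	/* Safe on any task: exec() can clear PF_KTHREAD, so re-check it. */
	static void *to_priv_checked(struct task *t)
	{
		void *p = t->priv;

		if (p && !(t->flags & PF_KTHREAD))
			p = NULL;
		return p;
	}

	int main(void)
	{
		struct task t = { .flags = PF_KTHREAD, .priv = &t };

		printf("checked:   %p\n", to_priv_checked(&t)); /* non-NULL */
		t.flags = 0;                                    /* "exec() happened" */
		printf("checked:   %p\n", to_priv_checked(&t)); /* NULL */
		printf("unchecked: %p\n", to_priv(&t));         /* stale, unsafe */
		return 0;
	}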


@ -34,6 +34,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp)
ssp->srcu_gp_running = false;
ssp->srcu_gp_waiting = false;
ssp->srcu_idx = 0;
ssp->srcu_idx_max = 0;
INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0;
@ -84,6 +85,8 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
WARN_ON(ssp->srcu_gp_waiting);
WARN_ON(ssp->srcu_cb_head);
WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
@ -114,7 +117,7 @@ void srcu_drive_gp(struct work_struct *wp)
struct srcu_struct *ssp;
ssp = container_of(wp, struct srcu_struct, srcu_work);
if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
@ -124,11 +127,12 @@ void srcu_drive_gp(struct work_struct *wp)
ssp->srcu_cb_head = NULL;
ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable();
idx = ssp->srcu_idx;
WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
idx = (ssp->srcu_idx & 0x2) / 2;
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
/* Invoke the callbacks we removed above. */
while (lh) {
@ -146,11 +150,27 @@ void srcu_drive_gp(struct work_struct *wp)
* straighten that out.
*/
WRITE_ONCE(ssp->srcu_gp_running, false);
if (READ_ONCE(ssp->srcu_cb_head))
if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
unsigned short cookie;
cookie = get_state_synchronize_srcu(ssp);
if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
return;
WRITE_ONCE(ssp->srcu_idx_max, cookie);
if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done))
schedule_work(&ssp->srcu_work);
else if (list_empty(&ssp->srcu_work.entry))
list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
}
/*
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
@ -166,12 +186,7 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
*ssp->srcu_cb_tail = rhp;
ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done))
schedule_work(&ssp->srcu_work);
else if (list_empty(&ssp->srcu_work.entry))
list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
@ -190,6 +205,48 @@ void synchronize_srcu(struct srcu_struct *ssp)
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
/*
* get_state_synchronize_srcu - Provide an end-of-grace-period cookie
*/
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
unsigned long ret;
barrier();
ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
barrier();
return ret & USHRT_MAX;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
/*
* start_poll_synchronize_srcu - Provide cookie and start grace period
*
* The difference between this and get_state_synchronize_srcu() is that
* this function ensures that a later poll_state_synchronize_srcu()
* will eventually return true.
*/
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
unsigned long ret = get_state_synchronize_srcu(ssp);
srcu_gp_start_if_needed(ssp);
return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
/*
* poll_state_synchronize_srcu - Has cookie's grace period ended?
*/
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);
barrier();
return ret;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
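
The Tiny SRCU rework packs three things into ->srcu_idx: bit 0x1 flags a grace period in flight, bit 0x2 selects the reader array element, and the counter as a whole orders grace periods, so a cookie is just the counter rounded up to the end of the next full grace period (USHORT_CMP_* absorbs the unsigned-short wrap). The arithmetic can be checked in isolation with this small program:

	#include <limits.h>
	#include <stdio.h>

	#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))

	static unsigned int reader_idx(unsigned short idx)
	{
		return ((idx + 1) & 0x2) >> 1;   /* as in __srcu_read_lock() */
	}

	static unsigned short cookie(unsigned short idx)
	{
		return (idx + 3) & ~0x1;         /* get_state_synchronize_srcu() */
	}

	int main(void)
	{
		unsigned short c = cookie(0);    /* cookie taken at srcu_idx == 0 */

		/* ->srcu_idx advances twice per grace period; the cookie
		 * becomes "done" once the counter reaches it. */
		for (unsigned short idx = 0; idx < 5; idx++)
			printf("srcu_idx=%u reader_idx=%u done=%d\n",
			       (unsigned int)idx, reader_idx(idx),
			       USHORT_CMP_GE(idx, c));
		return 0;
	}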
