This is the 5.4.196 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKN12MACgkQONu9yGCS
 aT4Uvg/8DPgL4dM+jKZ4R16cbJU1rpvY0pJEcYsepqLFXdHDSLBA04eakCXO8k+x
 Ksy0kXvZuVGRRl25OuTGoTPvsmdx/D0R+XNSEvh9KPWVHdcB5FoDM4TskBz8vENR
 NDfNyWImmnE2xRCxi7GjTXI7RAyaiEDGbHtpoO+E7EN3EWv1JyhkhBhL0mBpQLGk
 gfzjdn7W2s5RbvH4XQFxdF5AgvnQZdMp5L92DC14/77Uo7fZXcU1VUGvASacpYu8
 A2z3jZBRI+YDMeLSGXdha5LDT2KoAUu5WE9Ms3OjEOn4jfoOmPDxEwsbpupFlk/i
 PRclY1oitWkOgLTTg+ZO/h72tj+kPaczVryVcdM4NKvC+10xyXHk2snW0JUxO1cI
 Kls9d3f0ADBeb5bUrHc6zBk0sj4Bx8sGWigZCUEU1QCirTj/83F3g+RwM0dSuS6g
 HFw5DTZ8WvPfn9SH2RQi6D4lOZydifxOOcD72iZiyt4rOpsNkO1BY74L8oNHPcuv
 ukYQinLttpCiuHJFU4SYjsqH5FRkpqaun0ovD9SF8icEIJM0igI0ZJ+AMZf9ZnQJ
 Ws7aijqwzoFw1GcKxNYFwDxRa5Q85pVwXkl6YS46lZGP70hqrVBgxBG/pBDBY+M7
 lPtszi1Pp/9LpUIZdJLjEDIULWM3qVPLEY6EEtC70syue+XKevU=
 =ZjkQ
 -----END PGP SIGNATURE-----

Merge 5.4.196 into android11-5.4-lts

Changes in 5.4.196
	floppy: use a statically allocated error counter
	x86/xen: Make the boot CPU idle task reliable
	x86/xen: Make the secondary CPU idle tasks reliable
	rtc: fix use-after-free on device removal
	um: Cleanup syscall_handler_t definition/cast, fix warning
	Input: add bounds checking to input_set_capability()
	Input: stmfts - fix reference leak in stmfts_input_open
	crypto: stm32 - fix reference leak in stm32_crc_remove
	crypto: x86/chacha20 - Avoid spurious jumps to other functions
	ALSA: hda/realtek: Enable headset mic on Lenovo P360
	nvme-multipath: fix hang when disk goes live over reconnect
	rtc: mc146818-lib: Fix the AltCentury for AMD platforms
	MIPS: lantiq: check the return value of kzalloc()
	drbd: remove usage of list iterator variable after loop
	platform/chrome: cros_ec_debugfs: detach log reader wq from devm
	ARM: 9191/1: arm/stacktrace, kasan: Silence KASAN warnings in unwind_frame()
	nilfs2: fix lockdep warnings in page operations for btree nodes
	nilfs2: fix lockdep warnings during disk space reclamation
	mmc: core: Specify timeouts for BKOPS and CACHE_FLUSH for eMMC
	mmc: block: Use generic_cmd6_time when modifying INAND_CMD38_ARG_EXT_CSD
	mmc: core: Default to generic_cmd6_time as timeout in __mmc_switch()
	SUNRPC: Clean up scheduling of autoclose
	SUNRPC: Prevent immediate close+reconnect
	SUNRPC: Don't call connect() more than once on a TCP socket
	SUNRPC: Ensure we flush any closed sockets before xs_xprt_free()
	ALSA: wavefront: Proper check of get_user() error
	perf: Fix sys_perf_event_open() race against self
	Fix double fget() in vhost_net_set_backend()
	PCI/PM: Avoid putting Elo i2 PCIe Ports in D3cold
	KVM: x86/mmu: Update number of zapped pages even if page list is stable
	crypto: qcom-rng - fix infinite loop on requests not multiple of WORD_SZ
	drm/dp/mst: fix a possible memory leak in fetch_monitor_name()
	dma-buf: fix use of DMA_BUF_SET_NAME_{A,B} in userspace
	ARM: dts: aspeed-g6: remove FWQSPID group in pinctrl dtsi
	ARM: dts: aspeed-g6: fix SPI1/SPI2 quad pin group
	net: macb: Increment rx bd head after allocating skb and buffer
	net/sched: act_pedit: sanitize shift argument before usage
	net: vmxnet3: fix possible use-after-free bugs in vmxnet3_rq_alloc_rx_buf()
	net: vmxnet3: fix possible NULL pointer dereference in vmxnet3_rq_cleanup()
	ice: fix possible under reporting of ethtool Tx and Rx statistics
	clk: at91: generated: consider range when calculating best rate
	net/qla3xxx: Fix a test in ql_reset_work()
	NFC: nci: fix sleep in atomic context bugs caused by nci_skb_alloc
	net/mlx5e: Properly block LRO when XDP is enabled
	net: af_key: add check for pfkey_broadcast in function pfkey_process
	ARM: 9196/1: spectre-bhb: enable for Cortex-A15
	ARM: 9197/1: spectre-bhb: fix loop8 sequence for Thumb2
	igb: skip phy status check where unavailable
	net: bridge: Clear offload_fwd_mark when passing frame up bridge interface.
	gpio: gpio-vf610: do not touch other bits when set the target bit
	gpio: mvebu/pwm: Refuse requests with inverted polarity
	perf bench numa: Address compiler error on s390
	scsi: qla2xxx: Fix missed DMA unmap for aborted commands
	mac80211: fix rx reordering with non explicit / psmp ack policy
	selftests: add ping test with ping_group_range tuned
	ethernet: tulip: fix missing pci_disable_device() on error in tulip_init_one()
	net: stmmac: fix missing pci_disable_device() on error in stmmac_pci_probe()
	net: atlantic: verify hw_head_ lies within TX buffer ring
	Input: ili210x - fix reset timing
	block: return ELEVATOR_DISCARD_MERGE if possible
	net: stmmac: disable Split Header (SPH) for Intel platforms
	firmware_loader: use kernel credentials when reading firmware
	ARM: dts: imx7: Use audio_mclk_post_div instead audio_mclk_root_clk
	Reinstate some of "swiotlb: rework "fix info leak with DMA_FROM_DEVICE""
	x86/xen: fix booting 32-bit pv guest
	x86/xen: Mark cpu_bringup_and_idle() as dead_end_function
	i2c: mt7621: fix missing clk_disable_unprepare() on error in mtk_i2c_probe()
	afs: Fix afs_getattr() to refetch file status if callback break occurred
	Linux 5.4.196

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8464b114a6d5d655386f3c794bbb8bbc3a94e0ec
Committed by Greg Kroah-Hartman on 2022-05-25 10:40:14 +02:00 (commit 0cf7a2be06)
94 changed files with 681 additions and 263 deletions

View File

@ -156,13 +156,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
DMA_ATTR_PRIVILEGED
-------------------
Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
"user" modes. This attribute is used to indicate to the DMA-mapping
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 195
SUBLEVEL = 196
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -117,11 +117,6 @@
groups = "FWSPID";
};
pinctrl_fwqspid_default: fwqspid_default {
function = "FWSPID";
groups = "FWQSPID";
};
pinctrl_fwspiwp_default: fwspiwp_default {
function = "FWSPIWP";
groups = "FWSPIWP";
@ -653,12 +648,12 @@
};
pinctrl_qspi1_default: qspi1_default {
function = "QSPI1";
function = "SPI1";
groups = "QSPI1";
};
pinctrl_qspi2_default: qspi2_default {
function = "QSPI2";
function = "SPI2";
groups = "QSPI2";
};

View File

@ -77,7 +77,7 @@
dailink_master: simple-audio-card,codec {
sound-dai = <&codec>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
};
};
};
@ -152,7 +152,7 @@
compatible = "fsl,sgtl5000";
#sound-dai-cells = <0>;
reg = <0x0a>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai1_mclk>;
VDDA-supply = <&reg_module_3v3_avdd>;

View File

@ -250,7 +250,7 @@
tlv320aic32x4: audio-codec@18 {
compatible = "ti,tlv320aic32x4";
reg = <0x18>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
clock-names = "mclk";
ldoin-supply = <&reg_audio_3v3>;
iov-supply = <&reg_audio_3v3>;

View File

@ -284,7 +284,7 @@
codec: wm8960@1a {
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
clock-names = "mclk";
wlf,shared-lrclk;
};

View File

@ -31,7 +31,7 @@
dailink_master: simple-audio-card,codec {
sound-dai = <&sgtl5000>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
};
};
};
@ -41,7 +41,7 @@
#sound-dai-cells = <0>;
reg = <0x0a>;
compatible = "fsl,sgtl5000";
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
VDDA-supply = <&reg_2p5v>;
VDDIO-supply = <&reg_vref_1v8>;
};

View File

@ -31,7 +31,7 @@
dailink_master: simple-audio-card,codec {
sound-dai = <&sgtl5000>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
};
};
};
@ -41,7 +41,7 @@
#sound-dai-cells = <0>;
reg = <0x0a>;
compatible = "fsl,sgtl5000";
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
VDDA-supply = <&reg_2p5v>;
VDDIO-supply = <&reg_vref_1v8>;
};

View File

@ -356,7 +356,7 @@
codec: wm8960@1a {
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
clock-names = "mclk";
wlf,shared-lrclk;
};

View File

@ -75,7 +75,7 @@
dailink_master: simple-audio-card,codec {
sound-dai = <&codec>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
};
};
};
@ -232,7 +232,7 @@
#sound-dai-cells = <0>;
reg = <0x0a>;
compatible = "fsl,sgtl5000";
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai1_mclk>;
VDDA-supply = <&vgen4_reg>;

View File

@ -1043,7 +1043,7 @@ vector_bhb_loop8_\name:
@ bhb workaround
mov r0, #8
3: b . + 4
3: W(b) . + 4
subs r0, r0, #1
bne 3b
dsb

View File

@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame)
return -EINVAL;
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 4);
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
#endif
return 0;

View File

@ -301,6 +301,7 @@ void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}
void cpu_v7_bugs_init(void)

View File

@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = NULL;
clk->cl.clk = clk;

View File

@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev_name(dev);
clk->cl.con_id = con;
clk->cl.clk = clk;

View File

@ -311,6 +311,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@ -334,6 +336,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@ -352,24 +356,28 @@ static void clkdev_add_pci(void)
struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
/* main pci clock */
clk->cl.dev_id = "17000000.pci";
clk->cl.con_id = NULL;
clk->cl.clk = clk;
clk->rate = CLOCK_33M;
clk->rates = valid_pci_rates;
clk->enable = pci_enable;
clk->disable = pmu_disable;
clk->module = 0;
clk->bits = PMU_PCI;
clkdev_add(&clk->cl);
if (clk) {
clk->cl.dev_id = "17000000.pci";
clk->cl.con_id = NULL;
clk->cl.clk = clk;
clk->rate = CLOCK_33M;
clk->rates = valid_pci_rates;
clk->enable = pci_enable;
clk->disable = pmu_disable;
clk->module = 0;
clk->bits = PMU_PCI;
clkdev_add(&clk->cl);
}
/* use internal/external bus clock */
clk_ext->cl.dev_id = "17000000.pci";
clk_ext->cl.con_id = "external";
clk_ext->cl.clk = clk_ext;
clk_ext->enable = pci_ext_enable;
clk_ext->disable = pci_ext_disable;
clkdev_add(&clk_ext->cl);
if (clk_ext) {
clk_ext->cl.dev_id = "17000000.pci";
clk_ext->cl.con_id = "external";
clk_ext->cl.clk = clk_ext;
clk_ext->enable = pci_ext_enable;
clk_ext->disable = pci_ext_disable;
clkdev_add(&clk_ext->cl);
}
}
/* xway socs can generate clocks on gpio pins */
@ -389,9 +397,15 @@ static void clkdev_add_clkout(void)
char *name;
name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
if (!name)
continue;
sprintf(name, "clkout%d", i);
clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk) {
kfree(name);
continue;
}
clk->cl.dev_id = "1f103000.cgu";
clk->cl.con_id = name;
clk->cl.clk = clk;

View File

@ -172,7 +172,7 @@ ENTRY(chacha_2block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
jz .Ldone8
jz .Ldone2
mov %rax,%r9
and $~0xf,%r9
@ -438,7 +438,7 @@ ENTRY(chacha_4block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
jz .Ldone8
jz .Ldone4
mov %rax,%r9
and $~0xf,%r9

View File

@ -5821,6 +5821,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
bool unstable;
restart:
list_for_each_entry_safe_reverse(sp, node,
@ -5853,11 +5854,12 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
goto restart;
}
if (__kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
batch += nr_zapped;
unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped);
batch += nr_zapped;
if (unstable)
goto restart;
}
}
/*

View File

@ -10,13 +10,12 @@
#include <linux/msg.h>
#include <linux/shm.h>
typedef long syscall_handler_t(void);
typedef long syscall_handler_t(long, long, long, long, long, long);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
(((long (*)(long, long, long, long, long, long)) \
(*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
(((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
UPT_SYSCALL_ARG2(&regs->regs), \
UPT_SYSCALL_ARG3(&regs->regs), \
UPT_SYSCALL_ARG4(&regs->regs), \

View File

@ -53,6 +53,7 @@ static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
void asm_cpu_bringup_and_idle(void);
static void cpu_bringup(void)
{
@ -310,7 +311,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
* pointing just below where pt_regs would be if it were a normal
* kernel entry.
*/
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;

View File

@ -35,7 +35,11 @@ ENTRY(startup_xen)
rep __ASM_SIZE(stos)
mov %_ASM_SI, xen_start_info
mov $init_thread_union+THREAD_SIZE, %_ASM_SP
#ifdef CONFIG_X86_64
mov initial_stack(%rip), %rsp
#else
mov initial_stack, %esp
#endif
#ifdef CONFIG_X86_64
/* Set up %gs.
@ -51,9 +55,19 @@ ENTRY(startup_xen)
wrmsr
#endif
jmp xen_start_kernel
call xen_start_kernel
END(startup_xen)
__FINIT
#ifdef CONFIG_XEN_PV_SMP
.pushsection .text
SYM_CODE_START(asm_cpu_bringup_and_idle)
UNWIND_HINT_EMPTY
call cpu_bringup_and_idle
SYM_CODE_END(asm_cpu_bringup_and_idle)
.popsection
#endif
#endif
.pushsection .text

View File

@ -2251,6 +2251,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}

View File

@ -730,21 +730,6 @@ static void blk_account_io_merge(struct request *req)
part_stat_unlock();
}
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and send them to controller together. The ranges
* needn't to be contiguous.
* Otherwise, the bios/requests will be handled as same as
* others which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
static enum elv_merge blk_try_req_merge(struct request *req,
struct request *next)

View File

@ -337,6 +337,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_BACK_MERGE;
}

View File

@ -452,6 +452,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
if (elv_bio_merge_ok(__rq, bio)) {
*rq = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
}

View File

@ -761,6 +761,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
enum fw_opt opt_flags)
{
struct firmware *fw = NULL;
struct cred *kern_cred = NULL;
const struct cred *old_cred;
int ret;
if (!firmware_p)
@ -776,6 +778,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
/*
* We are about to try to access the firmware file. Because we may have been
* called by a driver when serving an unrelated request from userland, we use
* the kernel credentials to read the file.
*/
kern_cred = prepare_kernel_cred(NULL);
if (!kern_cred) {
ret = -ENOMEM;
goto out;
}
old_cred = override_creds(kern_cred);
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
#ifdef CONFIG_FW_LOADER_COMPRESS
if (ret == -ENOENT)
@ -792,6 +806,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
} else
ret = assign_fw(fw, device, opt_flags);
revert_creds(old_cred);
put_cred(kern_cred);
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);

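For context on the hunk above: the pattern is to build a fresh set of kernel credentials, run the filesystem access under them, then restore the caller's credentials and drop the temporary ones. A minimal sketch of the same override_creds()/revert_creds() idiom follows; the helper name and firmware path are invented for illustration and are not part of this patch.

#include <linux/cred.h>
#include <linux/device.h>
#include <linux/firmware.h>

/* Hypothetical illustration only: fetch a blob while running under kernel
 * credentials instead of those of the calling task. */
static int example_fetch_blob(struct device *dev, const struct firmware **fw)
{
	struct cred *kern_cred;
	const struct cred *old_cred;
	int ret;

	kern_cred = prepare_kernel_cred(NULL);	/* fresh kernel credentials */
	if (!kern_cred)
		return -ENOMEM;

	old_cred = override_creds(kern_cred);	/* act as the kernel, not the caller */
	ret = request_firmware(fw, "example/blob.bin", dev);
	revert_creds(old_cred);			/* restore the caller's credentials */
	put_cred(kern_cred);			/* drop the temporary credentials */

	return ret;
}
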
View File

@ -183,7 +183,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
struct drbd_request *req = NULL;
struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@ -237,8 +237,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
if (req->epoch == expect_epoch) {
tmp = req;
break;
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;

View File

@ -521,8 +521,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* Errors during formatting are counted here. */
static int format_errors;
/* errors encountered on the current (or last) request */
static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@ -542,7 +542,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@ -1435,7 +1434,7 @@ static int interpret_errors(void)
if (DP->flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
} else if (*errors >= DP->max_errors.reporting) {
} else if (floppy_errors >= DP->max_errors.reporting) {
print_errors();
}
if (ST2 & ST2_WC || ST2 & ST2_BC)
@ -2055,7 +2054,7 @@ static void bad_flp_intr(void)
if (!next_valid_format())
return;
}
err_count = ++(*errors);
err_count = ++floppy_errors;
INFBOUND(DRWE->badness, err_count);
if (err_count > DP->max_errors.abort)
cont->done(0);
@ -2200,9 +2199,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
format_errors = 0;
cont = &format_cont;
errors = &format_errors;
floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@ -2677,7 +2675,7 @@ static int make_raw_rw_request(void)
*/
if (!direct ||
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track &&
floppy_errors < DP->max_errors.read_track &&
((!probing ||
(DP->read_track & (1 << DRS->probed_format)))))) {
max_size = blk_rq_sectors(current_req);
@ -2801,10 +2799,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
current_req->error_count = 0;
floppy_errors = 0;
list_del_init(&current_req->queuelist);
return 1;
}
return current_req != NULL;
return 0;
}
static void redo_fd_request(void)
@ -2860,7 +2859,6 @@ static void redo_fd_request(void)
_floppy = floppy_type + DP->autodetect[DRS->probed_format];
} else
probing = 0;
errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);

View File

@ -105,6 +105,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
return;
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {

View File

@ -64,6 +64,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
break;
}
} while (currsize < max);

View File

@ -332,8 +332,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(crc->dev);
return ret;
}
spin_lock(&crc_list.lock);
list_del(&crc->list);

View File

@ -697,6 +697,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX)

View File

@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
u32 val;
if (port->sdata && port->sdata->have_paddr)
vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
vf610_gpio_set(chip, gpio, value);

View File

@ -3657,6 +3657,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
kfree(mst_edid);
}
/**

View File

@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
return -EINVAL;
ret = -EINVAL;
goto err_disable_clk;
}
adap = &i2c->adap;
@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret < 0)
return ret;
goto err_disable_clk;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
return 0;
err_disable_clk:
clk_disable_unprepare(i2c->clk);
return ret;
}

View File

@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
static const unsigned int input_max_code[EV_CNT] = {
[EV_KEY] = KEY_MAX,
[EV_REL] = REL_MAX,
[EV_ABS] = ABS_MAX,
[EV_MSC] = MSC_MAX,
[EV_SW] = SW_MAX,
[EV_LED] = LED_MAX,
[EV_SND] = SND_MAX,
[EV_FF] = FF_MAX,
};
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
@ -1978,6 +1989,14 @@ EXPORT_SYMBOL(input_get_timestamp);
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
if (type < EV_CNT && input_max_code[type] &&
code > input_max_code[type]) {
pr_err("%s: invalid code %u for type %u\n", __func__, code,
type);
dump_stack();
return;
}
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);

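For context on the bounds check above, a short usage sketch (hypothetical driver code, not part of this patch) of how event capabilities are declared; with this change, a code larger than the per-type maximum is rejected with an error message and a stack dump instead of writing past the capability bitmaps.

#include <linux/input.h>

/* Hypothetical illustration only. */
static void example_declare_caps(struct input_dev *dev)
{
	input_set_capability(dev, EV_KEY, KEY_POWER);	/* KEY_POWER <= KEY_MAX, accepted */
	input_set_capability(dev, EV_SW, SW_LID);	/* SW_LID <= SW_MAX, accepted */
}
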
View File

@ -290,9 +290,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;
usleep_range(50, 100);
usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
msleep(100);
msleep(160);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

View File

@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
err = pm_runtime_get_sync(&sdata->client->dev);
if (err < 0)
return err;
goto out;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
if (err)
return err;
goto out;
mutex_lock(&sdata->mutex);
sdata->running = true;
@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
"failed to enable touchkey\n");
}
return 0;
out:
pm_runtime_put_noidle(&sdata->client->dev);
return err;
}
static void stmfts_input_close(struct input_dev *dev)

View File

@ -1132,7 +1132,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
0);
card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, card->erase_arg);
@ -1174,7 +1174,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
0);
card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
@ -1192,7 +1192,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
0);
card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}

View File

@ -19,7 +19,9 @@
#include "host.h"
#include "mmc_ops.h"
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
/* We have an unspecified cmd timeout, use the fallback value. */
if (!timeout_ms)
timeout_ms = MMC_OPS_TIMEOUT_MS;
/*
* In cases when not allowed to poll by using CMD13 or because we aren't
* capable of polling by using ->card_busy(), then rely on waiting the
@ -534,6 +532,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_retune_hold(host);
if (!timeout_ms) {
pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
mmc_hostname(host));
timeout_ms = card->ext_csd.generic_cmd6_time;
}
/*
* If the cmd timeout and the max_busy_timeout of the host are both
* specified, let's validate them. A failure means we need to prevent
@ -542,7 +546,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
* which also means they are on their own when it comes to deal with the
* busy timeout.
*/
if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) &&
host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
@ -554,10 +558,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
/*
* A busy_timeout of zero means the host can decide to use
* whatever value it finds suitable.
*/
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@ -943,7 +943,7 @@ void mmc_run_bkops(struct mmc_card *card)
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@ -963,7 +963,8 @@ int mmc_flush_cache(struct mmc_card *card)
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
EXT_CSD_FLUSH_CACHE, 1,
MMC_CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);

View File

@ -674,6 +674,13 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
/* Validate that the new hw_head_ is reasonable. */
if (hw_head_ >= ring->size) {
err = -ENXIO;
goto err_exit;
}
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);

View File

@ -927,7 +927,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@ -966,6 +965,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */

View File

@ -1410,8 +1410,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev)
if (!dev) {
pci_disable_device(pdev);
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@ -1788,6 +1790,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_free_netdev:
free_netdev (dev);
pci_disable_device(pdev);
return -ENODEV;
}

View File

@ -3561,9 +3561,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
/* clear this now, and the first stats read will be used as baseline */
vsi->stat_offsets_loaded = false;
/* Perform an initial read of the statistics registers now to
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;

View File

@ -5318,7 +5318,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
if (adapter->link_speed != SPEED_1000)
if (adapter->link_speed != SPEED_1000 ||
!hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */

View File

@ -3980,6 +3980,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
if (params->xdp_prog) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with XDP\n");
features &= ~NETIF_F_LRO;
}
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)

View File

@ -3629,7 +3629,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*

View File

@ -4531,7 +4531,7 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
if (priv->dma_cap.sphen) {
if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph = true;
dev_info(priv->device, "SPH feature enabled\n");

View File

@ -119,6 +119,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
plat->sph_disable = 1;
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@ -481,7 +482,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
ret = pci_enable_device(pdev);
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@ -538,8 +539,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)

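The switch from pci_enable_device() to pcim_enable_device() above is what allows the explicit pci_disable_device() call in stmmac_pci_remove() to be dropped: the managed ("devres") variant registers a cleanup action that runs automatically when probe fails or the driver unbinds. A rough sketch of that idiom, with an invented driver name and BAR number purely for illustration:

#include <linux/pci.h>

/* Hypothetical illustration only. */
static int example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);		/* managed: auto-disabled on unbind or failure */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), "example_drv");	/* map BAR 0, also managed */
	if (ret)
		return ret;			/* no manual pci_disable_device() needed */

	return 0;
}
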
View File

@ -595,6 +595,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@ -619,6 +620,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@ -1584,6 +1586,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
/* ring has already been cleaned up */
if (!rq->rx_ring[0].base)
return;
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD

View File

@ -4047,6 +4047,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
nvme_mpath_update(ctrl);
}
ctrl->created = true;
}

View File

@ -501,8 +501,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
if (nvme_state_is_live(ns->ana_state))
/*
* nvme_mpath_set_live() will trigger I/O to the multipath path device
* and in turn to this path device. However we cannot accept this I/O
* if the controller is not live. This may deadlock if called from
* nvme_mpath_init_identify() and the ctrl will never complete
* initialization, preventing I/O from completing. For this case we
* will reprocess the ANA log page in nvme_mpath_update() once the
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@ -586,6 +595,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}
void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;
if (!ctrl->ana_log_buf)
return;
mutex_lock(&ctrl->ana_lock);
nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
mutex_unlock(&ctrl->ana_lock);
}
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

View File

@ -551,6 +551,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@ -648,6 +649,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}

View File

@ -2614,6 +2614,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
/*
* Downstream device is not accessible after putting a root port
* into D3cold and back into D0 on Elo i2.
*/
.ident = "Elo i2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
},
},
#endif
{ }

View File

@ -26,6 +26,9 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
/* waitqueue for log readers */
static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
/**
* struct cros_ec_debugfs - EC debugging information.
*
@ -34,7 +37,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
* @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@ -45,7 +47,6 @@ struct cros_ec_debugfs {
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@ -108,7 +109,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
buf_space--;
}
wake_up(&debug_info->log_wq);
wake_up(&cros_ec_debugfs_log_wq);
}
mutex_unlock(&debug_info->log_mutex);
@ -142,7 +143,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
mutex_unlock(&debug_info->log_mutex);
ret = wait_event_interruptible(debug_info->log_wq,
ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@ -174,7 +175,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
poll_wait(file, &cros_ec_debugfs_log_wq, wait);
mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@ -359,7 +360,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
debug_info->log_buffer.tail = 0;
mutex_init(&debug_info->log_mutex);
init_waitqueue_head(&debug_info->log_wq);
debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);

View File

@ -26,6 +26,15 @@ struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct timerqueue_head *head = &rtc->timerqueue;
struct timerqueue_node *node;
mutex_lock(&rtc->ops_lock);
while ((node = timerqueue_getnext(head)))
timerqueue_del(head, node);
mutex_unlock(&rtc->ops_lock);
cancel_work_sync(&rtc->irqwork);
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);

View File

@ -99,6 +99,17 @@ unsigned int mc146818_get_time(struct rtc_time *time)
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
/* AMD systems don't allow access to AltCentury with DV1 */
static bool apply_amd_register_a_behavior(void)
{
#ifdef CONFIG_X86
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return true;
#endif
return false;
}
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@ -172,7 +183,10 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
if (apply_amd_register_a_behavior())
CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
else
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);

View File

@ -3768,6 +3768,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:

View File

@ -1446,13 +1446,9 @@ static struct socket *get_raw_socket(int fd)
return ERR_PTR(r);
}
static struct ptr_ring *get_tap_ptr_ring(int fd)
static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
struct file *file = fget(fd);
if (!file)
return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@ -1461,7 +1457,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
fput(file);
return ring;
}
@ -1548,8 +1543,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
if (index == VHOST_NET_VQ_RX)
nvq->rx_ring = get_tap_ptr_ring(fd);
if (index == VHOST_NET_VQ_RX) {
if (sock)
nvq->rx_ring = get_tap_ptr_ring(sock->file);
else
nvq->rx_ring = NULL;
}
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;

View File

@ -734,10 +734,22 @@ int afs_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct afs_vnode *vnode = AFS_FS_I(inode);
int seq = 0;
struct key *key;
int ret, seq = 0;
_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
if (!(query_flags & AT_STATX_DONT_SYNC) &&
!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key))
return PTR_ERR(key);
ret = afs_validate(vnode, key);
key_put(key);
if (ret < 0)
return ret;
}
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
generic_fillattr(inode, stat);

View File

@ -375,6 +375,7 @@ void __fput_sync(struct file *file)
}
EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);
void __init files_init(void)
{

View File

@ -20,6 +20,23 @@
#include "page.h"
#include "btnode.h"
/**
* nilfs_init_btnc_inode - initialize B-tree node cache inode
* @btnc_inode: inode to be initialized
*
* nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
*/
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
btnc_inode->i_mode = S_IFREG;
ii->i_flags = 0;
memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
invalidate_mapping_pages(btnc, 0, -1);
@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
struct buffer_head *bh;
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
struct page *page;
int err;
@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *obh, *nbh;
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
int err;

View File

@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
struct buffer_head *newbh;
};
void nilfs_init_btnc_inode(struct inode *btnc_inode);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);

View File

@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
__u64 ptr, struct buffer_head **bhp)
{
struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
struct buffer_head **bhp,
const struct nilfs_btree_readahead_info *ra)
{
struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh, *ra_bh;
sector_t submit_ptr = 0;
int ret;
@ -1742,6 +1744,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
dat = nilfs_bmap_get_dat(btree);
}
ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
if (ret < 0)
return ret;
ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
if (ret < 0)
return ret;
@ -1914,7 +1920,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
path[level].bp_ctxt.bh = path[level].bp_bh;
ret = nilfs_btnode_prepare_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0) {
nilfs_dat_abort_update(dat,
@ -1940,7 +1946,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
if (buffer_nilfs_node(path[level].bp_bh)) {
nilfs_btnode_commit_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
path[level].bp_bh = path[level].bp_ctxt.bh;
}
@ -1959,7 +1965,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
&path[level].bp_newreq.bpr_req);
if (buffer_nilfs_node(path[level].bp_bh))
nilfs_btnode_abort_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
}
@ -2135,7 +2141,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
struct list_head *listp)
{
struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btcache = btnc_inode->i_mapping;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
struct pagevec pvec;
struct buffer_head *bh, *head;
@ -2189,12 +2196,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = blocknr;
path[level].bp_ctxt.bh = *bh;
ret = nilfs_btnode_prepare_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0)
return ret;
nilfs_btnode_commit_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
*bh = path[level].bp_ctxt.bh;
}
@ -2399,6 +2406,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
ret = -EIO;
else
ret = nilfs_attach_btree_node_cache(
&NILFS_BMAP_I(bmap)->vfs_inode);
return ret;
}

View File

@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
di = NILFS_DAT_I(dat);
lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
nilfs_palloc_setup_cache(dat, &di->palloc_cache);
nilfs_mdt_setup_shadow_map(dat, &di->shadow);
err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
if (err)
goto failed;
err = nilfs_read_inode_common(dat, raw_inode);
if (err)

View File

@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
{
struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
int ret;
ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
vbn ? : pbn, pbn, REQ_OP_READ, 0,
out_bh, &pbn);
if (ret == -EEXIST) /* internal code (cache hit) */
@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
return 0;
return nilfs_attach_btree_node_cache(inode);
}
/**
@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
nilfs_btnode_cache_clear(&ii->i_btnode_cache);
nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}

View File

@ -28,12 +28,16 @@
* @cno: checkpoint number
* @root: pointer on NILFS root object (mounted checkpoint)
* @for_gc: inode for GC flag
* @for_btnc: inode for B-tree node cache flag
* @for_shadow: inode for shadowed page cache flag
*/
struct nilfs_iget_args {
u64 ino;
__u64 cno;
struct nilfs_root *root;
int for_gc;
bool for_gc;
bool for_btnc;
bool for_shadow;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
@ -322,7 +326,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@ -534,6 +539,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
return 0;
ii = NILFS_I(inode);
if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
if (!args->for_btnc)
return 0;
} else if (args->for_btnc) {
return 0;
}
if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
if (!args->for_shadow)
return 0;
} else if (args->for_shadow) {
return 0;
}
if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
return !args->for_gc;
@ -545,15 +563,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
struct nilfs_iget_args *args = opaque;
inode->i_ino = args->ino;
if (args->for_gc) {
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = args->root;
if (args->root && args->ino == NILFS_ROOT_INO)
nilfs_get_root(args->root);
if (args->for_gc)
NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = NULL;
} else {
if (args->root && args->ino == NILFS_ROOT_INO)
nilfs_get_root(args->root);
NILFS_I(inode)->i_root = args->root;
}
if (args->for_btnc)
NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
if (args->for_shadow)
NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
return 0;
}
@ -561,7 +581,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return ilookup5(sb, ino, nilfs_iget_test, &args);
@ -571,7 +592,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@ -602,7 +624,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
__u64 cno)
{
struct nilfs_iget_args args = {
.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
.for_btnc = false, .for_shadow = false
};
struct inode *inode;
int err;
@ -622,6 +645,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
return inode;
}
/**
* nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
* @inode: inode object
*
* nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
* or does nothing if the inode already has it. This function allocates
* an additional inode to maintain page cache of B-tree nodes one-on-one.
*
* Return Value: On success, 0 is returned. On errors, one of the following
* negative error code is returned.
*
* %-ENOMEM - Insufficient memory available.
*/
int nilfs_attach_btree_node_cache(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
struct inode *btnc_inode;
struct nilfs_iget_args args;
if (ii->i_assoc_inode)
return 0;
args.ino = inode->i_ino;
args.root = ii->i_root;
args.cno = ii->i_cno;
args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
args.for_btnc = true;
args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
if (unlikely(!btnc_inode))
return -ENOMEM;
if (btnc_inode->i_state & I_NEW) {
nilfs_init_btnc_inode(btnc_inode);
unlock_new_inode(btnc_inode);
}
NILFS_I(btnc_inode)->i_assoc_inode = inode;
NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
ii->i_assoc_inode = btnc_inode;
return 0;
}
/**
* nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
* @inode: inode object
*
* nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
* holder inode bound to @inode, or does nothing if @inode doesn't have it.
*/
void nilfs_detach_btree_node_cache(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
struct inode *btnc_inode = ii->i_assoc_inode;
if (btnc_inode) {
NILFS_I(btnc_inode)->i_assoc_inode = NULL;
ii->i_assoc_inode = NULL;
iput(btnc_inode);
}
}
/**
* nilfs_iget_for_shadow - obtain inode for shadow mapping
* @inode: inode object that uses shadow mapping
*
* nilfs_iget_for_shadow() allocates a pair of inodes that holds page
* caches for shadow mapping. The page cache for data pages is set up
* in one inode and the one for b-tree node pages is set up in the
* other inode, which is attached to the former inode.
*
* Return Value: On success, a pointer to the inode for data pages is
* returned. On errors, one of the following negative error code is returned
* in a pointer type.
*
* %-ENOMEM - Insufficient memory available.
*/
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
struct nilfs_iget_args args = {
.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = true
};
struct inode *s_inode;
int err;
s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
if (unlikely(!s_inode))
return ERR_PTR(-ENOMEM);
if (!(s_inode->i_state & I_NEW))
return inode;
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
iget_failed(s_inode);
return ERR_PTR(err);
}
unlock_new_inode(s_inode);
return s_inode;
}
void nilfs_write_inode_common(struct inode *inode,
struct nilfs_inode *raw_inode, int has_bmap)
{
@ -770,7 +900,8 @@ static void nilfs_clear_inode(struct inode *inode)
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
nilfs_btnode_cache_clear(&ii->i_btnode_cache);
if (!test_bit(NILFS_I_BTNC, &ii->i_state))
nilfs_detach_btree_node_cache(inode);
if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
nilfs_put_root(ii->i_root);

View File

@ -469,9 +469,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
void nilfs_mdt_clear(struct inode *inode)
{
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
struct nilfs_shadow_map *shadow = mdi->mi_shadow;
if (mdi->mi_palloc_cache)
nilfs_palloc_destroy_cache(inode);
if (shadow) {
struct inode *s_inode = shadow->inode;
shadow->inode = NULL;
iput(s_inode);
mdi->mi_shadow = NULL;
}
}
/**
@ -505,12 +514,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct inode *s_inode;
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
nilfs_mapping_init(&shadow->frozen_data, inode);
address_space_init_once(&shadow->frozen_btnodes);
nilfs_mapping_init(&shadow->frozen_btnodes, inode);
s_inode = nilfs_iget_for_shadow(inode);
if (IS_ERR(s_inode))
return PTR_ERR(s_inode);
shadow->inode = s_inode;
mi->mi_shadow = shadow;
return 0;
}
@ -524,14 +536,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_inode_info *ii = NILFS_I(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
struct inode *s_inode = shadow->inode;
int ret;
ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
if (ret)
goto out;
ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
&ii->i_btnode_cache);
ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
ii->i_assoc_inode->i_mapping);
if (ret)
goto out;
@ -547,7 +560,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int blkbits = inode->i_blkbits;
page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
if (!page)
return -ENOMEM;
@ -579,7 +592,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int n;
page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
if (page) {
if (page_has_buffers(page)) {
n = bh_offset(bh) >> inode->i_blkbits;
@@ -620,10 +633,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
nilfs_palloc_clear_cache(inode);
nilfs_clear_dirty_pages(inode->i_mapping, true);
- nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+ nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
- nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
- nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+ nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+ NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
@@ -638,10 +652,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
+ struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
down_write(&mi->mi_sem);
nilfs_release_frozen_buffers(shadow);
- truncate_inode_pages(&shadow->frozen_data, 0);
- truncate_inode_pages(&shadow->frozen_btnodes, 0);
+ truncate_inode_pages(shadow->inode->i_mapping, 0);
+ truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
up_write(&mi->mi_sem);
}


@@ -18,14 +18,12 @@
/**
* struct nilfs_shadow_map - shadow mapping of meta data file
* @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
* @frozen_buffers: list of frozen buffers
*/
struct nilfs_shadow_map {
struct nilfs_bmap_store bmap_store;
- struct address_space frozen_data;
- struct address_space frozen_btnodes;
+ struct inode *inode;
struct list_head frozen_buffers;
};


@@ -28,7 +28,7 @@
* @i_xattr: <TODO>
* @i_dir_start_lookup: page index of last successful search
* @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
* @i_dirty: list for connecting dirty files
* @xattr_sem: semaphore for extended attributes processing
* @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
__u64 i_xattr; /* sector_t ??? */
__u32 i_dir_start_lookup;
__u64 i_cno; /* check point number for GC inode */
- struct address_space i_btnode_cache;
+ struct inode *i_assoc_inode;
struct list_head i_dirty; /* List for connecting dirty files */
#ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
}
- static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
- {
- struct nilfs_inode_info *ii =
- container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
- return &ii->vfs_inode;
- }
/*
* Dynamic state flags of NILFS on-memory inode (i_state)
*/
@@ -98,6 +91,8 @@ enum {
NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
NILFS_I_BMAP, /* has bmap and btnode_cache */
NILFS_I_GCINODE, /* inode for GC, on memory only */
NILFS_I_BTNC, /* inode for btree node cache */
NILFS_I_SHADOW, /* inode for shadowed page cache */
};
/*
@@ -264,6 +259,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
unsigned long ino);
extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
unsigned long ino, __u64 cno);
int nilfs_attach_btree_node_cache(struct inode *inode);
void nilfs_detach_btree_node_cache(struct inode *inode);
struct inode *nilfs_iget_for_shadow(struct inode *inode);
extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);


@@ -450,10 +450,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
/*
* NILFS2 needs clear_page_dirty() in the following two cases:
*
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- * page dirty flags when it copies back pages from the shadow cache
- * (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- * (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ * flag of pages when it copies back pages from shadow cache to the
+ * original cache.
*
* 2) Some B-tree operations like insertion or deletion may dispose buffers
* in dirty state, and this needs to cancel the dirty state of their pages.


@@ -738,15 +738,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
struct list_head *listp)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
- struct address_space *mapping = &ii->i_btnode_cache;
+ struct inode *btnc_inode = ii->i_assoc_inode;
struct pagevec pvec;
struct buffer_head *bh, *head;
unsigned int i;
pgoff_t index = 0;
+ if (!btnc_inode)
+ return;
pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, mapping, &index,
+ while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
continue;
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}


@@ -151,7 +151,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
ii->i_bh = NULL;
ii->i_state = 0;
ii->i_cno = 0;
- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+ ii->i_assoc_inode = NULL;
+ ii->i_bmap = &ii->i_bmap_data;
return &ii->vfs_inode;
}
@@ -1375,8 +1376,6 @@ static void nilfs_inode_init_once(void *obj)
#ifdef CONFIG_NILFS_XATTR
init_rwsem(&ii->xattr_sem);
#endif
- address_space_init_once(&ii->i_btnode_cache);
- ii->i_bmap = &ii->i_bmap_data;
inode_init_once(&ii->vfs_inode);
}


@@ -1411,6 +1411,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return offset << SECTOR_SHIFT;
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and sends them to the controller together. The ranges
* need not be contiguous.
* Otherwise, the bios/requests will be handled the same as
* others, which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
static inline int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);

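Whether blk_discard_mergable() above returns true is driven by the discard limits a block driver advertises on its queue. A rough illustrative sketch of that driver-side setup follows; the helper name and the limit values are made up for illustration and are not taken from this release:

/* Hedged sketch: a driver advertising multi-range DISCARD support so that
 * blk_discard_mergable() treats each bio of a request as a separate range.
 * "my_init_discard_limits" and the chosen limits are illustrative only.
 */
#include <linux/blkdev.h>

static void my_init_discard_limits(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, UINT_MAX);
	/* More than one discard segment: DISCARD bios merge as ranges. */
	blk_queue_max_discard_segments(q, 256);
}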

@@ -71,14 +71,6 @@
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
- /*
- * This is a hint to the DMA-mapping subsystem that the device is expected
- * to overwrite the entire mapped size, thus the caller does not require any
- * of the previous buffer contents to be preserved. This allows
- * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
- */
- #define DMA_ATTR_OVERWRITE (1UL << 10)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot


@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
/* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F


@@ -179,5 +179,6 @@ struct plat_stmmacenet_data {
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
int has_xgmac;
bool sph_disable;
};
#endif


@@ -90,6 +90,7 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_WRITE (5)
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)
#define XPRT_SOCK_CONNECT_SENT (8)
#endif /* __KERNEL__ */


@@ -44,7 +44,7 @@ struct dma_buf_sync {
* between them in actual uapi, they're just different numbers.
*/
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
- #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
- #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
+ #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
+ #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
#endif
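The change above only swaps the kernel-internal u32/u64 types for the exported __u32/__u64 so that DMA_BUF_SET_NAME_A/_B expand in userspace builds; since the sizes are identical, the ioctl numbers themselves do not change. A minimal userspace sketch of the call this enables, assuming a dma-buf fd already obtained from some exporter (the helper name is illustrative):

/* Hedged sketch: labelling a dma-buf from userspace for debugging.
 * Assumes "dmabuf_fd" came from an exporter (GPU, V4L2, ...).
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int label_dmabuf(int dmabuf_fd, const char *name)
{
	/* DMA_BUF_SET_NAME resolves to the _A or _B number matching this ABI. */
	if (ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name) < 0) {
		perror("DMA_BUF_SET_NAME");
		return -1;
	}
	return 0;
}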


@@ -571,11 +571,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+ /*
+ * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
+ * to the tlb buffer, if we knew for sure the device will
+ * overwrite the entire current content. But we don't. Thus
+ * unconditional bounce may prevent leaking swiotlb content (i.e.
+ * kernel memory) to user-space.
+ */
+ swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
}


@@ -11167,6 +11167,9 @@ SYSCALL_DEFINE5(perf_event_open,
* Do not allow to attach to a group in a different task
* or CPU context. If we're moving SW events, we'll fix
* this up later, so allow that.
*
* Racy, not holding group_leader->ctx->mutex, see comment with
* perf_event_ctx_lock().
*/
if (!move_group && group_leader->ctx != ctx)
goto err_context;
@@ -11216,6 +11219,7 @@ SYSCALL_DEFINE5(perf_event_open,
} else {
perf_event_ctx_unlock(group_leader, gctx);
move_group = 0;
goto not_move_group;
}
}
@@ -11232,7 +11236,17 @@
}
} else {
mutex_lock(&ctx->mutex);
/*
* Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx,
* see the group_leader && !move_group test earlier.
*/
if (group_leader && group_leader->ctx != ctx) {
err = -EINVAL;
goto err_locked;
}
}
not_move_group:
if (ctx->task == TASK_TOMBSTONE) {
err = -ESRCH;


@@ -42,6 +42,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
u64_stats_update_end(&brstats->syncp);
vg = br_vlan_group_rcu(br);
/* Reset the offload_fwd_mark because there could be a stacked
* bridge above, and it should not think this bridge is doing
* that bridge's work forwarding out its ports.
*/
br_switchdev_frame_unmark(skb);
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc mode when someone
* may be running packet capture.


@@ -2830,8 +2830,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
- BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ if (err)
+ return err;
memset(ext_hdrs, 0, sizeof(ext_hdrs));
err = parse_exthdrs(skb, hdr, ext_hdrs);


@@ -1400,8 +1400,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
goto dont_reorder;
/* not part of a BA session */
- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */


@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (skb_frag == NULL) {
rc = -ENOMEM;
goto free_exit;


@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
i = 0;
skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
@@ -186,7 +186,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
if (i < data_len) {
skb = nci_skb_alloc(ndev,
conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;


@@ -231,6 +231,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
for (i = 0; i < p->tcfp_nkeys; ++i) {
u32 cur = p->tcfp_keys[i].off;
/* sanitize the shift value for any later use */
p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
p->tcfp_keys[i].shift);
/* The AT option can read a single byte, we can bound the actual
* value with uchar max.
*/


@@ -716,6 +716,21 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
/**
* xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
* @xprt: transport to disconnect
*/
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
return;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
rpc_wake_up_queued_task_set_status(&xprt->pending,
xprt->snd_task, -ENOTCONN);
}
/**
* xprt_force_disconnect - force a transport to disconnect
* @xprt: transport to disconnect
@@ -725,13 +740,7 @@
{
/* Don't race with the test_bit() in xprt_clear_locked() */
spin_lock(&xprt->transport_lock);
- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
- /* Try to schedule an autoclose RPC call */
- if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
- queue_work(xprtiod_workqueue, &xprt->task_cleanup);
- else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
- rpc_wake_up_queued_task_set_status(&xprt->pending,
- xprt->snd_task, -ENOTCONN);
+ xprt_schedule_autoclose_locked(xprt);
spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -771,11 +780,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
goto out;
if (test_bit(XPRT_CLOSING, &xprt->state))
goto out;
- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
- /* Try to schedule an autoclose RPC call */
- if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
- queue_work(xprtiod_workqueue, &xprt->task_cleanup);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
+ xprt_schedule_autoclose_locked(xprt);
out:
spin_unlock(&xprt->transport_lock);
}
@@ -863,10 +868,7 @@ void xprt_connect(struct rpc_task *task)
if (!xprt_lock_write(xprt, task))
return;
- if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
- xprt->ops->close(xprt);
- if (!xprt_connected(xprt)) {
+ if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
rpc_sleep_on_timeout(&xprt->pending, task, NULL,
xprt_request_timeout(task->tk_rqstp));


@@ -989,7 +989,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
/* Close the stream if the previous transmission was incomplete */
if (xs_send_request_was_aborted(transport, req)) {
- xs_close(xprt);
+ xprt_force_disconnect(xprt);
return -ENOTCONN;
}
@@ -1027,7 +1027,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
-status);
/* fall through */
case -EPIPE:
- xs_close(xprt);
+ xprt_force_disconnect(xprt);
status = -ENOTCONN;
}
@@ -1303,6 +1303,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
if (sk == NULL)
return;
/*
* Make sure we're calling this in a context from which it is safe
* to call __fput_sync(). In practice that means rpciod and the
* system workqueue.
*/
if (!(current->flags & PF_WQ_WORKER)) {
WARN_ON_ONCE(1);
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
return;
}
if (atomic_read(&transport->xprt.swapper))
sk_clear_memalloc(sk);
@@ -1326,7 +1336,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
mutex_unlock(&transport->recv_mutex);
trace_rpc_socket_close(xprt, sock);
- fput(filp);
+ __fput_sync(filp);
xprt_disconnect_done(xprt);
}
@@ -2384,10 +2394,14 @@ static void xs_tcp_setup_socket(struct work_struct *work)
struct rpc_xprt *xprt = &transport->xprt;
int status = -EIO;
- if (!sock) {
- sock = xs_create_sock(xprt, transport,
- xs_addr(xprt)->sa_family, SOCK_STREAM,
- IPPROTO_TCP, true);
+ if (xprt_connected(xprt))
+ goto out;
+ if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT,
+ &transport->sock_state) ||
+ !sock) {
+ xs_reset_transport(transport);
+ sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, true);
if (IS_ERR(sock)) {
status = PTR_ERR(sock);
goto out;
@@ -2418,6 +2432,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
break;
case 0:
case -EINPROGRESS:
set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state);
fallthrough;
case -EALREADY:
xprt_unlock_connect(xprt, transport);
return;
@@ -2471,11 +2487,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
if (transport->sock != NULL) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
- "seconds\n",
- xprt, xprt->reestablish_timeout / HZ);
- /* Start by resetting any existing state */
- xs_reset_transport(transport);
+ "seconds\n", xprt, xprt->reestablish_timeout / HZ);
delay = xprt_reconnect_delay(xprt);
xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);


@@ -1088,7 +1088,8 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (dataptr < data_end) {
- __get_user (sample_short, dataptr);
+ if (get_user(sample_short, dataptr))
+ return -EFAULT;
dataptr += skip;
if (data_is_unsigned) { /* GUS ? */


@@ -10233,6 +10233,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),


@@ -144,6 +144,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"usercopy_abort",
"machine_real_restart",
"rewind_stack_do_exit",
"cpu_bringup_and_idle",
};
if (!func)


@@ -1630,7 +1630,7 @@ static int __bench_numa(const char *name)
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[14 + 2 * 10 + 1];
+ char tname[14 + 2 * 11 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {


@@ -757,10 +757,16 @@ ipv4_ping()
setup
set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
ipv4_ping_novrf
setup
set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv4_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv4_ping_vrf
setup "yes"
set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv4_ping_vrf
}
################################################################################
@@ -2005,10 +2011,16 @@ ipv6_ping()
log_subsection "No VRF"
setup
ipv6_ping_novrf
setup
set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv6_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv6_ping_vrf
setup "yes"
set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv6_ping_vrf
}
################################################################################
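The ping selftest changes above widen net.ipv4.ping_group_range so the ICMP cases are also exercised through unprivileged ping sockets. A small userspace sketch of what that sysctl gates, assuming the caller's group id lies inside the configured range (a bare ICMP echo to loopback, with minimal error handling):

/* Hedged sketch: an unprivileged "ping" socket, which only works when the
 * process's group id is inside net.ipv4.ping_group_range. Sends one ICMP
 * echo request to 127.0.0.1; id/sequence handling is kept minimal.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	if (fd < 0) {
		perror("socket");	/* e.g. EACCES: gid outside ping_group_range */
		return 1;
	}

	struct icmphdr icmp = { .type = ICMP_ECHO, .un.echo.sequence = htons(1) };
	struct sockaddr_in dst = { .sin_family = AF_INET };
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}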