This is the 5.10.76 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmF5BlMACgkQONu9yGCS
 aT4OIQ/+PY9YFEKTRlW/TDDqnc3KxpTsSDgpyEHzDwW/zqzTu9ja36suqz0SGu+S
 SyY9tp3g7A+Ty7m7Xd6UKFcrfb6+fWN3+YqglXqgQ/VH1rGOhZiVfOZ+yBsF1D3O
 hQZRhnxYn1IJRpvKdTiw57RvjwuZO82QlXpd2SCt0crfclN/HeTvUpTxq6bllE+H
 X5TANiN0NVJgVBQvoztXQS7I6oXcAeGCoXsQ4m7S28apKBgax2EXHewFIOrjq3ub
 SVFeD1oWcHjGPcln/kv8HNGAZXZivsARl+ag8BBWmtC59dZjH/niFEcq1qVmwjBj
 d3YcBUP1nfsY2PhyXjxaUQ7gdEfFieL8wM9mYBmGyWempVWSfGAPe/td0IR7NsR9
 hwmHs4VNFLhtzEZW8OSpCV1f7dNo1WZZshCrRotSZ9QEZP/TZ84oXFFLl72C0tUI
 4Jfx5Ll3xqBjBQdDthWb99Rg0ZBQVPox/jnZMBCxk3NXtLhREmjwP4Sx8Wf635xE
 kfCGb0HkgjrQiQX0A+AyPwFAOTTrTe3WDZ5aYB4r2FwFzhySQw5ol8SmOEsZt2om
 aAEPjmut8HoM+Ch2Q9NGZghZGyePiqyxNTh9dL7G3U+Wss2xpBzpm1loCpm8OHr3
 KxwqhQCdKNxfVuPjK49N6QDAgmuhqtOx6S1z0WcMUuaZ7EZkac8=
 =zASi
 -----END PGP SIGNATURE-----

Merge 5.10.76 into android12-5.10-lts

Changes in 5.10.76
	parisc: math-emu: Fix fall-through warnings
	xhci: add quirk for host controllers that don't update endpoint DCS
	io_uring: fix splice_fd_in checks backport typo
	arm: dts: vexpress-v2p-ca9: Fix the SMB unit-address
	ARM: dts: at91: sama5d2_som1_ek: disable ISC node by default
	block: decode QUEUE_FLAG_HCTX_ACTIVE in debugfs output
	xen/x86: prevent PVH type from getting clobbered
	drm/amdgpu/display: fix dependencies for DRM_AMD_DC_SI
	xtensa: xtfpga: use CONFIG_USE_OF instead of CONFIG_OF
	xtensa: xtfpga: Try software restart before simulating CPU reset
	NFSD: Keep existing listeners on portlist error
	netfilter: xt_IDLETIMER: fix panic that occurs when timer_type has garbage value
	dma-debug: fix sg checks in debug_dma_map_sg()
	ASoC: wm8960: Fix clock configuration on slave mode
	ice: fix getting UDP tunnel entry
	netfilter: ip6t_rt: fix rt0_hdr parsing in rt_mt6
	netfilter: ipvs: make global sysctl readonly in non-init netns
	lan78xx: select CRC32
	tcp: md5: Fix overlap between vrf and non-vrf keys
	ipv6: When forwarding count rx stats on the orig netdev
	net: dsa: lantiq_gswip: fix register definition
	NIOS2: irqflags: rename a redefined register name
	powerpc/smp: do not decrement idle task preempt count in CPU offline
	net: hns3: reset DWRR of unused tc to zero
	net: hns3: add limit ets dwrr bandwidth cannot be 0
	net: hns3: schedule the polling again when allocation fails
	net: hns3: fix vf reset workqueue cannot exit
	net: hns3: disable sriov before unload hclge layer
	net: stmmac: Fix E2E delay mechanism
	e1000e: Fix packet loss on Tiger Lake and later
	ice: Add missing E810 device ids
	drm/panel: ilitek-ili9881c: Fix sync for Feixin K101-IM2BYL02 panel
	net: enetc: fix ethtool counter name for PM0_TERR
	can: rcar_can: fix suspend/resume
	can: peak_usb: pcan_usb_fd_decode_status(): fix back to ERROR_ACTIVE state notification
	can: peak_pci: peak_pci_remove(): fix UAF
	can: isotp: isotp_sendmsg(): fix return error on FC timeout on TX path
	can: isotp: isotp_sendmsg(): add result check for wait_event_interruptible()
	can: j1939: j1939_tp_rxtimer(): fix errant alert in j1939_tp_rxtimer
	can: j1939: j1939_netdev_start(): fix UAF for rx_kref of j1939_priv
	can: j1939: j1939_xtp_rx_dat_one(): cancel session if receive TP.DT with error length
	can: j1939: j1939_xtp_rx_rts_session_new(): abort TP less than 9 bytes
	ceph: skip existing superblocks that are blocklisted or shut down when mounting
	ceph: fix handling of "meta" errors
	ocfs2: fix data corruption after conversion from inline format
	ocfs2: mount fails with buffer overflow in strlen
	userfaultfd: fix a race between writeprotect and exit_mmap()
	elfcore: correct reference to CONFIG_UML
	vfs: check fd has read access in kernel_read_file_from_fd()
	ALSA: usb-audio: Provide quirk for Sennheiser GSP670 Headset
	ALSA: hda/realtek: Add quirk for Clevo PC50HS
	ASoC: DAPM: Fix missing kctl change notifications
	audit: fix possible null-pointer dereference in audit_filter_rules
	net: dsa: mt7530: correct ds->num_ports
	powerpc64/idle: Fix SP offsets when saving GPRs
	KVM: PPC: Book3S HV: Fix stack handling in idle_kvm_start_guest()
	KVM: PPC: Book3S HV: Make idle_kvm_start_guest() return 0 if it went to guest
	powerpc/idle: Don't corrupt back chain when going idle
	mm, slub: fix mismatch between reconstructed freelist depth and cnt
	mm, slub: fix potential memoryleak in kmem_cache_open()
	mm, slub: fix incorrect memcg slab count for bulk free
	KVM: nVMX: promptly process interrupts delivered while in guest mode
	nfc: nci: fix the UAF of rf_conn_info object
	isdn: cpai: check ctr->cnr to avoid array index out of bound
	netfilter: Kconfig: use 'default y' instead of 'm' for bool config option
	selftests: netfilter: remove stray bash debug line
	net: bridge: mcast: use multicast_membership_interval for IGMPv3
	drm: mxsfb: Fix NULL pointer dereference crash on unload
	net: hns3: fix the max tx size according to user manual
	gcc-plugins/structleak: add makefile var for disabling structleak
	ALSA: hda: intel: Allow repeatedly probing on codec configuration errors
	btrfs: deal with errors when checking if a dir entry exists during log replay
	net: stmmac: add support for dwmac 3.40a
	ARM: dts: spear3xx: Fix gmac node
	isdn: mISDN: Fix sleeping function called from invalid context
	platform/x86: intel_scu_ipc: Update timeout value in comment
	ALSA: hda: avoid write to STATESTS if controller is in reset
	libperf tests: Fix test_stat_cpu
	perf/x86/msr: Add Sapphire Rapids CPU support
	Input: snvs_pwrkey - add clk handling
	scsi: iscsi: Fix set_param() handling
	scsi: qla2xxx: Fix a memory leak in an error path of qla2x00_process_els()
	sched/scs: Reset the shadow stack when idle_task_exit
	net: hns3: fix for miscalculation of rx unused desc
	scsi: core: Fix shost->cmd_per_lun calculation in scsi_add_host_with_dma()
	can: isotp: isotp_sendmsg(): fix TX buffer concurrent access in isotp_sendmsg()
	s390/pci: fix zpci_zdev_put() on reserve
	bpf, test, cgroup: Use sk_{alloc,free} for test cases
	net: mdiobus: Fix memory leak in __mdiobus_register
	tracing: Have all levels of checks prevent recursion
	e1000e: Separate TGP board type from SPT
	selftests: bpf: fix backported ASSERT_FALSE
	ARM: 9122/1: select HAVE_FUTEX_CMPXCHG
	pinctrl: stm32: use valid pin identifier in stm32_pinctrl_resume()
	Linux 5.10.76

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia2eae7445f275464721daabb414beadf1e244c56
commit 4944ec82eb
Greg Kroah-Hartman, 2021-10-27 10:43:17 +02:00
116 changed files with 987 additions and 477 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 75
SUBLEVEL = 76
EXTRAVERSION =
NAME = Dare mighty things


@@ -88,6 +88,7 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA


@@ -71,7 +71,6 @@ apb {
isc: isc@f0008000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
status = "okay";
};
qspi1: spi@f0024000 {


@@ -47,7 +47,7 @@ dma@fc400000 {
};
gmac: eth@e0800000 {
compatible = "st,spear600-gmac";
compatible = "snps,dwmac-3.40a";
reg = <0xe0800000 0x8000>;
interrupts = <23 22>;
interrupt-names = "macirq", "eth_wake_irq";


@@ -19,7 +19,7 @@
*/
/ {
bus@4000000 {
bus@40000000 {
motherboard {
model = "V2M-P1";
arm,hbi = <0x190>;


@@ -295,7 +295,7 @@ power-vd10-s3 {
};
};
smb: bus@4000000 {
smb: bus@40000000 {
compatible = "simple-bus";
#address-cells = <2>;


@@ -9,7 +9,7 @@
static inline unsigned long arch_local_save_flags(void)
{
return RDCTL(CTL_STATUS);
return RDCTL(CTL_FSTATUS);
}
/*
@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
WRCTL(CTL_STATUS, flags);
WRCTL(CTL_FSTATUS, flags);
}
static inline void arch_local_irq_disable(void)


@@ -11,7 +11,7 @@
#endif
/* control register numbers */
#define CTL_STATUS 0
#define CTL_FSTATUS 0
#define CTL_ESTATUS 1
#define CTL_BSTATUS 2
#define CTL_IENABLE 3


@@ -310,12 +310,15 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2: /* illegal */
@@ -325,13 +328,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and clear sign bit */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2: /* illegal */
@@ -341,13 +347,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and invert sign bit */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2: /* illegal */
@@ -357,13 +366,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and set sign bit */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
@@ -376,6 +388,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 5: /* FRND */
switch (fmt) {
case 0:
@@ -389,7 +402,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(MAJOR_0C_EXCP);
}
} /* end of switch (subop) */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
if ((df & 2) || (fmt & 2)) {
@@ -419,6 +432,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* dbl/dbl */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -434,6 +448,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -449,6 +464,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -464,6 +480,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -479,6 +496,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -494,6 +512,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -509,10 +528,11 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
@@ -590,6 +610,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FTEST */
switch (fmt) {
case 0:
@@ -609,8 +630,10 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3:
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
if (r2 == 0)
@@ -633,6 +656,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
@@ -645,6 +669,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 2: /* FMPY */
switch (fmt) {
case 0:
@@ -657,6 +682,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
@@ -669,6 +695,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:
@@ -681,6 +708,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of class 3 switch */
} /* end of switch(class) */
@@ -736,10 +764,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2:
@@ -747,10 +777,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2:
@@ -758,10 +790,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2:
@@ -769,10 +803,12 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
@@ -785,6 +821,7 @@ u_int fpregs[];
case 3:
return(MAJOR_0E_EXCP);
}
BUG();
case 5: /* FRMD */
switch (fmt) {
case 0:
@@ -798,7 +835,7 @@ u_int fpregs[];
return(MAJOR_0E_EXCP);
}
} /* end of switch (subop */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
/*
@@ -826,6 +863,7 @@ u_int fpregs[];
case 3: /* dbl/dbl */
return(MAJOR_0E_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -841,6 +879,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -856,6 +895,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -871,6 +911,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -886,6 +927,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -901,6 +943,7 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
@@ -916,9 +959,11 @@ u_int fpregs[];
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
/*
* Be careful out there.
@@ -994,6 +1039,7 @@ u_int fpregs[];
}
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
/*
* Be careful out there.
@@ -1026,6 +1072,7 @@ u_int fpregs[];
return(dbl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
@@ -1035,6 +1082,7 @@ u_int fpregs[];
return(dbl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 2: /* FMPY or XMPYU */
/*
* check for integer multiply (x bit set)
@@ -1071,6 +1119,7 @@ u_int fpregs[];
&fpregs[r2],&fpregs[t],status));
}
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
@@ -1080,6 +1129,7 @@ u_int fpregs[];
return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:


@@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
/* use stack red zone rather than a new frame for saving regs */
std r2,-8*0(r1)
std r14,-8*1(r1)
std r15,-8*2(r1)
std r16,-8*3(r1)
std r17,-8*4(r1)
std r18,-8*5(r1)
std r19,-8*6(r1)
std r20,-8*7(r1)
std r21,-8*8(r1)
std r22,-8*9(r1)
std r23,-8*10(r1)
std r24,-8*11(r1)
std r25,-8*12(r1)
std r26,-8*13(r1)
std r27,-8*14(r1)
std r28,-8*15(r1)
std r29,-8*16(r1)
std r30,-8*17(r1)
std r31,-8*18(r1)
std r4,-8*19(r1)
std r5,-8*20(r1)
/*
* Use the stack red zone rather than a new frame for saving regs since
* in the case of no GPR loss the wakeup code branches directly back to
* the caller without deallocating the stack frame first.
*/
std r2,-8*1(r1)
std r14,-8*2(r1)
std r15,-8*3(r1)
std r16,-8*4(r1)
std r17,-8*5(r1)
std r18,-8*6(r1)
std r19,-8*7(r1)
std r20,-8*8(r1)
std r21,-8*9(r1)
std r22,-8*10(r1)
std r23,-8*11(r1)
std r24,-8*12(r1)
std r25,-8*13(r1)
std r26,-8*14(r1)
std r27,-8*15(r1)
std r28,-8*16(r1)
std r29,-8*17(r1)
std r30,-8*18(r1)
std r31,-8*19(r1)
std r4,-8*20(r1)
std r5,-8*21(r1)
/* 168 bytes */
PPC_STOP
b . /* catch bugs */
@@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss)
*/
_GLOBAL(idle_return_gpr_loss)
ld r1,PACAR1(r13)
ld r4,-8*19(r1)
ld r5,-8*20(r1)
ld r4,-8*20(r1)
ld r5,-8*21(r1)
mtlr r4
mtcr r5
/*
@@ -98,38 +102,40 @@ _GLOBAL(idle_return_gpr_loss)
* from PACATOC. This could be avoided for that less common case
* if KVM saved its r2.
*/
ld r2,-8*0(r1)
ld r14,-8*1(r1)
ld r15,-8*2(r1)
ld r16,-8*3(r1)
ld r17,-8*4(r1)
ld r18,-8*5(r1)
ld r19,-8*6(r1)
ld r20,-8*7(r1)
ld r21,-8*8(r1)
ld r22,-8*9(r1)
ld r23,-8*10(r1)
ld r24,-8*11(r1)
ld r25,-8*12(r1)
ld r26,-8*13(r1)
ld r27,-8*14(r1)
ld r28,-8*15(r1)
ld r29,-8*16(r1)
ld r30,-8*17(r1)
ld r31,-8*18(r1)
ld r2,-8*1(r1)
ld r14,-8*2(r1)
ld r15,-8*3(r1)
ld r16,-8*4(r1)
ld r17,-8*5(r1)
ld r18,-8*6(r1)
ld r19,-8*7(r1)
ld r20,-8*8(r1)
ld r21,-8*9(r1)
ld r22,-8*10(r1)
ld r23,-8*11(r1)
ld r24,-8*12(r1)
ld r25,-8*13(r1)
ld r26,-8*14(r1)
ld r27,-8*15(r1)
ld r28,-8*16(r1)
ld r29,-8*17(r1)
ld r30,-8*18(r1)
ld r31,-8*19(r1)
blr
/*
* This is the sequence required to execute idle instructions, as
* specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
*
* The 0(r1) slot is used to save r2 in isa206, so use that here.
* We have to store a GPR somewhere, ptesync, then reload it, and create
* a false dependency on the result of the load. It doesn't matter which
* GPR we store, or where we store it. We have already stored r2 to the
* stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
*/
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
std r2,0(r1); \
std r2,-8(r1); \
ptesync; \
ld r2,0(r1); \
ld r2,-8(r1); \
236: cmpd cr0,r2,r2; \
bne 236b; \
IDLE_INST; \
@@ -154,28 +160,32 @@ _GLOBAL(isa206_idle_insn_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
/* use stack red zone rather than a new frame for saving regs */
std r2,-8*0(r1)
std r14,-8*1(r1)
std r15,-8*2(r1)
std r16,-8*3(r1)
std r17,-8*4(r1)
std r18,-8*5(r1)
std r19,-8*6(r1)
std r20,-8*7(r1)
std r21,-8*8(r1)
std r22,-8*9(r1)
std r23,-8*10(r1)
std r24,-8*11(r1)
std r25,-8*12(r1)
std r26,-8*13(r1)
std r27,-8*14(r1)
std r28,-8*15(r1)
std r29,-8*16(r1)
std r30,-8*17(r1)
std r31,-8*18(r1)
std r4,-8*19(r1)
std r5,-8*20(r1)
/*
* Use the stack red zone rather than a new frame for saving regs since
* in the case of no GPR loss the wakeup code branches directly back to
* the caller without deallocating the stack frame first.
*/
std r2,-8*1(r1)
std r14,-8*2(r1)
std r15,-8*3(r1)
std r16,-8*4(r1)
std r17,-8*5(r1)
std r18,-8*6(r1)
std r19,-8*7(r1)
std r20,-8*8(r1)
std r21,-8*9(r1)
std r22,-8*10(r1)
std r23,-8*11(r1)
std r24,-8*12(r1)
std r25,-8*13(r1)
std r26,-8*14(r1)
std r27,-8*15(r1)
std r28,-8*16(r1)
std r29,-8*17(r1)
std r30,-8*18(r1)
std r31,-8*19(r1)
std r4,-8*20(r1)
std r5,-8*21(r1)
cmpwi r3,PNV_THREAD_NAP
bne 1f
IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)


@@ -1578,8 +1578,6 @@ void __cpu_die(unsigned int cpu)
void arch_cpu_idle_dead(void)
{
sched_preempt_enable_no_resched();
/*
* Disable on the down path. This will be re-enabled by
* start_secondary() via start_secondary_resume() below


@@ -292,13 +292,16 @@ kvm_novcpu_exit:
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
_GLOBAL(idle_kvm_start_guest)
ld r4,PACAEMERGSP(r13)
mfcr r5
mflr r0
std r1,0(r4)
std r5,8(r4)
std r0,16(r4)
subi r1,r4,STACK_FRAME_OVERHEAD
std r5, 8(r1) // Save CR in caller's frame
std r0, 16(r1) // Save LR in caller's frame
// Create frame on emergency stack
ld r4, PACAEMERGSP(r13)
stdu r1, -SWITCH_FRAME_SIZE(r4)
// Switch to new frame on emergency stack
mr r1, r4
std r3, 32(r1) // Save SRR1 wakeup value
SAVE_NVGPRS(r1)
/*
@@ -350,6 +353,10 @@ kvm_unsplit_wakeup:
kvm_secondary_got_guest:
// About to go to guest, clear saved SRR1
li r0, 0
std r0, 32(r1)
/* Set HSTATE_DSCR(r13) to something sensible */
ld r6, PACA_DSCR_DEFAULT(r13)
std r6, HSTATE_DSCR(r13)
@@ -441,13 +448,12 @@ kvm_no_guest:
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
/* set up r3 for return */
mfspr r3,SPRN_SRR1
// Return SRR1 wakeup value, or 0 if we went into the guest
ld r3, 32(r1)
REST_NVGPRS(r1)
addi r1, r1, STACK_FRAME_OVERHEAD
ld r0, 16(r1)
ld r5, 8(r1)
ld r1, 0(r1)
ld r1, 0(r1) // Switch back to caller stack
ld r0, 16(r1) // Reload LR
ld r5, 8(r1) // Reload CR
mtlr r0
mtcr r5
blr


@@ -205,6 +205,9 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
void zpci_remove_reserved_devices(void);


@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
zpci_zdev_put(zdev);
zpci_device_reserved(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
@@ -787,6 +787,39 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
return rc;
}
bool zpci_is_device_configured(struct zpci_dev *zdev)
{
enum zpci_state state = zdev->state;
return state != ZPCI_FN_STATE_RESERVED &&
state != ZPCI_FN_STATE_STANDBY;
}
/**
* zpci_device_reserved() - Mark device as reserved
* @zdev: the zpci_dev that was reserved
*
* Handle the case that a given zPCI function was reserved by another system.
* After a call to this function the zpci_dev can not be found via
* get_zdev_by_fid() anymore but may still be accessible via existing
* references though it will not be functional anymore.
*/
void zpci_device_reserved(struct zpci_dev *zdev)
{
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
/*
* Remove device from zpci_list as it is going away. This also
* makes sure we ignore subsequent zPCI events for this device.
*/
spin_lock(&zpci_list_lock);
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
zdev->state = ZPCI_FN_STATE_RESERVED;
zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
zpci_zdev_put(zdev);
}
void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -802,6 +835,12 @@ void zpci_release_device(struct kref *kref)
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
spin_lock(&zpci_list_lock);
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
fallthrough;
case ZPCI_FN_STATE_RESERVED:
zpci_cleanup_bus_resources(zdev);
zpci_bus_device_unregister(zdev);
zpci_destroy_iommu(zdev);
@@ -809,10 +848,6 @@ void zpci_release_device(struct kref *kref)
default:
break;
}
spin_lock(&zpci_list_lock);
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
zpci_dbg(3, "rem fid:%x\n", zdev->fid);
kfree(zdev);
}


@@ -146,7 +146,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
zpci_zdev_put(zdev);
zpci_device_reserved(zdev);
}
break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */
@@ -156,7 +156,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
zpci_zdev_put(zdev);
zpci_device_reserved(zdev);
break;
default:
break;


@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_BROADWELL_D:
case INTEL_FAM6_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_ATOM_SILVERMONT:
case INTEL_FAM6_ATOM_SILVERMONT_D:


@@ -6316,18 +6316,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
/*
* If we are running L2 and L1 has a new pending interrupt
* which can be injected, we should re-evaluate
* what should be done with this new L1 interrupt.
* If L1 intercepts external-interrupts, we should
* exit from L2 to L1. Otherwise, interrupt should be
* delivered directly to L2.
* which can be injected, this may cause a vmexit or it may
* be injected into L2. Either way, this interrupt will be
* processed via KVM_REQ_EVENT, not RVI, because we do not use
* virtual interrupt delivery to inject L1 interrupts into L2.
*/
if (is_guest_mode(vcpu) && max_irr_updated) {
if (nested_exit_on_intr(vcpu))
kvm_vcpu_exiting_guest_mode(vcpu);
else
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
if (is_guest_mode(vcpu) && max_irr_updated)
kvm_make_request(KVM_REQ_EVENT, vcpu);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
}


@@ -51,9 +51,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
@@ -68,9 +65,11 @@ __read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
* NB: needs to live in .data because it's used by xen_prepare_pvh which runs
* before clearing the bss.
* NB: These need to live in .data or alike because they're used by
* xen_prepare_pvh() which runs before clearing the bss.
*/
enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
uint32_t xen_start_flags __section(".data") = 0;
EXPORT_SYMBOL(xen_start_flags);


@@ -51,8 +51,12 @@ void platform_power_off(void)
void platform_restart(void)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
/* Try software reset first. */
WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
/* If software reset did not work, flush and reset the mmu,
* simulate a processor reset, and jump to the reset vector.
*/
cpu_reset();
/* control never gets here */
}
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
#endif
#ifdef CONFIG_OF
#ifdef CONFIG_USE_OF
static void __init xtfpga_clk_setup(struct device_node *np)
{
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
*/
arch_initcall(xtavnet_init);
#endif /* CONFIG_OF */
#endif /* CONFIG_USE_OF */


@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME


@@ -33,6 +33,8 @@ config DRM_AMD_DC_HDCP
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
depends on DRM_AMDGPU_SI
depends on DRM_AMD_DC
default n
help
Choose this option to enable new AMD DC support for SI asics


@@ -268,7 +268,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
mxsfb_enable_axi_clk(mxsfb);
mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
/* Disable and clear VBLANK IRQ */
writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
mxsfb_disable_axi_clk(mxsfb);
}


@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,
.hdisplay = 800,
.hsync_start = 800 + 6,
.hsync_end = 800 + 6 + 15,
.htotal = 800 + 6 + 15 + 16,
.hsync_start = 800 + 52,
.hsync_end = 800 + 52 + 8,
.htotal = 800 + 52 + 8 + 48,
.vdisplay = 1280,
.vsync_start = 1280 + 8,
.vsync_end = 1280 + 8 + 48,
.vtotal = 1280 + 8 + 48 + 52,
.vsync_start = 1280 + 16,
.vsync_end = 1280 + 16 + 6,
.vtotal = 1280 + 16 + 6 + 15,
.width_mm = 135,
.height_mm = 217,


@@ -3,6 +3,7 @@
// Driver for the IMX SNVS ON/OFF Power Key
// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void imx_snvs_pwrkey_disable_clk(void *data)
{
clk_disable_unprepare(data);
}
static void imx_snvs_pwrkey_act(void *pdata)
{
struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct pwrkey_drv_data *pdata;
struct input_dev *input;
struct device_node *np;
struct clk *clk;
int error;
u32 vid;
@@ -134,6 +141,28 @@
dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
}
clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
if (error) {
dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
ERR_PTR(error));
return error;
}
error = devm_add_action_or_reset(&pdev->dev,
imx_snvs_pwrkey_disable_clk, clk);
if (error) {
dev_err(&pdev->dev,
"Failed to register clock cleanup handler (%pe)\n",
ERR_PTR(error));
return error;
}
pdata->wakeup = of_property_read_bool(np, "wakeup-source");
pdata->irq = platform_get_irq(pdev, 0);


@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
ctr_down(ctr, CAPI_CTR_DETACHED);
if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
err = -EINVAL;
goto unlock_out;
}
if (capi_controller[ctr->cnr - 1] != ctr) {
err = -EINVAL;
goto unlock_out;


@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
nj_disable_hwirq(card);
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
card->isac.release(&card->isac);
spin_unlock_irqrestore(&card->lock, flags);
card->isac.release(&card->isac);
release_region(card->base, card->base_s);
card->base_s = 0;
}


@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
}
if (!netif_running(ndev))
return 0;
netif_stop_queue(ndev);
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@
u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
}
netif_device_attach(ndev);
netif_start_queue(ndev);
return 0;
}


@@ -731,16 +731,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
/* do that only for first channel */
if (!prev_dev && chan->pciec_card)
peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
if (!dev) {
/* do that only for first channel */
if (chan->pciec_card)
peak_pciec_remove(chan->pciec_card);
if (!dev)
break;
}
priv = netdev_priv(dev);
chan = priv->priv;
}


@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
/* no error bit (so, no error skb, back to active state) */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
/* back to (or still in) ERROR_ACTIVE state */
new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
return 0;
}
/* state hasn't changed */


@@ -229,7 +229,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02


@@ -981,9 +981,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
if (!dsa_is_user_port(ds, port))
return 0;
mutex_lock(&priv->reg_mutex);
/* Allow the user port gets connected to the cpu port and also
@@ -1006,9 +1003,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
if (!dsa_is_user_port(ds, port))
return;
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
@@ -2593,7 +2587,7 @@ mt7530_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = DSA_MAX_PORTS;
priv->ds->num_ports = MT7530_NUM_PORTS;
/* Use the mediatek,mcm property to distinguish the hardware type, which
* causes slight differences in the power-on sequence.


@@ -157,7 +157,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
{ ENETC_PM0_TERR, "MAC tx frames" },
{ ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },


@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
{
const struct pci_device_id *pci_id;
struct hnae3_ae_dev *ae_dev;
if (!ae_algo)
return;
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!pci_id)
continue;
if (IS_ENABLED(CONFIG_PCI_IOV))
pci_disable_sriov(ae_dev->pdev);
}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
/* we are keeping things simple and using single lock for all the
* list. This is a non-critical code so other updations, if happen
* in parallel, can wait.


@@ -754,6 +754,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);


@@ -1283,7 +1283,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1300,8 +1299,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len >
HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
@@ -1336,8 +1334,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
bd_num))
if (hns3_skb_linearize(ring, skb, bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
@@ -2424,6 +2421,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc[i].addr = 0;
ring->desc_cb[i].refill = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -2501,6 +2499,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
return ret;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring->desc_cb[i].refill = 1;
return 0;
}
@@ -2531,12 +2530,14 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring->desc_cb[i].refill = 1;
ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -2634,10 +2635,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
return ring->desc_num;
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
/* Return true if there is any allocation failure */
static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
@@ -2662,7 +2667,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
break;
writel(i, ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
return true;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2675,6 +2683,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
return false;
}
static bool hns3_page_is_reusable(struct page *page)
@@ -2905,6 +2914,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
ring->desc[ring->next_to_clean].rx.bd_base_info &=
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
ring->desc_cb[ring->next_to_clean].refill = 0;
ring->next_to_clean += 1;
if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -3218,6 +3228,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
bool failure = false;
int recv_pkts = 0;
int err;
@@ -3226,9 +3237,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
failure = failure ||
hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = 0;
}
/* Poll one pkt */
@@ -3247,11 +3258,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
/* Make all data has been write before submit */
if (unused_count > 0)
hns3_nic_alloc_rx_buffers(ring, unused_count);
return recv_pkts;
return failure ? budget : recv_pkts;
}
static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)


@@ -170,11 +170,9 @@ enum hns3_nic_state {
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
#define HNS3_MAX_TSO_SIZE \
(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
#define HNS3_MAX_TSO_SIZE 1048576U
#define HNS3_MAX_NON_TSO_SIZE 9728U
#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -285,6 +283,7 @@ struct hns3_desc_cb {
u32 length; /* length of the buffer */
u16 reuse_flag;
u16 refill;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;


@@ -134,6 +134,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
/* The hardware will switch to sp mode if bandwidth is
* 0, so limit ets bandwidth must be greater than 0.
*/
if (!ets->tc_tx_bw[i]) {
dev_err(&hdev->pdev->dev,
"tc%u ets bw cannot be 0\n", i);
return -EINVAL;
}
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;


@@ -11518,6 +11518,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}


@@ -671,6 +671,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
for (; k < HNAE3_MAX_TC; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}


@@ -2160,9 +2160,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
while ((hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
!= HNAE3_NONE_RESET)
hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {


@@ -113,7 +113,8 @@ enum e1000_boards {
board_pch2lan,
board_pch_lpt,
board_pch_spt,
board_pch_cnp
board_pch_cnp,
board_pch_tgp
};
struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);


@@ -4811,7 +4811,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 ctrl_ext, txdctl, snoop;
u32 ctrl_ext, txdctl, snoop, fflt_dbg;
s32 ret_val;
u16 i;
@@ -4870,6 +4870,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
/* Enable workaround for packet loss issue on TGP PCH
* Do not gate DMA clock from the modPHY block
*/
if (mac->type >= e1000_pch_tgp) {
fflt_dbg = er32(FFLT_DBG);
fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
ew32(FFLT_DBG, fflt_dbg);
}
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
@@ -5990,3 +5999,23 @@ const struct e1000_info e1000_pch_cnp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
const struct e1000_info e1000_pch_tgp_info = {
.mac = e1000_pch_tgp,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
| FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};


@@ -286,6 +286,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
/* Don't gate wake DMA clock */
#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);


@@ -50,6 +50,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
@@ -7837,20 +7838,20 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};


@@ -24,6 +24,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
case ICE_DEV_ID_E810_XXV_BACKPLANE:
case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810;
break;


@@ -21,6 +21,10 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */


@@ -1669,7 +1669,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type &&
idx--)
idx-- == 0)
return i;
WARN_ON_ONCE(1);
@@ -1829,7 +1829,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {


@@ -4773,6 +4773,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },


@@ -71,6 +71,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},


@@ -605,7 +605,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id != DWMAC_CORE_5_10)
if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;


@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
plat->has_gmac = 1;
plat->enh_desc = 1;
plat->tx_coe = 1;
plat->bugged_jumbo = 1;
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") ||


@@ -544,6 +544,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
put_device(&bus->dev);
return -EINVAL;
}


@@ -117,6 +117,7 @@ config USB_LAN78XX
select PHYLIB
select MICROCHIP_PHY
select FIXED_PHY
select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.


@@ -109,14 +109,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
switch (zdev->state) {
case ZPCI_FN_STATE_STANDBY:
*value = 0;
break;
default:
*value = 1;
break;
}
*value = zpci_is_device_configured(zdev) ? 1 : 0;
return 0;
}


@@ -1645,8 +1645,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups;
int i;
for (i = g->pin; i < g->pin + pctl->ngroups; i++)
stm32_pinctrl_restore_gpio_regs(pctl, i);
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0;
}


@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return -ETIMEDOUT;
}
/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;


@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
error = scsi_init_sense_cache(shost);


@@ -414,7 +414,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
goto done_free_fcport;
done_free_fcport:
if (bsg_request->msgcode == FC_BSG_RPT_ELS)
if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport);
done:
return rval;


@@ -2907,8 +2907,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) {
err = transport->set_param(conn, ev->u.set_param.param,


@@ -276,8 +276,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)


@@ -567,8 +567,11 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_command *cmd;
struct xhci_segment *new_seg;
struct xhci_segment *halted_seg = NULL;
union xhci_trb *new_deq;
int new_cycle;
union xhci_trb *halted_trb;
int index = 0;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
@@ -606,7 +609,27 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
new_cycle = hw_dequeue & 0x1;
/*
* Quirk: xHC write-back of the DCS field in the hardware dequeue
* pointer is wrong - use the cycle state of the TRB pointed to by
* the dequeue pointer.
*/
if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
!(ep->ep_state & EP_HAS_STREAMS))
halted_seg = trb_in_td(xhci, td->start_seg,
td->first_trb, td->last_trb,
hw_dequeue & ~0xf, false);
if (halted_seg) {
index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
sizeof(*halted_trb);
halted_trb = &halted_seg->trbs[index];
new_cycle = halted_trb->generic.field[3] & 0x1;
xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
(u8)(hw_dequeue & 0x1), index, new_cycle);
} else {
new_cycle = hw_dequeue & 0x1;
}
/*
* We want to find the pointer, segment and cycle state of the new trb


@@ -1902,6 +1902,7 @@ struct xhci_hcd {
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
unsigned int num_active_eps;
unsigned int limit_active_eps;


@@ -894,9 +894,11 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
}
/*
* helper function to see if a given name and sequence number found
* in an inode back reference are already in a directory and correctly
* point to this inode
* See if a given name and sequence number found in an inode back reference are
* already in a directory and correctly point to this inode.
*
* Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
* exists.
*/
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,
@@ -905,29 +907,35 @@
{
struct btrfs_dir_item *di;
struct btrfs_key location;
int match = 0;
int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
if (di && !IS_ERR(di)) {
if (IS_ERR(di)) {
if (PTR_ERR(di) != -ENOENT)
ret = PTR_ERR(di);
goto out;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
} else {
goto out;
btrfs_release_path(path);
}
btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
match = 1;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid == objectid)
ret = 1;
}
out:
btrfs_release_path(path);
return match;
return ret;
}
/*
@@ -1477,10 +1485,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
/* if we already have a perfect match, we're done */
if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen)) {
ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen);
if (ret < 0) {
goto out;
} else if (ret == 0) {
/*
* look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name
@@ -1538,6 +1548,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
btrfs_update_inode(trans, root, inode);
}
/* Else, ret == 1, we already have a perfect match, we're done. */
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name);

@@ -2334,7 +2334,6 @@ static int unsafe_request_wait(struct inode *inode)
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid;
@@ -2369,14 +2368,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0)
ret = err;
if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
spin_lock(&file->f_lock);
err = errseq_check_and_advance(&ci->i_meta_err,
&fi->meta_err);
spin_unlock(&file->f_lock);
if (err < 0)
ret = err;
}
err = file_check_and_advance_wb_err(file);
if (err < 0)
ret = err;
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;

@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
fi->meta_err = errseq_sample(&ci->i_meta_err);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0;

@@ -529,8 +529,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci);
ci->i_meta_err = 0;
return &ci->vfs_inode;
}

@@ -1481,7 +1481,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req;
struct rb_node *p;
struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
@@ -1490,16 +1489,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid);
if (req->r_target_inode) {
/* dropping unsafe change of inode's attributes */
ci = ceph_inode(req->r_target_inode);
errseq_set(&ci->i_meta_err, -EIO);
}
if (req->r_unsafe_dir) {
/* dropping unsafe directory operation */
ci = ceph_inode(req->r_unsafe_dir);
errseq_set(&ci->i_meta_err, -EIO);
}
if (req->r_target_inode)
mapping_set_error(req->r_target_inode->i_mapping, -EIO);
if (req->r_unsafe_dir)
mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
@@ -1668,7 +1661,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock);
if (dirty_dropped) {
errseq_set(&ci->i_meta_err, -EIO);
mapping_set_error(inode->i_mapping, -EIO);
if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 &&

@@ -997,16 +997,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
struct ceph_fs_client *other = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
dout("ceph_compare_super %p\n", sb);
if (compare_mount_options(fsopt, opt, other)) {
if (compare_mount_options(fsopt, opt, fsc)) {
dout("monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
dout("fsid doesn't match\n");
return 0;
}
@@ -1014,6 +1014,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
dout("flags differ\n");
return 0;
}
if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
dout("client is blocklisted (and CLEANRECOVER is not set)\n");
return 0;
}
if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
dout("client has been forcibly unmounted\n");
return 0;
}
return 1;
}

@@ -430,8 +430,6 @@ struct ceph_inode_info {
struct fscache_cookie *fscache;
u32 i_fscache_gen;
#endif
errseq_t i_meta_err;
struct inode vfs_inode; /* at end */
};
@@ -773,7 +771,6 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock;
struct list_head rw_contexts;
errseq_t meta_err;
u32 filp_gen;
atomic_t num_locks;
};

@@ -5559,7 +5559,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags |
if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags ||
sqe->splice_fd_in)
return -EINVAL;

@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
struct fd f = fdget(fd);
int ret = -EBADF;
if (!f.file)
if (!f.file || !(f.file->f_mode & FMODE_READ))
goto out;
ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);

@@ -792,7 +792,10 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
svc_xprt_put(xprt);
}
out_err:
nfsd_destroy(net);
if (!list_empty(&nn->nfsd_serv->sv_permsocks))
nn->nfsd_serv->sv_nrthreads--;
else
nfsd_destroy(net);
return err;
}

@@ -7047,7 +7047,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, i, has_data, num_pages = 0;
int ret, has_data, num_pages = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@@ -7056,26 +7056,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
struct page **pages = NULL;
loff_t end = osb->s_clustersize;
struct page *page = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
has_data = i_size_read(inode) ? 1 : 0;
if (has_data) {
pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
return ret;
}
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
goto free_pages;
goto out;
}
}
@@ -7095,7 +7086,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
if (has_data) {
unsigned int page_end;
unsigned int page_end = min_t(unsigned, PAGE_SIZE,
osb->s_clustersize);
u64 phys;
ret = dquot_alloc_space_nodirty(inode,
@@ -7119,15 +7111,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
/*
* Non sparse file systems zero on extend, so no need
* to do that now.
*/
if (!ocfs2_sparse_alloc(osb) &&
PAGE_SIZE < osb->s_clustersize)
end = PAGE_SIZE;
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
&num_pages);
if (ret) {
mlog_errno(ret);
need_free = 1;
@@ -7138,20 +7123,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
ret = ocfs2_read_inline_data(inode, page, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
page_end = PAGE_SIZE;
if (PAGE_SIZE > osb->s_clustersize)
page_end = osb->s_clustersize;
for (i = 0; i < num_pages; i++)
ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
pages[i], i > 0, &phys);
ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
&phys);
}
spin_lock(&oi->ip_lock);
@@ -7182,8 +7162,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
if (pages)
ocfs2_unlock_and_free_pages(pages, num_pages);
if (page)
ocfs2_unlock_and_free_pages(&page, num_pages);
out_commit:
if (ret < 0 && did_quota)
@@ -7207,8 +7187,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
free_pages:
kfree(pages);
return ret;
}

@@ -2171,11 +2171,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
if (ocfs2_clusterinfo_valid(osb)) {
/*
* ci_stack and ci_cluster in ocfs2_cluster_info may not be null
* terminated, so make sure no overflow happens here by using
* memcpy. Destination strings will always be null terminated
* because osb is allocated using kzalloc.
*/
osb->osb_stackflags =
OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
strlcpy(osb->osb_cluster_stack,
memcpy(osb->osb_cluster_stack,
OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
OCFS2_STACK_LABEL_LEN + 1);
OCFS2_STACK_LABEL_LEN);
if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR,
"couldn't mount because of an invalid "
@@ -2184,9 +2190,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL;
goto bail;
}
strlcpy(osb->osb_cluster_name,
memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
OCFS2_CLUSTER_NAME_LEN + 1);
OCFS2_CLUSTER_NAME_LEN);
} else {
/* The empty string is identical with classic tools that
* don't know about s_cluster_info. */
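The strlcpy() to memcpy() switch matters because ci_stack and ci_cluster are fixed-size on-disk fields that need not be NUL-terminated, while strlcpy() first runs strlen() on its source. A minimal userspace sketch of the failure mode (buffer names are illustrative; the zeroed destination byte plays the role of the kzalloc'd osb):

#include <stdio.h>
#include <string.h>

#define LABEL_LEN 4

int main(void)
{
	/* fixed-size, non NUL-terminated on-disk field */
	char on_disk[LABEL_LEN] = { 'o', '2', 'c', 'b' };

	/* destination is one byte longer and zero-initialized, as with kzalloc() */
	char dest[LABEL_LEN + 1] = { 0 };

	/*
	 * strlcpy(dest, on_disk, LABEL_LEN + 1) would call strlen(on_disk)
	 * and read past the 4-byte field until it happens to hit a zero
	 * byte: an out-of-bounds read.  memcpy() copies exactly LABEL_LEN
	 * bytes and the pre-zeroed final byte keeps dest NUL-terminated.
	 */
	memcpy(dest, on_disk, LABEL_LEN);
	printf("label: %s (len %zu)\n", dest, strlen(dest));
	return 0;
}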

@@ -1834,9 +1834,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
if (mode_wp && mode_dontwake)
return -EINVAL;
ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
uffdio_wp.range.len, mode_wp,
&ctx->mmap_changing);
if (mmget_not_zero(ctx->mm)) {
ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
uffdio_wp.range.len, mode_wp,
&ctx->mmap_changing);
mmput(ctx->mm);
} else {
return -ESRCH;
}
if (ret)
return ret;
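mmget_not_zero() only succeeds while the mm's user count is still above zero, so the ioctl can no longer operate on an address space that exit_mmap() is already tearing down; when it fails the caller returns -ESRCH. A rough userspace analogue of that take-a-reference-only-if-still-live pattern with C11 atomics (the struct and function names are invented for this sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_like { atomic_int users; };

/* succeed only if the count is currently non-zero (compare-and-swap loop) */
static bool get_not_zero(struct mm_like *mm)
{
	int old = atomic_load(&mm->users);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&mm->users, &old, old + 1))
			return true;	/* took a reference */
	}
	return false;			/* already dead, caller must bail out */
}

static void put_ref(struct mm_like *mm)
{
	atomic_fetch_sub(&mm->users, 1);
}

int main(void)
{
	struct mm_like live = { 1 }, dead = { 0 };

	if (get_not_zero(&live)) {
		puts("live mm: safe to operate, then drop the reference");
		put_ref(&live);
	}
	if (!get_not_zero(&dead))
		puts("dead mm: return -ESRCH instead of touching it");
	return 0;
}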

@@ -104,7 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
#if defined(CONFIG_UM) || defined(CONFIG_IA64)
#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its

@@ -225,6 +225,7 @@ struct hda_codec {
#endif
/* misc flags */
unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */

@@ -653,7 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break;
case AUDIT_SADDR_FAM:
if (ctx->sockaddr)
if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val);
break;

@@ -1300,6 +1300,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(dma_debug_disabled()))
return;
for_each_sg(sg, s, nents, i) {
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s)))
check_for_illegal_area(dev, sg_virt(s), s->length);
}
for_each_sg(sg, s, mapped_ents, i) {
entry = dma_entry_alloc();
if (!entry)
@@ -1315,12 +1321,6 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->sg_call_ents = nents;
entry->sg_mapped_ents = mapped_ents;
check_for_stack(dev, sg_page(s), s->offset);
if (!PageHighMem(sg_page(s))) {
check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
}
check_sg_segment(dev, s);
add_dma_entry(entry);

@@ -6931,6 +6931,7 @@ void idle_task_exit(void)
finish_arch_post_lock_switch();
}
scs_task_reset(current);
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}

@@ -6985,7 +6985,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op;
int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;
@@ -7060,7 +7060,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;

@@ -573,18 +573,6 @@ struct tracer {
* then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
*
* Now if the arch does not support a feature, and it calls
* the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already
* did. The recursion that we are protecting against will
* go through the same steps again.
*
* To prevent the multiple recursion checks, if a recursion
* bit is set that is higher than the MAX bit of the current
* check, then we know that the check was made by the previous
* caller, and we can skip the current check.
*/
enum {
/* Function recursion bits */
@@ -592,12 +580,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
TRACE_FTRACE_TRANSITION_BIT,
/* INTERNAL_BITs must be greater than FTRACE_BITs */
/* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,
TRACE_BRANCH_BIT,
/*
@@ -637,12 +627,6 @@ enum {
* function is called to clear it.
*/
TRACE_GRAPH_NOTRACE_BIT,
/*
* When transitioning between context, the preempt_count() may
* not be correct. Allow for a single recursion to cover this case.
*/
TRACE_TRANSITION_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@@ -662,12 +646,18 @@ enum {
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
enum {
TRACE_CTX_NMI,
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
TRACE_CTX_TRANSITION,
};
static __always_inline int trace_get_context_bit(void)
{
@@ -675,59 +665,48 @@ static __always_inline int trace_get_context_bit(void)
if (in_interrupt()) {
if (in_nmi())
bit = 0;
bit = TRACE_CTX_NMI;
else if (in_irq())
bit = 1;
bit = TRACE_CTX_IRQ;
else
bit = 2;
bit = TRACE_CTX_SOFTIRQ;
} else
bit = 3;
bit = TRACE_CTX_NORMAL;
return bit;
}
static __always_inline int trace_test_and_set_recursion(int start, int max)
static __always_inline int trace_test_and_set_recursion(int start)
{
unsigned int val = current->trace_recursion;
int bit;
/* A previous recursion check was made */
if ((val & TRACE_CONTEXT_MASK) > max)
return 0;
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
bit = TRACE_TRANSITION_BIT;
bit = start + TRACE_CTX_TRANSITION;
if (trace_recursion_test(bit))
return -1;
trace_recursion_set(bit);
barrier();
return bit + 1;
return bit;
}
/* Normal check passed, clear the transition to allow it again */
trace_recursion_clear(TRACE_TRANSITION_BIT);
val |= 1 << bit;
current->trace_recursion = val;
barrier();
return bit + 1;
return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
unsigned int val = current->trace_recursion;
if (!bit)
return;
bit--;
bit = 1 << bit;
val &= ~bit;
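The rework gives each tracer level one recursion bit per context (NMI, IRQ, softirq, normal) plus a transition bit, and trace_test_and_set_recursion() now returns the bit it claimed, or -1 on genuine recursion. A compact single-threaded userspace model of the same bit discipline (current->trace_recursion becomes a plain variable and the context is fixed, both assumptions of the sketch):

#include <stdio.h>

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL, CTX_TRANSITION };

static unsigned int recursion;	/* stand-in for current->trace_recursion */

/* the kernel derives this from preempt_count(); pretend we are in task context */
static int get_context_bit(void) { return CTX_NORMAL; }

static int test_and_set_recursion(int start)
{
	int bit = start + get_context_bit();

	if (recursion & (1U << bit)) {
		/*
		 * preempt_count() can be stale while switching context, so
		 * permit exactly one extra level via the transition bit.
		 */
		bit = start + CTX_TRANSITION;
		if (recursion & (1U << bit))
			return -1;	/* genuine recursion, reject */
	}
	recursion |= 1U << bit;
	return bit;
}

static void clear_recursion(int bit) { recursion &= ~(1U << bit); }

int main(void)
{
	int a = test_and_set_recursion(0);	/* claims the NORMAL bit */
	int b = test_and_set_recursion(0);	/* falls back to TRANSITION */
	int c = test_and_set_recursion(0);	/* rejected */

	printf("a=%d b=%d c=%d\n", a, b, c);	/* prints a=3 b=4 c=-1 */
	clear_recursion(b);
	clear_recursion(a);
	return 0;
}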

@@ -144,7 +144,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
pc = preempt_count();
preempt_disable_notrace();
bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
if (bit < 0)
goto out;

@@ -1597,7 +1597,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
}
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void **head, void **tail)
void **head, void **tail,
int *cnt)
{
void *object;
@@ -1624,6 +1625,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*head = object;
if (!*tail)
*tail = object;
} else {
/*
* Adjust the reconstructed freelist depth
* accordingly if object's reuse is delayed.
*/
--(*cnt);
}
} while (object != old_tail);
@@ -3148,7 +3155,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c;
unsigned long tid;
memcg_slab_free_hook(s, &head, 1);
/* memcg_slab_free_hook() is already called for bulk free. */
if (!tail)
memcg_slab_free_hook(s, &head, 1);
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3192,7 +3201,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
*/
if (slab_free_freelist_hook(s, &head, &tail))
if (slab_free_freelist_hook(s, &head, &tail, &cnt))
do_slab_free(s, page, head, tail, cnt, addr);
}
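slab_free_freelist_hook() rebuilds the freelist while skipping objects whose reuse KASAN delays, so the count passed on to do_slab_free() has to shrink by one for every skipped object; otherwise the free path would be told to detach more objects than the list actually holds. A toy model of filtering a list while keeping the count honest (the skip predicate merely stands in for slab_free_hook()):

#include <stdbool.h>
#include <stdio.h>

struct obj { struct obj *next; int id; };

/* stand-in for slab_free_hook(): pretend even ids get quarantined */
static bool delay_reuse(const struct obj *o) { return o->id % 2 == 0; }

int main(void)
{
	struct obj pool[5];
	struct obj *head = NULL;
	int cnt = 5, i;

	/* walk the candidates, linking only the objects we may free now */
	for (i = 0; i < 5; i++) {
		pool[i].id = i;
		if (!delay_reuse(&pool[i])) {
			pool[i].next = head;
			head = &pool[i];
		} else {
			--cnt;	/* skipped, so shrink the detached count too */
		}
	}

	printf("freeing %d objects:", cnt);
	for (; head; head = head->next)
		printf(" %d", head->id);
	printf("\n");	/* freeing 2 objects: 3 1 */
	return 0;
}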
@@ -3888,8 +3897,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
if (alloc_kmem_cache_cpus(s))
return 0;
free_kmem_cache_nodes(s);
error:
__kmem_cache_release(s);
return -EINVAL;
}

@@ -481,6 +481,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
static struct proto bpf_dummy_proto = {
.name = "bpf_dummy",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sock),
};
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
@@ -525,20 +531,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break;
}
sk = kzalloc(sizeof(struct sock), GFP_USER);
sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
if (!sk) {
kfree(data);
kfree(ctx);
return -ENOMEM;
}
sock_net_set(sk, net);
sock_init_data(NULL, sk);
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
kfree(ctx);
kfree(sk);
sk_free(sk);
return -ENOMEM;
}
skb->sk = sk;
@@ -611,8 +616,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
if (dev && dev != net->loopback_dev)
dev_put(dev);
kfree_skb(skb);
bpf_sk_storage_free(sk);
kfree(sk);
sk_free(sk);
kfree(ctx);
return ret;
}

@@ -931,9 +931,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge *br)
static inline unsigned long br_multicast_gmi(const struct net_bridge *br)
{
/* use the RFC default of 2 for QRV */
return 2 * br->multicast_query_interval +
br->multicast_query_response_interval;
return br->multicast_membership_interval;
}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
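For context: with the bridge defaults (robustness 2, query interval 125 s, query response interval 10 s) the removed formula and the configured membership interval both come to 260 s; they only diverge once an administrator tunes multicast_membership_interval, which is exactly the case the one-line fix honors. A quick check of the arithmetic, in seconds:

#include <stdio.h>

int main(void)
{
	unsigned int robustness = 2;		/* RFC default QRV */
	unsigned int query_interval = 125;
	unsigned int query_response_interval = 10;
	unsigned int membership_interval = 260;	/* separately configurable */

	unsigned int computed = robustness * query_interval +
				query_response_interval;

	printf("computed GMI = %u s, configured = %u s\n",
	       computed, membership_interval);
	/* equal with defaults, different as soon as the knob is tuned */
	return 0;
}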

@@ -121,7 +121,7 @@ enum {
struct tpcon {
int idx;
int len;
u8 state;
u32 state;
u8 bs;
u8 sn;
u8 ll_dl;
@@ -846,6 +846,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
u32 old_state = so->tx.state;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
@@ -858,37 +859,45 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return -EADDRNOTAVAIL;
/* we do not support multiple buffers - for now */
if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
if (msg->msg_flags & MSG_DONTWAIT)
return -EAGAIN;
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
wq_has_sleeper(&so->wait)) {
if (msg->msg_flags & MSG_DONTWAIT) {
err = -EAGAIN;
goto err_out;
}
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_out;
}
if (!size || size > MAX_MSG_LENGTH)
return -EINVAL;
if (!size || size > MAX_MSG_LENGTH) {
err = -EINVAL;
goto err_out;
}
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
return err;
goto err_out;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev)
return -ENXIO;
if (!dev) {
err = -ENXIO;
goto err_out;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
return err;
goto err_out;
}
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
so->tx.state = ISOTP_SENDING;
so->tx.len = size;
so->tx.idx = 0;
@@ -947,15 +956,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %d\n",
__func__, err);
return err;
goto err_out;
}
if (wait_tx_done) {
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (sk->sk_err)
return -sk->sk_err;
}
return size;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
wake_up_interruptible(&so->wait);
return err;
}
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
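The rewritten send path claims the SENDING state atomically up front: cmpxchg() moves IDLE to SENDING for exactly one caller, and every error path now restores the previous state and wakes any waiter. A userspace sketch of that entry guard with C11 atomics (the state names mirror the driver's; everything else is illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum { ISOTP_IDLE, ISOTP_SENDING };

static _Atomic int tx_state = ISOTP_IDLE;

/* returns 0 if we now own the transmit path, -1 if someone else does */
static int try_claim_tx(void)
{
	int expected = ISOTP_IDLE;

	/* like cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) */
	if (!atomic_compare_exchange_strong(&tx_state, &expected, ISOTP_SENDING))
		return -1;
	return 0;
}

/* the err_out / completion path: restore IDLE so waiters can retry */
static void release_tx(void)
{
	atomic_store(&tx_state, ISOTP_IDLE);
}

int main(void)
{
	printf("first claim:   %d\n", try_claim_tx());	/* 0, we won */
	printf("second claim:  %d\n", try_claim_tx());	/* -1, busy */
	release_tx();
	printf("after release: %d\n", try_claim_tx());	/* 0 again */
	return 0;
}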

@@ -326,6 +326,7 @@ int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session);
#define J1939_MIN_TP_PACKET_SIZE 9
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
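For context on the new lower bound: a TP.DT frame carries 7 payload bytes and single-frame PGNs already cover up to 8 bytes, so a transport-protocol session announcing fewer than 9 bytes makes no sense and is now aborted; the maxima follow from the 8-bit (TP) and 24-bit (ETP) packet counters. The numbers work out as:

#include <stdio.h>

int main(void)
{
	/* 7 payload bytes per TP.DT frame */
	printf("TP  max: %d bytes\n", 7 * 0xff);	/* 1785 */
	printf("ETP max: %d bytes\n", 7 * 0x00ffffff);	/* 117440505 */
	printf("TP  min: 9 bytes (up to 8 fits a single frame)\n");
	return 0;
}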

@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;
priv = j1939_priv_get_by_ndev(ndev);
spin_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
spin_unlock(&j1939_netdev_lock);
return priv;
}
spin_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
/* Someone was faster than us, use their priv and roll
* back ours.
*/
kref_get(&priv_new->rx_kref);
spin_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
kref_get(&priv_new->rx_kref);
return priv_new;
}
j1939_priv_set(ndev, priv);

@@ -1230,12 +1230,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
session->err = -ETIME;
j1939_session_deactivate(session);
} else {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_get(session);
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@@ -1597,6 +1596,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE;
else if (len < J1939_MIN_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
}
if (abort != J1939_XTP_NO_ABORT) {
@@ -1771,6 +1772,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
struct sk_buff *skb)
{
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb;
struct sk_buff *se_skb = NULL;
@@ -1785,9 +1787,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
skcb = j1939_skb_to_cb(skb);
dat = skb->data;
if (skb->len <= 1)
if (skb->len != 8) {
/* makes no sense */
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
goto out_session_cancel;
}
switch (session->last_cmd) {
case 0xff:
@@ -1885,7 +1889,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_cancel(session, abort);
j1939_session_put(session);
}

@@ -1022,6 +1022,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{
if (!old)
return true;
/* l3index always overrides non-l3index */
if (old->l3index && new->l3index == 0)
return false;
if (old->l3index == 0 && new->l3index)
return true;
return old->prefixlen < new->prefixlen;
}
/* Find the Key structure for an address. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
@@ -1059,8 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
match = false;
}
if (match && (!best_match ||
key->prefixlen > best_match->prefixlen))
if (match && better_md5_match(best_match, key))
best_match = key;
}
return best_match;
@@ -1090,7 +1103,7 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
lockdep_sock_is_held(sk)) {
if (key->family != family)
continue;
if (key->l3index && key->l3index != l3index)
if (key->l3index != l3index)
continue;
if (!memcmp(&key->addr, addr, size) &&
key->prefixlen == prefixlen)
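better_md5_match() makes a key bound to an L3 device (VRF) always outrank an unbound key before prefix lengths are compared, and the exact lookup now requires l3index equality instead of treating a bound key as a wildcard. A standalone rendering of the preference order, trimmed to the two fields that matter (these are not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

struct md5_key { int l3index; int prefixlen; };

static bool better_md5_match(const struct md5_key *old, const struct md5_key *new)
{
	if (!old)
		return true;
	/* a VRF-bound key always outranks an unbound one */
	if (old->l3index && new->l3index == 0)
		return false;
	if (old->l3index == 0 && new->l3index)
		return true;
	/* same binding class: the longer prefix wins */
	return old->prefixlen < new->prefixlen;
}

int main(void)
{
	struct md5_key unbound = { .l3index = 0, .prefixlen = 32 };
	struct md5_key vrf     = { .l3index = 5, .prefixlen = 16 };

	/* the VRF key wins despite its shorter prefix */
	printf("vrf beats unbound: %d\n", better_md5_match(&unbound, &vrf)); /* 1 */
	printf("unbound beats vrf: %d\n", better_md5_match(&vrf, &unbound)); /* 0 */
	return 0;
}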

@@ -487,13 +487,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
int ip6_forward(struct sk_buff *skb)
{
struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
if (net->ipv6.devconf_all->forwarding == 0)
goto error;

@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
bool r;
pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
return (id >= min && id <= max) ^ invert;
}
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false;
}
pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
pr_debug("TYPE %04X ", rh->type);
pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
pr_debug("IPv6 RT segsleft %02X ",
segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
rh->segments_left,
!!(rtinfo->invflags & IP6T_RT_INV_SGS)));
pr_debug("type %02X %02X %02X ",
rtinfo->rt_type, rh->type,
(!(rtinfo->flags & IP6T_RT_TYP) ||
((rtinfo->rt_type == rh->type) ^
!!(rtinfo->invflags & IP6T_RT_INV_TYP))));
pr_debug("len %02X %04X %02X ",
rtinfo->hdrlen, hdrlen,
!(rtinfo->flags & IP6T_RT_LEN) ||
((rtinfo->hdrlen == hdrlen) ^
!!(rtinfo->invflags & IP6T_RT_INV_LEN)));
pr_debug("res %02X %02X %02X ",
rtinfo->flags & IP6T_RT_RES,
((const struct rt0_hdr *)rh)->reserved,
!((rtinfo->flags & IP6T_RT_RES) &&
(((const struct rt0_hdr *)rh)->reserved)));
ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
rh->segments_left,
!!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
reserved),
sizeof(_reserved),
&_reserved);
if (!rp) {
par->hotdrop = true;
return false;
}
ret = (*rp == 0);
}
pr_debug("#%d ", rtinfo->addrnr);
if (!(rtinfo->flags & IP6T_RT_FST)) {
return ret;
} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
pr_debug("Not strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
pr_debug("There isn't enough space\n");
return false;
} else {
unsigned int i = 0;
pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0;
temp < (unsigned int)((hdrlen - 8) / 16);
temp++) {
@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false;
}
if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
pr_debug("i=%d temp=%d;\n", i, temp);
if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
i++;
}
if (i == rtinfo->addrnr)
break;
}
pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
if (i == rtinfo->addrnr)
return ret;
else
return false;
}
} else {
pr_debug("Strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
pr_debug("There isn't enough space\n");
return false;
} else {
pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0; temp < rtinfo->addrnr; temp++) {
ap = skb_header_pointer(skb,
ptr
@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
break;
}
pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
if (temp == rtinfo->addrnr &&
temp == (unsigned int)((hdrlen - 8) / 16))
return ret;

@@ -94,7 +94,7 @@ config NF_CONNTRACK_MARK
config NF_CONNTRACK_SECMARK
bool 'Connection tracking security mark support'
depends on NETWORK_SECMARK
default m if NETFILTER_ADVANCED=n
default y if NETFILTER_ADVANCED=n
help
This option enables security markings to be applied to
connections. Typically they are copied to connections from

@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
tbl[idx++].mode = 0444;
#endif
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {

@@ -277,6 +277,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
ndev->cur_conn_id);
if (conn_info) {
list_del(&conn_info->list);
if (conn_info == ndev->rf_conn_info)
ndev->rf_conn_info = NULL;
devm_kfree(&ndev->nfc_dev->dev, conn_info);
}
}

@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all
ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
endif
export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN

@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
if (!full_reset)
goto skip_reset;
/* clear STATESTS */
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* clear STATESTS if not in reset */
if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* reset controller */
snd_hdac_bus_enter_link_reset(bus);

@@ -301,29 +301,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
{
int err;
if (codec->configured)
return 0;
if (is_generic_config(codec))
codec->probe_id = HDA_CODEC_ID_GENERIC;
else
codec->probe_id = 0;
err = snd_hdac_device_register(&codec->core);
if (err < 0)
return err;
if (!device_is_registered(&codec->core.dev)) {
err = snd_hdac_device_register(&codec->core);
if (err < 0)
return err;
}
if (!codec->preset)
codec_bind_module(codec);
if (!codec->preset) {
err = codec_bind_generic(codec);
if (err < 0) {
codec_err(codec, "Unable to bind the codec\n");
goto error;
codec_dbg(codec, "Unable to bind the codec\n");
return err;
}
}
codec->configured = 1;
return 0;
error:
snd_hdac_device_unregister(&codec->core);
return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_configure);

@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
snd_array_free(&codec->nids);
remove_conn_list(codec);
snd_hdac_regmap_exit(&codec->core);
codec->configured = 0;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);

Some files were not shown because too many files have changed in this diff.