This is the 5.10.190 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmTWBikACgkQONu9yGCS
 aT7+sBAAgFSUVSAURxIgHfi1XvXnGI/eZI/0ub8AqkWRdMUVbpqXR0hsq1/KP5sj
 zz6l4QTfFyGm5EQPv3dEDi3Nztsb2dQEaJTIKQV6N4kY133fn6sYIxQE9Rb1jHcp
 UoI/WbiOAwRqKd5glsPANByen6U4871Y1h2drBgMXYzP1fVEhPCVxz1XhAyZlVTw
 n94774DCiSXaVUt82r5IPOV2HfZAnik4BThCD3oE1v9qN2ugow51GA11R9Mf49lR
 mthPqsMhOHl+IPgB+3VaL+DCDDbs6b+izBCNSy5Jdupqsd22RuuLE3a8gSgZSoYB
 3NrIqwOPUuVX3yUsvaTfklXMDU8RyKGEAIK/njW2NSFv8ccMa1lymhXnrkjSd7AP
 BdHVfpB9k3WJQt6ElMfH6S8KszWFyLgolsT7cG3osi7VgsbDuGW30ApTl7WI7nDB
 QgoM3X2FfYYZ8uc7+fT0PU8tk098VqB0c4PQbigN7i0l0J6o1PPrg4Ke9++qaSAs
 PZ+rUha4+9whgjjyF05sWSScgwo0Az4qEx5r1Igf3lfDOUnfIPy3cKd+O1zpjrt5
 6ZO3CQkYM4IO8mP5ps2t4rJvZuPfAVBi3Ndgtt/JbLlkxbkuhNzTE1TKA7UOZRAW
 RK6YlzhHsR5LQEOZb6pVJtO2HTaJSBjTrp4W+xj/e6k51z+I4Co=
 =RCz+
 -----END PGP SIGNATURE-----

Merge 5.10.190 into android12-5.10-lts

Changes in 5.10.190
	KVM: s390: pv: fix index value of replaced ASCE
	io_uring: don't audit the capability check in io_uring_create()
	gpio: tps68470: Make tps68470_gpio_output() always set the initial value
	btrfs: fix race between quota disable and relocation
	btrfs: fix extent buffer leak after tree mod log failure at split_node()
	i2c: Delete error messages for failed memory allocations
	i2c: Improve size determinations
	i2c: nomadik: Remove unnecessary goto label
	i2c: nomadik: Use devm_clk_get_enabled()
	i2c: nomadik: Remove a useless call in the remove function
	PCI/ASPM: Return 0 or -ETIMEDOUT from pcie_retrain_link()
	PCI/ASPM: Factor out pcie_wait_for_retrain()
	PCI/ASPM: Avoid link retraining race
	dlm: cleanup plock_op vs plock_xop
	dlm: rearrange async condition return
	fs: dlm: interrupt posix locks only when process is killed
	drm/ttm: add ttm_bo_pin()/ttm_bo_unpin() v2
	drm/ttm: never consider pinned BOs for eviction&swap
	tracing: Show real address for trace event arguments
	pwm: meson: Simplify duplicated per-channel tracking
	pwm: meson: fix handling of period/duty if greater than UINT_MAX
	ext4: fix to check return value of freeze_bdev() in ext4_shutdown()
	phy: qcom-snps: Use dev_err_probe() to simplify code
	phy: qcom-snps: correct struct qcom_snps_hsphy kerneldoc
	phy: qcom-snps-femto-v2: keep cfg_ahb_clk enabled during runtime suspend
	phy: qcom-snps-femto-v2: properly enable ref clock
	media: staging: atomisp: select V4L2_FWNODE
	i40e: Fix an NULL vs IS_ERR() bug for debugfs_create_dir()
	net: phy: marvell10g: fix 88x3310 power up
	net: hns3: reconstruct function hclge_ets_validate()
	net: hns3: fix wrong bw weight of disabled tc issue
	vxlan: move to its own directory
	vxlan: calculate correct header length for GPE
	phy: hisilicon: Fix an out of bounds check in hisi_inno_phy_probe()
	ethernet: atheros: fix return value check in atl1e_tso_csum()
	ipv6 addrconf: fix bug where deleting a mngtmpaddr can create a new temporary address
	tcp: Reduce chance of collisions in inet6_hashfn().
	ice: Fix memory management in ice_ethtool_fdir.c
	bonding: reset bond's flags when down link is P2P device
	team: reset team's flags when down link is P2P device
	platform/x86: msi-laptop: Fix rfkill out-of-sync on MSI Wind U100
	netfilter: nft_set_rbtree: fix overlap expiration walk
	netfilter: nftables: add helper function to validate set element data
	netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR
	netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
	net/sched: mqprio: refactor nlattr parsing to a separate function
	net/sched: mqprio: add extack to mqprio_parse_nlattr()
	net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64
	benet: fix return value check in be_lancer_xmit_workarounds()
	tipc: check return value of pskb_trim()
	tipc: stop tipc crypto on failure in tipc_node_create
	RDMA/mlx4: Make check for invalid flags stricter
	drm/msm/dpu: drop enum dpu_core_perf_data_bus_id
	drm/msm/adreno: Fix snapshot BINDLESS_DATA size
	RDMA/mthca: Fix crash when polling CQ for shared QPs
	drm/msm: Fix IS_ERR_OR_NULL() vs NULL check in a5xx_submit_in_rb()
	ASoC: fsl_spdif: Silence output on stop
	block: Fix a source code comment in include/uapi/linux/blkzoned.h
	dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
	dm raid: clean up four equivalent goto tags in raid_ctr()
	dm raid: protect md_stop() with 'reconfig_mutex'
	ata: pata_ns87415: mark ns87560_tf_read static
	ring-buffer: Fix wrong stat of cpu_buffer->read
	tracing: Fix warning in trace_buffered_event_disable()
	Revert "usb: gadget: tegra-xudc: Fix error check in tegra_xudc_powerdomain_init()"
	USB: gadget: Fix the memory leak in raw_gadget driver
	serial: qcom-geni: drop bogus runtime pm state update
	serial: 8250_dw: Preserve original value of DLF register
	serial: sifive: Fix sifive_serial_console_setup() section
	USB: serial: option: support Quectel EM060K_128
	USB: serial: option: add Quectel EC200A module support
	USB: serial: simple: add Kaufmann RKS+CAN VCP
	USB: serial: simple: sort driver entries
	can: gs_usb: gs_can_close(): add missing set of CAN state to CAN_STATE_STOPPED
	Revert "usb: dwc3: core: Enable AutoRetry feature in the controller"
	usb: dwc3: pci: skip BYT GPIO lookup table for hardwired phy
	usb: dwc3: don't reset device side if dwc3 was configured as host-only
	usb: ohci-at91: Fix the unhandle interrupt when resume
	USB: quirks: add quirk for Focusrite Scarlett
	usb: xhci-mtk: set the dma max_seg_size
	Revert "usb: xhci: tegra: Fix error check"
	Documentation: security-bugs.rst: update preferences when dealing with the linux-distros group
	Documentation: security-bugs.rst: clarify CVE handling
	staging: ks7010: potential buffer overflow in ks_wlan_set_encode_ext()
	tty: n_gsm: fix UAF in gsm_cleanup_mux
	ALSA: hda/relatek: Enable Mute LED on HP 250 G8
	hwmon: (nct7802) Fix for temp6 (PECI1) processed even if PECI1 disabled
	btrfs: check for commit error at btrfs_attach_transaction_barrier()
	file: always lock position for FMODE_ATOMIC_POS
	nfsd: Remove incorrect check in nfsd4_validate_stateid
	tpm_tis: Explicitly check for error code
	irq-bcm6345-l1: Do not assume a fixed block to cpu mapping
	irqchip/gic-v4.1: Properly lock VPEs when doing a directLPI invalidation
	KVM: VMX: Invert handling of CR0.WP for EPT without unrestricted guest
	KVM: VMX: Fold ept_update_paging_mode_cr0() back into vmx_set_cr0()
	KVM: nVMX: Do not clear CR3 load/store exiting bits if L1 wants 'em
	KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest
	staging: rtl8712: Use constants from <linux/ieee80211.h>
	staging: r8712: Fix memory leak in _r8712_init_xmit_priv()
	btrfs: check if the transaction was aborted at btrfs_wait_for_commit()
	virtio-net: fix race between set queues and probe
	s390/dasd: fix hanging device after quiesce/resume
	ASoC: wm8904: Fill the cache for WM8904_ADC_TEST_0 register
	ceph: never send metrics if disable_send_metrics is set
	dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
	drm/ttm: make ttm_bo_unpin more defensive
	ACPI: processor: perflib: Use the "no limit" frequency QoS
	ACPI: processor: perflib: Avoid updating frequency QoS unnecessarily
	cpufreq: intel_pstate: Drop ACPI _PSS states table patching
	selftests: mptcp: depend on SYN_COOKIES
	io_uring: treat -EAGAIN for REQ_F_NOWAIT as final for io-wq
	ASoC: cs42l51: fix driver to properly autoload with automatic module loading
	kprobes/x86: Fix fall-through warnings for Clang
	x86/kprobes: Do not decode opcode in resume_execution()
	x86/kprobes: Retrieve correct opcode for group instruction
	x86/kprobes: Identify far indirect JMP correctly
	x86/kprobes: Use int3 instead of debug trap for single-step
	x86/kprobes: Fix to identify indirect jmp and others using range case
	x86/kprobes: Move 'inline' to the beginning of the kprobe_is_ss() declaration
	x86/kprobes: Update kcb status flag after singlestepping
	x86/kprobes: Fix JNG/JNLE emulation
	io_uring: gate iowait schedule on having pending requests
	perf: Fix function pointer case
	loop: Select I/O scheduler 'none' from inside add_disk()
	arm64: dts: imx8mn-var-som: add missing pull-up for onboard PHY reset pinmux
	word-at-a-time: use the same return type for has_zero regardless of endianness
	KVM: s390: fix sthyi error handling
	wifi: cfg80211: Fix return value in scan logic
	net/mlx5: DR, fix memory leak in mlx5dr_cmd_create_reformat_ctx
	net/mlx5e: fix return value check in mlx5e_ipsec_remove_trailer()
	bpf: Add length check for SK_DIAG_BPF_STORAGE_REQ_MAP_FD parsing
	rtnetlink: let rtnl_bridge_setlink checks IFLA_BRIDGE_MODE length
	net: dsa: fix value check in bcm_sf2_sw_probe()
	perf test uprobe_from_different_cu: Skip if there is no gcc
	net: sched: cls_u32: Fix match key mis-addressing
	mISDN: hfcpci: Fix potential deadlock on &hc->lock
	net: annotate data-races around sk->sk_max_pacing_rate
	net: add missing READ_ONCE(sk->sk_rcvlowat) annotation
	net: add missing READ_ONCE(sk->sk_sndbuf) annotation
	net: add missing READ_ONCE(sk->sk_rcvbuf) annotation
	net: add missing data-race annotations around sk->sk_peek_off
	net: add missing data-race annotation for sk_ll_usec
	net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free
	net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free
	net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free
	bpf: sockmap: Remove preempt_disable in sock_map_sk_acquire
	net: ll_temac: Switch to use dev_err_probe() helper
	net: ll_temac: fix error checking of irq_of_parse_and_map()
	net: netsec: Ignore 'phy-mode' on SynQuacer in DT mode
	net: dcb: choose correct policy to parse DCB_ATTR_BCN
	s390/qeth: Don't call dev_close/dev_open (DOWN/UP)
	ip6mr: Fix skb_under_panic in ip6mr_cache_report()
	vxlan: Fix nexthop hash size
	net/mlx5: fs_core: Make find_closest_ft more generic
	net/mlx5: fs_core: Skip the FTs in the same FS_TYPE_PRIO_CHAINS fs_prio
	tcp_metrics: fix addr_same() helper
	tcp_metrics: annotate data-races around tm->tcpm_stamp
	tcp_metrics: annotate data-races around tm->tcpm_lock
	tcp_metrics: annotate data-races around tm->tcpm_vals[]
	tcp_metrics: annotate data-races around tm->tcpm_net
	tcp_metrics: fix data-race in tcpm_suck_dst() vs fastopen
	scsi: zfcp: Defer fc_rport blocking until after ADISC response
	libceph: fix potential hang in ceph_osdc_notify()
	USB: zaurus: Add ID for A-300/B-500/C-700
	ceph: defer stopping mdsc delayed_work
	exfat: use kvmalloc_array/kvfree instead of kmalloc_array/kfree
	exfat: release s_lock before calling dir_emit()
	mtd: spinand: toshiba: Fix ecc_get_status
	mtd: rawnand: meson: fix OOB available bytes for ECC
	arm64: dts: stratix10: fix incorrect I2C property for SCL signal
	net: tun_chr_open(): set sk_uid from current_fsuid()
	net: tap_open(): set sk_uid from current_fsuid()
	bpf: Disable preemption in bpf_event_output
	open: make RESOLVE_CACHED correctly test for O_TMPFILE
	drm/ttm: check null pointer before accessing when swapping
	file: reinstate f_pos locking optimization for regular files
	tracing: Fix sleeping while atomic in kdb ftdump
	fs/sysv: Null check to prevent null-ptr-deref bug
	Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb
	net: usbnet: Fix WARNING in usbnet_start_xmit/usb_submit_urb
	fs: Protect reconfiguration of sb read-write from racing writes
	ext2: Drop fragment support
	mtd: rawnand: omap_elm: Fix incorrect type in assignment
	mtd: rawnand: fsl_upm: Fix an off-by one test in fun_exec_op()
	powerpc/mm/altmap: Fix altmap boundary check
	selftests/rseq: check if libc rseq support is registered
	selftests/rseq: Play nice with binaries statically linked against glibc 2.35+
	soundwire: bus: add better dev_dbg to track complete() calls
	soundwire: bus: pm_runtime_request_resume on peripheral attachment
	soundwire: fix enumeration completion
	PM / wakeirq: support enabling wake-up irq after runtime_suspend called
	PM: sleep: wakeirq: fix wake irq arming
	exfat: speed up iterate/lookup by fixing start point of traversing cluster chain
	exfat: support dynamic allocate bh for exfat_entry_set_cache
	exfat: check if filename entries exceeds max filename length
	mt76: move band capabilities in mt76_phy
	mt76: mt7615: Fix fall-through warnings for Clang
	wifi: mt76: mt7615: do not advertise 5 GHz on first phy of MT7615D (DBDC)
	ARM: dts: imx: add usb alias
	ARM: dts: imx6sll: fixup of operating points
	ARM: dts: nxp/imx6sll: fix wrong property name in usbphy node
	x86/CPU/AMD: Do not leak quotient data after a division by 0
	Linux 5.10.190

Fix up a build problem in ext4 caused by the merge of ed3d841f2f
("ext4: fix to check return value of freeze_bdev() in ext4_shutdown()"),
which conflicts with an earlier block layer core change that came in
through the f2fs tree and is ANDROID-specific, not upstream.

Change-Id: Ib95e59ce8ba653bcc791802735afafcd26bd996f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit df0f5bd7a8 by Greg Kroah-Hartman, 2023-08-23 15:13:56 +00:00
192 changed files with 2180 additions and 1204 deletions


@ -63,31 +63,28 @@ information submitted to the security list and any followup discussions
of the report are treated confidentially even after the embargo has been
lifted, in perpetuity.
Coordination
------------
Coordination with other groups
------------------------------
Fixes for sensitive bugs, such as those that might lead to privilege
escalations, may need to be coordinated with the private
<linux-distros@vs.openwall.org> mailing list so that distribution vendors
are well prepared to issue a fixed kernel upon public disclosure of the
upstream fix. Distros will need some time to test the proposed patch and
will generally request at least a few days of embargo, and vendor update
publication prefers to happen Tuesday through Thursday. When appropriate,
the security team can assist with this coordination, or the reporter can
include linux-distros from the start. In this case, remember to prefix
the email Subject line with "[vs]" as described in the linux-distros wiki:
<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>
The kernel security team strongly recommends that reporters of potential
security issues NEVER contact the "linux-distros" mailing list until
AFTER discussing it with the kernel security team. Do not Cc: both
lists at once. You may contact the linux-distros mailing list after a
fix has been agreed on and you fully understand the requirements that
doing so will impose on you and the kernel community.
The different lists have different goals and the linux-distros rules do
not contribute to actually fixing any potential security problems.
CVE assignment
--------------
The security team does not normally assign CVEs, nor do we require them
for reports or fixes, as this can needlessly complicate the process and
may delay the bug handling. If a reporter wishes to have a CVE identifier
assigned ahead of public disclosure, they will need to contact the private
linux-distros list, described above. When such a CVE identifier is known
before a patch is provided, it is desirable to mention it in the commit
message if the reporter agrees.
The security team does not assign CVEs, nor do we require them for
reports or fixes, as this can needlessly complicate the process and may
delay the bug handling. If a reporter wishes to have a CVE identifier
assigned, they should find one by themselves, for example by contacting
MITRE directly. However under no circumstances will a patch inclusion
be delayed to wait for a CVE identifier to arrive.
Non-disclosure agreements
-------------------------


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 189
SUBLEVEL = 190
EXTRAVERSION =
NAME = Dare mighty things


@ -45,6 +45,10 @@ aliases {
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
usb0 = &usbotg;
usb1 = &usbh1;
usb2 = &usbh2;
usb3 = &usbh3;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};


@ -39,6 +39,9 @@ aliases {
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
usb0 = &usbotg1;
usb1 = &usbotg2;
usb2 = &usbh;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};


@ -36,6 +36,8 @@ aliases {
spi1 = &ecspi2;
spi3 = &ecspi3;
spi4 = &ecspi4;
usb0 = &usbotg1;
usb1 = &usbotg2;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@ -49,20 +51,18 @@ cpu0: cpu@0 {
device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
operating-points = <
/* kHz uV */
996000 1275000
792000 1175000
396000 1075000
198000 975000
>;
operating-points =
/* kHz uV */
<996000 1275000>,
<792000 1175000>,
<396000 1075000>,
<198000 975000>;
fsl,soc-operating-points = <
/* ARM kHz SOC-PU uV */
996000 1175000
792000 1175000
396000 1175000
198000 1175000
>;
fsl,soc-operating-points =
/* ARM kHz SOC-PU uV */
<996000 1175000>,
<792000 1175000>,
<396000 1175000>,
<198000 1175000>;
clock-latency = <61036>; /* two CLK32 periods */
#cooling-cells = <2>;
clocks = <&clks IMX6SLL_CLK_ARM>,
@ -552,7 +552,7 @@ usbphy2: usb-phy@20ca000 {
reg = <0x020ca000 0x1000>;
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_USBPHY2>;
phy-reg_3p0-supply = <&reg_3p0>;
phy-3p0-supply = <&reg_3p0>;
fsl,anatop = <&anatop>;
};


@ -49,6 +49,9 @@ aliases {
spi2 = &ecspi3;
spi3 = &ecspi4;
spi4 = &ecspi5;
usb0 = &usbotg1;
usb1 = &usbotg2;
usb2 = &usbh;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};


@ -47,6 +47,8 @@ aliases {
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
usb0 = &usbotg1;
usb1 = &usbotg2;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};


@ -7,6 +7,12 @@
#include <dt-bindings/reset/imx7-reset.h>
/ {
aliases {
usb0 = &usbotg1;
usb1 = &usbotg2;
usb2 = &usbh;
};
cpus {
cpu0: cpu@0 {
clock-frequency = <996000000>;


@ -47,6 +47,8 @@ aliases {
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
usb0 = &usbotg1;
usb1 = &usbh;
};
cpus {


@ -129,7 +129,7 @@ &i2c1 {
status = "okay";
clock-frequency = <100000>;
i2c-sda-falling-time-ns = <890>; /* hcnt */
i2c-sdl-falling-time-ns = <890>; /* lcnt */
i2c-scl-falling-time-ns = <890>; /* lcnt */
adc@14 {
compatible = "lltc,ltc2497";


@ -162,7 +162,7 @@ &i2c2 {
status = "okay";
clock-frequency = <100000>;
i2c-sda-falling-time-ns = <890>; /* hcnt */
i2c-sdl-falling-time-ns = <890>; /* lcnt */
i2c-scl-falling-time-ns = <890>; /* lcnt */
adc@14 {
compatible = "lltc,ltc2497";


@ -351,7 +351,7 @@ MX8MN_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19
MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159
>;
};


@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
return leading_zero_bits >> 3;
}
static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;


@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
start = ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
alt_end = altmap->base_pfn + altmap->reserve +
altmap->free + altmap->alloc + altmap->align;
alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
}
pr_debug("vmemmap_free %lx...%lx\n", start, end);


@ -460,9 +460,9 @@ static int sthyi_update_cache(u64 *rc)
*
* Fills the destination with system information returned by the STHYI
* instruction. The data is generated by emulation or execution of STHYI,
* if available. The return value is the condition code that would be
* returned, the rc parameter is the return code which is passed in
* register R2 + 1.
* if available. The return value is either a negative error value or
* the condition code that would be returned, the rc parameter is the
* return code which is passed in register R2 + 1.
*/
int sthyi_fill(void *dst, u64 *rc)
{


@ -387,8 +387,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
*/
int handle_sthyi(struct kvm_vcpu *vcpu)
{
int reg1, reg2, r = 0;
u64 code, addr, cc = 0, rc = 0;
int reg1, reg2, cc = 0, r = 0;
u64 code, addr, rc = 0;
struct sthyi_sctns *sctns = NULL;
if (!test_kvm_facility(vcpu->kvm, 74))
@ -419,7 +419,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
return -ENOMEM;
cc = sthyi_fill(sctns, &rc);
if (cc < 0) {
free_page((unsigned long)sctns);
return cc;
}
out:
if (!cc) {
if (kvm_s390_pv_cpu_is_protected(vcpu)) {


@ -2786,6 +2786,7 @@ int s390_replace_asce(struct gmap *gmap)
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = 0;
table = page_to_virt(page);
memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));


@ -451,4 +451,5 @@
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -58,14 +58,29 @@ struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t *insn;
/*
* boostable = false: This instruction type is not boostable.
* boostable = true: This instruction has been boosted: we have
* boostable = 0: This instruction type is not boostable.
* boostable = 1: This instruction has been boosted: we have
* added a relative jump after the instruction copy in insn,
* so no single-step and fixup are needed (unless there's
* a post_handler).
*/
bool boostable;
bool if_modifier;
unsigned boostable:1;
unsigned char size; /* The size of insn */
union {
unsigned char opcode;
struct {
unsigned char type;
} jcc;
struct {
unsigned char type;
unsigned char asize;
} loop;
struct {
unsigned char reg;
} indirect;
};
s32 rel32; /* relative offset must be s32, s16, or s8 */
void (*emulate_op)(struct kprobe *p, struct pt_regs *regs);
/* Number of bytes of text poked */
int tp_len;
};
@ -104,7 +119,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_int3_handler(struct pt_regs *regs);
extern int kprobe_debug_handler(struct pt_regs *regs);
#else


@ -809,10 +809,12 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
#else
static inline u16 amd_get_nb_id(int cpu) { return 0; }
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
#endif
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)


@ -76,6 +76,10 @@ static const int amd_zenbleed[] =
AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
static const int amd_div0[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
int osvw_id = *erratum++;
@ -1168,6 +1172,11 @@ static void init_amd(struct cpuinfo_x86 *c)
check_null_seg_clears_base(c);
zenbleed_check(c);
if (cpu_has_amd_erratum(c, amd_div0)) {
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
}
}
#ifdef CONFIG_X86_32
@ -1312,3 +1321,13 @@ void amd_check_microcode(void)
{
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
*/
void noinstr amd_clear_divider(void)
{
asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
:: "a" (0), "d" (0), "r" (1));
}


@ -133,26 +133,6 @@ void synthesize_relcall(void *dest, void *from, void *to)
}
NOKPROBE_SYMBOL(synthesize_relcall);
/*
* Skip the prefixes of the instruction.
*/
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
insn_attr_t attr;
attr = inat_get_opcode_attribute((insn_byte_t)*insn);
while (inat_is_legacy_prefix(attr)) {
insn++;
attr = inat_get_opcode_attribute((insn_byte_t)*insn);
}
#ifdef CONFIG_X86_64
if (inat_is_rex_prefix(attr))
insn++;
#endif
return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
/*
* Returns non-zero if INSN is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
@ -185,29 +165,28 @@ int can_boost(struct insn *insn, void *addr)
opcode = insn->opcode.bytes[0];
switch (opcode & 0xf0) {
case 0x60:
/* can't boost "bound" */
return (opcode != 0x62);
case 0x70:
return 0; /* can't boost conditional jump */
case 0x90:
return opcode != 0x9a; /* can't boost call far */
case 0xc0:
/* can't boost software-interruptions */
return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
case 0xd0:
/* can boost AA* and XLAT */
return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
case 0xe0:
/* can boost in/out and absolute jmps */
return ((opcode & 0x04) || opcode == 0xea);
case 0xf0:
/* clear and set flags are boostable */
return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
switch (opcode) {
case 0x62: /* bound */
case 0x70 ... 0x7f: /* Conditional jumps */
case 0x9a: /* Call far */
case 0xc0 ... 0xc1: /* Grp2 */
case 0xcc ... 0xce: /* software exceptions */
case 0xd0 ... 0xd3: /* Grp2 */
case 0xd6: /* (UD) */
case 0xd8 ... 0xdf: /* ESC */
case 0xe0 ... 0xe3: /* LOOP*, JCXZ */
case 0xe8 ... 0xe9: /* near Call, JMP */
case 0xeb: /* Short JMP */
case 0xf0 ... 0xf4: /* LOCK/REP, HLT */
case 0xf6 ... 0xf7: /* Grp3 */
case 0xfe: /* Grp4 */
/* ... are not boostable */
return 0;
case 0xff: /* Grp5 */
/* Only indirect jmp is boostable */
return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
default:
/* call is not boostable */
return opcode != 0x9a;
return 1;
}
}
@ -326,25 +305,6 @@ static int can_probe(unsigned long paddr)
return (addr == paddr);
}
/*
* Returns non-zero if opcode modifies the interrupt flag.
*/
static int is_IF_modifier(kprobe_opcode_t *insn)
{
/* Skip prefixes */
insn = skip_prefixes(insn);
switch (*insn) {
case 0xfa: /* cli */
case 0xfb: /* sti */
case 0xcf: /* iret/iretd */
case 0x9d: /* popf/popfd */
return 1;
}
return 0;
}
/*
* Copy an instruction with recovering modified instruction by kprobes
* and adjust the displacement if the instruction uses the %rip-relative
@ -412,13 +372,14 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
return insn->length;
}
/* Prepare reljump right after instruction to boost */
static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
struct insn *insn)
/* Prepare reljump or int3 right after instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
struct insn *insn)
{
int len = insn->length;
if (can_boost(insn, p->addr) &&
if (!IS_ENABLED(CONFIG_PREEMPTION) &&
!p->post_handler && can_boost(insn, p->addr) &&
MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
/*
* These instructions can be executed directly if it
@ -427,9 +388,14 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
synthesize_reljump(buf + len, p->ainsn.insn + len,
p->addr + insn->length);
len += JMP32_INSN_SIZE;
p->ainsn.boostable = true;
p->ainsn.boostable = 1;
} else {
p->ainsn.boostable = false;
/* Otherwise, put an int3 for trapping singlestep */
if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
return -ENOSPC;
buf[len] = INT3_INSN_OPCODE;
len += INT3_INSN_SIZE;
}
return len;
@ -466,25 +432,290 @@ void free_insn_page(void *page)
module_memfree(page);
}
/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
switch (p->ainsn.opcode) {
case 0xfa: /* cli */
regs->flags &= ~(X86_EFLAGS_IF);
break;
case 0xfb: /* sti */
regs->flags |= X86_EFLAGS_IF;
break;
case 0x9c: /* pushf */
int3_emulate_push(regs, regs->flags);
break;
case 0x9d: /* popf */
regs->flags = int3_emulate_pop(regs);
break;
}
regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);
static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);
static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
func += p->ainsn.rel32;
int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);
static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (cond)
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
}
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
bool invert = p->ainsn.jcc.type & 1;
bool match;
if (p->ainsn.jcc.type < 0xc) {
match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
bool match;
if (p->ainsn.loop.type != 3) { /* LOOP* */
if (p->ainsn.loop.asize == 32)
match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
else if (p->ainsn.loop.asize == 64)
match = ((*(u64 *)&regs->cx)--) != 0;
#endif
else
match = ((*(u16 *)&regs->cx)--) != 0;
} else { /* JCXZ */
if (p->ainsn.loop.asize == 32)
match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
else if (p->ainsn.loop.asize == 64)
match = *(u64 *)(&regs->cx) == 0;
#endif
else
match = *(u16 *)(&regs->cx) == 0;
}
if (p->ainsn.loop.type == 0) /* LOOPNE */
match = match && !(regs->flags & X86_EFLAGS_ZF);
else if (p->ainsn.loop.type == 1) /* LOOPE */
match = match && (regs->flags & X86_EFLAGS_ZF);
__kprobe_emulate_jmp(p, regs, match);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);
static const int addrmode_regoffs[] = {
offsetof(struct pt_regs, ax),
offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
offsetof(struct pt_regs, bx),
offsetof(struct pt_regs, sp),
offsetof(struct pt_regs, bp),
offsetof(struct pt_regs, si),
offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
offsetof(struct pt_regs, r8),
offsetof(struct pt_regs, r9),
offsetof(struct pt_regs, r10),
offsetof(struct pt_regs, r11),
offsetof(struct pt_regs, r12),
offsetof(struct pt_regs, r13),
offsetof(struct pt_regs, r14),
offsetof(struct pt_regs, r15),
#endif
};
static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
int3_emulate_call(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);
static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
insn_byte_t opcode = insn->opcode.bytes[0];
switch (opcode) {
case 0xfa: /* cli */
case 0xfb: /* sti */
case 0x9c: /* pushfl */
case 0x9d: /* popf/popfd */
/*
* IF modifiers must be emulated since it will enable interrupt while
* int3 single stepping.
*/
p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
p->ainsn.opcode = opcode;
break;
case 0xc2: /* ret/lret */
case 0xc3:
case 0xca:
case 0xcb:
p->ainsn.emulate_op = kprobe_emulate_ret;
break;
case 0x9a: /* far call absolute -- segment is not supported */
case 0xea: /* far jmp absolute -- segment is not supported */
case 0xcc: /* int3 */
case 0xcf: /* iret -- in-kernel IRET is not supported */
return -EOPNOTSUPP;
break;
case 0xe8: /* near call relative */
p->ainsn.emulate_op = kprobe_emulate_call;
if (insn->immediate.nbytes == 2)
p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
else
p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
break;
case 0xeb: /* short jump relative */
case 0xe9: /* near jump relative */
p->ainsn.emulate_op = kprobe_emulate_jmp;
if (insn->immediate.nbytes == 1)
p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
else if (insn->immediate.nbytes == 2)
p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
else
p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
break;
case 0x70 ... 0x7f:
/* 1 byte conditional jump */
p->ainsn.emulate_op = kprobe_emulate_jcc;
p->ainsn.jcc.type = opcode & 0xf;
p->ainsn.rel32 = *(char *)insn->immediate.bytes;
break;
case 0x0f:
opcode = insn->opcode.bytes[1];
if ((opcode & 0xf0) == 0x80) {
/* 2 bytes Conditional Jump */
p->ainsn.emulate_op = kprobe_emulate_jcc;
p->ainsn.jcc.type = opcode & 0xf;
if (insn->immediate.nbytes == 2)
p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
else
p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
} else if (opcode == 0x01 &&
X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
/* VM extensions - not supported */
return -EOPNOTSUPP;
}
break;
case 0xe0: /* Loop NZ */
case 0xe1: /* Loop */
case 0xe2: /* Loop */
case 0xe3: /* J*CXZ */
p->ainsn.emulate_op = kprobe_emulate_loop;
p->ainsn.loop.type = opcode & 0x3;
p->ainsn.loop.asize = insn->addr_bytes * 8;
p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
break;
case 0xff:
/*
* Since the 0xff is an extended group opcode, the instruction
* is determined by the MOD/RM byte.
*/
opcode = insn->modrm.bytes[0];
if ((opcode & 0x30) == 0x10) {
if ((opcode & 0x8) == 0x8)
return -EOPNOTSUPP; /* far call */
/* call absolute, indirect */
p->ainsn.emulate_op = kprobe_emulate_call_indirect;
} else if ((opcode & 0x30) == 0x20) {
if ((opcode & 0x8) == 0x8)
return -EOPNOTSUPP; /* far jmp */
/* jmp near absolute indirect */
p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
} else
break;
if (insn->addr_bytes != sizeof(unsigned long))
return -EOPNOTSUPP; /* Don't support differnt size */
if (X86_MODRM_MOD(opcode) != 3)
return -EOPNOTSUPP; /* TODO: support memory addressing */
p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
if (X86_REX_B(insn->rex_prefix.value))
p->ainsn.indirect.reg += 8;
#endif
break;
default:
break;
}
p->ainsn.size = insn->length;
return 0;
}
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
int len;
int ret, len;
/* Copy an instruction with recovering if other optprobe modifies it.*/
len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
if (!len)
return -EINVAL;
/*
* __copy_instruction can modify the displacement of the instruction,
* but it doesn't affect boostable check.
*/
len = prepare_boost(buf, p, &insn);
/* Analyze the opcode and setup emulate functions */
ret = prepare_emulation(p, &insn);
if (ret < 0)
return ret;
/* Check whether the instruction modifies Interrupt Flag or not */
p->ainsn.if_modifier = is_IF_modifier(buf);
/* Add int3 for single-step or booster jmp */
len = prepare_singlestep(buf, p, &insn);
if (len < 0)
return len;
/* Also, displacement change doesn't affect the first byte */
p->opcode = buf[0];
@ -507,6 +738,9 @@ int arch_prepare_kprobe(struct kprobe *p)
if (!can_probe((unsigned long)p->addr))
return -EILSEQ;
memset(&p->ainsn, 0, sizeof(p->ainsn));
/* insn: must be on special executable page on x86. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
@ -574,29 +808,7 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
if (p->ainsn.if_modifier)
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static nokprobe_inline void clear_btf(void)
{
if (test_thread_flag(TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
debugctl &= ~DEBUGCTLMSR_BTF;
update_debugctlmsr(debugctl);
}
}
static nokprobe_inline void restore_btf(void)
{
if (test_thread_flag(TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
debugctl |= DEBUGCTLMSR_BTF;
update_debugctlmsr(debugctl);
}
= (regs->flags & X86_EFLAGS_IF);
}
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
@ -611,6 +823,26 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
/* This will restore both kcb and current_kprobe */
restore_previous_kprobe(kcb);
} else {
/*
* Always update the kcb status because
* reset_curent_kprobe() doesn't update kcb.
*/
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler)
cur->post_handler(cur, regs, 0);
reset_current_kprobe();
}
}
NOKPROBE_SYMBOL(kprobe_post_process);
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
@ -618,7 +850,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
return;
#if !defined(CONFIG_PREEMPTION)
if (p->ainsn.boostable && !p->post_handler) {
if (p->ainsn.boostable) {
/* Boost up -- we can execute copied instructions directly */
if (!reenter)
reset_current_kprobe();
@ -637,18 +869,50 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_status = KPROBE_REENTER;
} else
kcb->kprobe_status = KPROBE_HIT_SS;
/* Prepare real single stepping */
clear_btf();
regs->flags |= X86_EFLAGS_TF;
if (p->ainsn.emulate_op) {
p->ainsn.emulate_op(p, regs);
kprobe_post_process(p, regs, kcb);
return;
}
/* Disable interrupt, and set ip register on trampoline */
regs->flags &= ~X86_EFLAGS_IF;
/* single step inline if the instruction is an int3 */
if (p->opcode == INT3_INSN_OPCODE)
regs->ip = (unsigned long)p->addr;
else
regs->ip = (unsigned long)p->ainsn.insn;
regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int3"
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn. We also doesn't use trap, but "int3" again
* right after the copied instruction.
* Different from the trap single-step, "int3" single-step can not
* handle the instruction which changes the ip register, e.g. jmp,
* call, conditional jmp, and the instructions which changes the IF
* flags because interrupt must be disabled around the single-stepping.
* Such instructions are software emulated, but others are single-stepped
* using "int3".
*
* When the 2nd "int3" handled, the regs->ip and regs->flags needs to
* be adjusted, so that we can resume execution on correct code.
*/
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
/* Restore saved interrupt flag and ip register */
regs->flags |= kcb->kprobe_saved_flags;
/* Note that regs->ip is executed int3 so must be a step back */
regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
/*
* We have reentered the kprobe_handler(), since another probe was hit while
* within the handler. We save the original kprobes variables and just single
@ -684,6 +948,12 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(reenter_kprobe);
static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
return (kcb->kprobe_status == KPROBE_HIT_SS ||
kcb->kprobe_status == KPROBE_REENTER);
}
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@ -728,7 +998,18 @@ int kprobe_int3_handler(struct pt_regs *regs)
reset_current_kprobe();
return 1;
}
} else if (*addr != INT3_INSN_OPCODE) {
} else if (kprobe_is_ss(kcb)) {
p = kprobe_running();
if ((unsigned long)p->ainsn.insn < regs->ip &&
(unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
/* Most provably this is the second int3 for singlestep */
resume_singlestep(p, regs, kcb);
kprobe_post_process(p, regs, kcb);
return 1;
}
}
if (*addr != INT3_INSN_OPCODE) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@ -801,135 +1082,6 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(trampoline_handler);
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*
* This function prepares to return from the post-single-step
* interrupt. We have to fix up the stack as follows:
*
* 0) Except in the case of absolute or indirect jump or call instructions,
* the new ip is relative to the copied instruction. We need to make
* it relative to the original instruction.
*
* 1) If the single-stepped instruction was pushfl, then the TF and IF
* flags are set in the just-pushed flags, and may need to be cleared.
*
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
*
* If this is the first time we've single-stepped the instruction at
* this probepoint, and the instruction is boostable, boost it: add a
* jump instruction after the copied instruction, that jumps to the next
* instruction after the probepoint.
*/
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
unsigned long *tos = stack_addr(regs);
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
kprobe_opcode_t *insn = p->ainsn.insn;
/* Skip prefixes */
insn = skip_prefixes(insn);
regs->flags &= ~X86_EFLAGS_TF;
switch (*insn) {
case 0x9c: /* pushfl */
*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
*tos |= kcb->kprobe_old_flags;
break;
case 0xc2: /* iret/ret/lret */
case 0xc3:
case 0xca:
case 0xcb:
case 0xcf:
case 0xea: /* jmp absolute -- ip is correct */
/* ip is already adjusted, no more changes required */
p->ainsn.boostable = true;
goto no_change;
case 0xe8: /* call relative - Fix return addr */
*tos = orig_ip + (*tos - copy_ip);
break;
#ifdef CONFIG_X86_32
case 0x9a: /* call absolute -- same as call absolute, indirect */
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
#endif
case 0xff:
if ((insn[1] & 0x30) == 0x10) {
/*
* call absolute, indirect
* Fix return addr; ip is correct.
* But this is not boostable
*/
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
} else if (((insn[1] & 0x31) == 0x20) ||
((insn[1] & 0x31) == 0x21)) {
/*
* jmp near and far, absolute indirect
* ip is correct. And this is boostable
*/
p->ainsn.boostable = true;
goto no_change;
}
default:
break;
}
regs->ip += orig_ip - copy_ip;
no_change:
restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
/*
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled throughout this function.
*/
int kprobe_debug_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
resume_execution(cur, regs, kcb);
regs->flags |= kcb->kprobe_saved_flags;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
/*
* if somebody else is singlestepping across a probe point, flags
* will have TF set, in which case, continue the remaining processing
* of do_debug, as if this is not a probe hit.
*/
if (regs->flags & X86_EFLAGS_TF)
return 0;
return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
@ -947,20 +1099,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
/*
* Trap flag (TF) has been set here because this fault
* happened where the single stepping will be done.
* So clear it by resetting the current kprobe:
*/
regs->flags &= ~X86_EFLAGS_TF;
/*
* Since the single step (trap) has been cancelled,
* we need to restore BTF here.
*/
restore_btf();
/*
* If the TF flag was set before the kprobe hit,
* If the IF flag was set before the kprobe hit,
* don't touch it:
*/
regs->flags |= kcb->kprobe_old_flags;


@ -198,6 +198,8 @@ DEFINE_IDTENTRY(exc_divide_error)
{
do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
FPE_INTDIV, error_get_trap_addr(regs));
amd_clear_divider();
}
DEFINE_IDTENTRY(exc_overflow)
@ -917,9 +919,6 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
dr6 &= ~DR_STEP;
if (kprobe_debug_handler(regs))
goto out;
/*
* The kernel doesn't use INT1
*/


@ -135,8 +135,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@ -1520,6 +1519,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long old_rflags;
/*
* Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
* is an unrestricted guest in order to mark L2 as needing emulation
* if L1 runs L2 as a restricted guest.
*/
if (is_unrestricted_guest(vcpu)) {
kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
vmx->rflags = rflags;
@ -3064,42 +3068,22 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}
if (!(cr0 & X86_CR0_WP))
*hw_cr0 &= ~X86_CR0_WP;
}
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
CPU_BASED_CR3_STORE_EXITING)
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
u32 tmp;
hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
if (is_unrestricted_guest(vcpu))
if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
if (!enable_ept)
hw_cr0 |= X86_CR0_WP;
if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
enter_pmode(vcpu);
@ -3117,8 +3101,47 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
#endif
if (enable_ept && !is_unrestricted_guest(vcpu))
ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
if (enable_ept && !enable_unrestricted_guest) {
/*
* Ensure KVM has an up-to-date snapshot of the guest's CR3. If
* the below code _enables_ CR3 exiting, vmx_cache_reg() will
* (correctly) stop reading vmcs.GUEST_CR3 because it thinks
* KVM's CR3 is installed.
*/
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
/*
* When running with EPT but not unrestricted guest, KVM must
* intercept CR3 accesses when paging is _disabled_. This is
* necessary because restricted guests can't actually run with
* paging disabled, and so KVM stuffs its own CR3 in order to
* run the guest when identity mapped page tables.
*
* Do _NOT_ check the old CR0.PG, e.g. to optimize away the
* update, it may be stale with respect to CR3 interception,
* e.g. after nested VM-Enter.
*
* Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
* stores to forward them to L1, even if KVM does not need to
* intercept them to preserve its identity mapped page tables.
*/
if (!(cr0 & X86_CR0_PG)) {
exec_controls_setbit(vmx, CR3_EXITING_BITS);
} else if (!is_guest_mode(vcpu)) {
exec_controls_clearbit(vmx, CR3_EXITING_BITS);
} else {
tmp = exec_controls_get(vmx);
tmp &= ~CR3_EXITING_BITS;
tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
exec_controls_set(vmx, tmp);
}
if (!is_paging(vcpu) != !(cr0 & X86_CR0_PG)) {
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}
}
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0, hw_cr0);
@ -3213,7 +3236,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long hw_cr4;
hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
if (is_unrestricted_guest(vcpu))
if (enable_unrestricted_guest)
hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
else if (vmx->rmode.vm86_active)
hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
@ -3233,7 +3256,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
if (!is_unrestricted_guest(vcpu)) {
if (!enable_unrestricted_guest) {
if (enable_ept) {
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;


@ -56,6 +56,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
s32 qos_value;
int index;
int ret;
if (!pr)
@ -75,17 +77,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
return -ENODEV;
}
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
(int)ppc, ppc ? "" : "not");
index = ppc;
pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count ||
unlikely(!freq_qos_request_active(&pr->perflib_req)))
if (pr->performance_platform_limit == index ||
ppc >= pr->performance->state_count)
return 0;
ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000);
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
index, index ? "is" : "is not");
pr->performance_platform_limit = index;
if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
/*
* If _PPC returns 0, it means that all of the available states can be
* used ("no limit").
*/
if (index == 0)
qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
else
qos_value = pr->performance->states[index].core_frequency * 1000;
ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
@ -168,9 +183,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (!pr)
continue;
/*
* Reset performance_platform_limit in case there is a stale
* value in it, so as to make it match the "no limit" QoS value
* below.
*/
pr->performance_platform_limit = 0;
ret = freq_qos_add_request(&policy->constraints,
&pr->perflib_req,
FREQ_QOS_MAX, INT_MAX);
&pr->perflib_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);


@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;


@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED)
WAKE_IRQ_DEDICATED_MANAGED | \
WAKE_IRQ_DEDICATED_REVERSE)
#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;
@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);
extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP


@ -675,6 +675,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
dev_pm_enable_wake_irq_complete(dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@ -720,7 +722,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@ -903,7 +905,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);


@ -145,24 +145,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@ -200,7 +183,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@ -213,8 +196,57 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return err;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
* dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
* with reverse enable ordering
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has a dedicated
* wake-up interrupt in addition to the device IO interrupt. It sets
* the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
* to enable dedicated wake-up interrupt after running the runtime suspend
* callback for @dev.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
@ -285,25 +317,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
enable_irq(wirq->irq);
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
* @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
void dev_pm_disable_wake_irq_check(struct device *dev)
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
}
}
/**
* dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
* @dev: Device using the wake IRQ
*
* Enable wake IRQ conditionally based on status; mainly used when the
* wake IRQ should only be enabled after running ->runtime_suspend(),
* which is what WAKE_IRQ_DEDICATED_REVERSE requests.
*
* Should be only called from rpm_suspend() path.
*/
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
enable_irq(wirq->irq);
}
/**
@ -320,7 +383,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@ -343,7 +406,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}
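Editor's note: for a driver whose dedicated wake interrupt can only be unmasked once the device is quiesced, the new reverse-ordering variant defers enabling to dev_pm_enable_wake_irq_complete(). A minimal sketch of a consumer (foo_probe and the IRQ index are hypothetical, not from this patch):

static int foo_probe(struct platform_device *pdev)
{
	int wakeirq = platform_get_irq(pdev, 1);	/* hypothetical index */

	if (wakeirq < 0)
		return wakeirq;

	device_init_wakeup(&pdev->dev, true);
	/* With the _reverse variant, rpm_suspend() enables this IRQ only
	 * after the ->runtime_suspend() callback has completed. */
	return dev_pm_set_dedicated_wake_irq_reverse(&pdev->dev, wakeirq);
}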


@ -314,6 +314,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@ -333,8 +334,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (rc < 0) {
size = rc;
goto out;
}
size += rc;
if (size < expected) {
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;


@ -443,20 +443,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
/*
* The _PSS table doesn't contain whole turbo frequency range.
* This just contains +1 MHZ above the max non turbo frequency,
* with control value corresponding to max turbo ratio. But
* when cpufreq set policy is called, it will call with this
* max frequency, which will cause a reduced performance as
* this driver uses real max turbo frequency as the max
* frequency. So correct this frequency in _PSS table to
* correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz.
*/
if (!global.turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");


@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
/* Set the initial value */
tps68470_gpio_set(gc, offset, value);
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
return 0;
/* Set the initial value */
tps68470_gpio_set(gc, offset, value);
return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
TPS68470_GPIO_MODE_MASK,
TPS68470_GPIO_MODE_OUT_CMOS);


@ -81,7 +81,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
* since we've already mapped it once in
* submit_reloc()
*/
if (WARN_ON(!ptr))
if (WARN_ON(IS_ERR_OR_NULL(ptr)))
return;
for (i = 0; i < dwords; i++) {


@ -200,7 +200,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_3_DATA, 0x800),
SHADER(A6XX_SP_LB_4_DATA, 0x800),
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
SHADER(A6XX_SP_UAV_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),


@ -14,19 +14,6 @@
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
* enum dpu_core_perf_data_bus_id - data bus identifier
* @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
*/
enum dpu_core_perf_data_bus_id {
DPU_CORE_PERF_DATA_BUS_ID_MNOC,
DPU_CORE_PERF_DATA_BUS_ID_LLCC,
DPU_CORE_PERF_DATA_BUS_ID_EBI,
DPU_CORE_PERF_DATA_BUS_ID_MAX,
};
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request


@ -115,7 +115,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
if (!list_empty(&bo->lru))
if (!list_empty(&bo->lru) || bo->pin_count)
return;
if (mem->placement & TTM_PL_FLAG_NO_EVICT)
@ -165,7 +165,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT) &&
!bo->pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_TT:
ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
@ -544,8 +545,9 @@ static void ttm_bo_release(struct kref *kref)
* shrinkers, now that they are queued for
* destruction.
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT || bo->pin_count) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
bo->pin_count = 0;
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
@ -670,6 +672,13 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
if (bo->pin_count) {
*locked = false;
if (busy)
*busy = false;
return false;
}
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
@ -1174,6 +1183,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->moving = NULL;
bo->mem.placement = TTM_PL_FLAG_CACHED;
bo->acc_size = acc_size;
bo->pin_count = 0;
bo->sg = sg;
if (resv) {
bo->base.resv = resv;


@ -352,7 +352,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return -ENOMEM;
fbo->base = *bo;
fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
@ -372,6 +371,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
fbo->base.pin_count = 1;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
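Editor's note: the pin_count field initialized above is driven by the two helpers the "drm/ttm: add ttm_bo_pin()/ttm_bo_unpin() v2" patch introduces. A simplified sketch of their shape (the upstream inlines also require the reservation lock, as asserted here):

static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	++bo->pin_count;
}

static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	--bo->pin_count;
}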


@ -708,7 +708,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */
return 0;
if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */
if (index >= 46 && !(reg & 0x02)) /* PECI 1 */
return 0;
return attr->mode;


@ -694,10 +694,8 @@ static int iic_probe(struct platform_device *ofdev)
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_err(&ofdev->dev, "failed to allocate device data\n");
if (!dev)
return -ENOMEM;
}
platform_set_drvdata(ofdev, dev);


@ -970,12 +970,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL);
if (!dev) {
dev_err(&adev->dev, "cannot allocate memory\n");
ret = -ENOMEM;
goto err_no_mem;
}
dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->vendor = vendor;
dev->adev = adev;
nmk_i2c_of_probe(np, dev);
@ -996,30 +994,21 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
resource_size(&adev->res));
if (!dev->virtbase) {
ret = -ENOMEM;
goto err_no_mem;
}
if (!dev->virtbase)
return -ENOMEM;
dev->irq = adev->irq[0];
ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
if (ret) {
dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
goto err_no_mem;
return ret;
}
dev->clk = devm_clk_get(&adev->dev, NULL);
dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
if (IS_ERR(dev->clk)) {
dev_err(&adev->dev, "could not get i2c clock\n");
ret = PTR_ERR(dev->clk);
goto err_no_mem;
}
ret = clk_prepare_enable(dev->clk);
if (ret) {
dev_err(&adev->dev, "can't prepare_enable clock\n");
goto err_no_mem;
dev_err(&adev->dev, "could enable i2c clock\n");
return PTR_ERR(dev->clk);
}
init_hw(dev);
@ -1042,22 +1031,15 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
ret = i2c_add_adapter(adap);
if (ret)
goto err_no_adap;
return ret;
pm_runtime_put(&adev->dev);
return 0;
err_no_adap:
clk_disable_unprepare(dev->clk);
err_no_mem:
return ret;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
i2c_del_adapter(&dev->adap);
@ -1066,8 +1048,6 @@ static void nmk_i2c_remove(struct amba_device *adev)
clear_all_interrupts(dev);
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
release_mem_region(res->start, resource_size(res));
}
static struct i2c_vendor_data vendor_stn8815 = {
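Editor's note: devm_clk_get_enabled() is what lets both the error paths and the remove path above shrink; it combines the clock lookup with clk_prepare_enable() and registers the disable/unprepare as a devres action. A generic sketch of the simplification (not nomadik-specific):

	/* Before: separate lookup and enable, each with its own unwind. */
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* After: one call; disable and unprepare happen automatically
	 * when the driver unbinds, so remove() needs no clock handling. */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);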


@ -443,9 +443,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out0;
}
id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
dev_err(&pdev->dev, "no mem for private data\n");
ret = -ENOMEM;
goto out0;
}


@ -226,10 +226,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
if (!dev)
goto error;
}
dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;


@ -530,15 +530,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
MLX4_IB_RX_HASH_SRC_IPV6 |
MLX4_IB_RX_HASH_DST_IPV6 |
MLX4_IB_RX_HASH_SRC_PORT_TCP |
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP |
MLX4_IB_RX_HASH_INNER)) {
if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
MLX4_IB_RX_HASH_SRC_IPV6 |
MLX4_IB_RX_HASH_DST_IPV6 |
MLX4_IB_RX_HASH_SRC_PORT_TCP |
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP |
MLX4_IB_RX_HASH_INNER)) {
pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
ucmd->rx_hash_fields_mask);
return (-EOPNOTSUPP);
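Editor's note: the added (u64) cast matters because the OR of the MLX4_IB_RX_HASH_* flags is a 32-bit int; the complement is then computed in 32-bit width and zero-extended, so any future hash flag above bit 31 would pass validation unnoticed. A standalone userspace demonstration of the effect (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fields = 1ULL << 40;	/* hypothetical flag above bit 31 */
	unsigned int known = 0x3;	/* 32-bit OR of supported flags */

	/* ~known is 32 bits wide and zero-extends to 0x00000000fffffffc,
	 * so the unsupported bit 40 is never caught: */
	printf("unchecked: %d\n", !!(fields & ~known));			/* 0 */
	/* Widening before the complement keeps the high bits set: */
	printf("checked:   %d\n", !!(fields & ~(uint64_t)known));	/* 1 */
	return 0;
}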


@ -1390,7 +1390,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
if (mthca_array_get(&dev->qp_table.qp, mqpn))
err = -EBUSY;
else
mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
mthca_array_set(&dev->qp_table.qp, mqpn, qp);
spin_unlock_irq(&dev->qp_table.lock);
if (err)


@ -82,6 +82,7 @@ struct bcm6345_l1_chip {
};
struct bcm6345_l1_cpu {
struct bcm6345_l1_chip *intc;
void __iomem *map_base;
unsigned int parent_irq;
u32 enable_cache[];
@ -115,17 +116,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
struct bcm6345_l1_cpu *cpu;
struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
struct bcm6345_l1_chip *intc = cpu->intc;
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
#ifdef CONFIG_SMP
cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
cpu = intc->cpus[0];
#endif
chained_irq_enter(chip, desc);
for (idx = 0; idx < intc->n_words; idx++) {
@ -257,6 +252,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
if (!cpu)
return -ENOMEM;
cpu->intc = intc;
cpu->map_base = ioremap(res.start, sz);
if (!cpu->map_base)
return -ENOMEM;
@ -272,7 +268,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
return -EINVAL;
}
irq_set_chained_handler_and_data(cpu->parent_irq,
bcm6345_l1_irq_handle, intc);
bcm6345_l1_irq_handle, cpu);
return 0;
}


@ -267,13 +267,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}
static struct irq_chip its_vpe_irq_chip;
static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
struct its_vlpi_map *map = get_vlpi_map(d);
struct its_vpe *vpe = NULL;
int cpu;
if (map) {
cpu = vpe_to_cpuid_lock(map->vpe, flags);
if (d->chip == &its_vpe_irq_chip) {
vpe = irq_data_get_irq_chip_data(d);
} else {
struct its_vlpi_map *map = get_vlpi_map(d);
if (map)
vpe = map->vpe;
}
if (vpe) {
cpu = vpe_to_cpuid_lock(vpe, flags);
} else {
/* Physical LPIs are already locked via the irq_desc lock */
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@ -287,10 +297,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
struct its_vlpi_map *map = get_vlpi_map(d);
struct its_vpe *vpe = NULL;
if (map)
vpe_to_cpuid_unlock(map->vpe, flags);
if (d->chip == &its_vpe_irq_chip) {
vpe = irq_data_get_irq_chip_data(d);
} else {
struct its_vlpi_map *map = get_vlpi_map(d);
if (map)
vpe = map->vpe;
}
if (vpe)
vpe_to_cpuid_unlock(vpe, flags);
}
static struct its_collection *valid_col(struct its_collection *col)
@ -1422,13 +1440,28 @@ static void wait_for_syncr(void __iomem *rdbase)
cpu_relax();
}
static void __direct_lpi_inv(struct irq_data *d, u64 val)
{
void __iomem *rdbase;
unsigned long flags;
int cpu;
/* Target the redistributor this LPI is currently routed to */
cpu = irq_to_cpuid_lock(d, &flags);
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
irq_to_cpuid_unlock(d, flags);
}
static void direct_lpi_inv(struct irq_data *d)
{
struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
unsigned long flags;
u64 val;
int cpu;
if (map) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@ -1442,15 +1475,7 @@ static void direct_lpi_inv(struct irq_data *d)
val = d->hwirq;
}
/* Target the redistributor this LPI is currently routed to */
cpu = irq_to_cpuid_lock(d, &flags);
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
irq_to_cpuid_unlock(d, flags);
__direct_lpi_inv(d, val);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
@ -3928,18 +3953,10 @@ static void its_vpe_send_inv(struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
/* Target the redistributor this VPE is currently known on */
raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
} else {
if (gic_rdists->has_direct_lpi)
__direct_lpi_inv(d, d->parent_data->hwirq);
else
its_vpe_send_cmd(vpe, its_send_inv);
}
}
static void its_vpe_mask_irq(struct irq_data *d)


@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
*z1t = cpu_to_le16(new_z1); /* now send data */
if (bch->tx_idx < bch->tx_skb->len)
return;
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
goto next_t_frame;
return;
@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
}
bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
bz->f1 = new_f1; /* next frame */
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
get_next_bframe(bch);
}
@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
hfcpci_fill_fifo(bch);
else {
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
hfcpci_fill_fifo(bch);
}
@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
return 0;
if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
spin_lock(&hc->lock);
spin_lock_irq(&hc->lock);
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
main_rec_hfcpci(bch);
@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
main_rec_hfcpci(bch);
tx_birq(bch);
}
spin_unlock(&hc->lock);
spin_unlock_irq(&hc->lock);
}
return 0;
}
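Editor's note: dev_kfree_skb() must not run with interrupts disabled or in hard-IRQ context, which these hfcpci paths can be; dev_kfree_skb_any() picks the safe variant at run time. Conceptually, from the 5.10 implementation in net/core/dev.c:

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);	/* defer to softirq */
	else
		dev_kfree_skb(skb);			/* free immediately */
}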


@ -854,7 +854,13 @@ struct smq_policy {
struct background_tracker *bg_work;
bool migrations_allowed;
bool migrations_allowed:1;
/*
* If this is set the policy will try and clean the whole cache
* even if the device is not idle.
*/
bool cleaner:1;
};
/*----------------------------------------------------------------*/
@ -1133,7 +1139,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
if (idle) {
if (idle || mq->cleaner) {
/*
* We'd like to clean everything.
*/
@ -1716,11 +1722,9 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u;
}
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size,
bool mimic_mq,
bool migrations_allowed)
static struct dm_cache_policy *
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
bool mimic_mq, bool migrations_allowed, bool cleaner)
{
unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@ -1807,6 +1811,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
goto bad_btracker;
mq->migrations_allowed = migrations_allowed;
mq->cleaner = cleaner;
return &mq->policy;
@ -1830,21 +1835,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
return __smq_create(cache_size, origin_size, cache_block_size, false, true);
return __smq_create(cache_size, origin_size, cache_block_size,
false, true, false);
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
return __smq_create(cache_size, origin_size, cache_block_size, true, true);
return __smq_create(cache_size, origin_size, cache_block_size,
true, true, false);
}
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
return __smq_create(cache_size, origin_size, cache_block_size, false, false);
return __smq_create(cache_size, origin_size, cache_block_size,
false, false, true);
}
/*----------------------------------------------------------------*/


@ -3258,8 +3258,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = md_start(&rs->md);
if (r) {
ti->error = "Failed to start raid array";
mddev_unlock(&rs->md);
goto bad_md_start;
goto bad_unlock;
}
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
@ -3267,8 +3266,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
if (r) {
ti->error = "Failed to set raid4/5/6 journal mode";
mddev_unlock(&rs->md);
goto bad_journal_mode_set;
goto bad_unlock;
}
}
@ -3279,14 +3277,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (rs_is_raid456(rs)) {
r = rs_set_raid456_stripe_cache(rs);
if (r)
goto bad_stripe_cache;
goto bad_unlock;
}
/* Now do an early reshape check */
if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
r = rs_check_reshape(rs);
if (r)
goto bad_check_reshape;
goto bad_unlock;
/* Restore new, ctr requested layout to perform check */
rs_config_restore(rs, &rs_layout);
@ -3295,7 +3293,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = rs->md.pers->check_reshape(&rs->md);
if (r) {
ti->error = "Reshape check failed";
goto bad_check_reshape;
goto bad_unlock;
}
}
}
@ -3306,11 +3304,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md);
return 0;
bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
bad_unlock:
md_stop(&rs->md);
mddev_unlock(&rs->md);
bad:
raid_set_free(rs);
@ -3321,7 +3317,9 @@ static void raid_dtr(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
mddev_unlock(&rs->md);
raid_set_free(rs);
}


@ -6316,6 +6316,8 @@ static void __md_stop(struct mddev *mddev)
void md_stop(struct mddev *mddev)
{
lockdep_assert_held(&mddev->reconfig_mutex);
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/


@ -136,7 +136,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
unsigned int i;
int ret;
if (op->cs > NAND_MAX_CHIPS)
if (op->cs >= NAND_MAX_CHIPS)
return -EINVAL;
if (check_only)


@ -1180,7 +1180,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
struct meson_nfc *nfc = nand_get_controller_data(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
int nsectors = mtd->writesize / 1024;
int ret;
if (!mtd->name) {
@ -1198,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_NO_SUBPAGE_WRITE;
ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
mtd->oobsize - 2 * nsectors);
mtd->oobsize - 2);
if (ret) {
dev_err(nfc->dev, "failed to ECC init\n");
return -EINVAL;


@ -174,17 +174,17 @@ static void elm_load_syndrome(struct elm_info *info,
switch (info->bch_type) {
case BCH8_ECC:
/* syndrome fragment 0 = ecc[9-12B] */
val = cpu_to_be32(*(u32 *) &ecc[9]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[5-8B] */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[5]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
elm_write_reg(info, offset, val);
/* syndrome fragment 2 = ecc[1-4B] */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[1]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
elm_write_reg(info, offset, val);
/* syndrome fragment 3 = ecc[0B] */
@ -194,35 +194,35 @@ static void elm_load_syndrome(struct elm_info *info,
break;
case BCH4_ECC:
/* syndrome fragment 0 = ecc[20-52b] bits */
val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
((ecc[2] & 0xf) << 28);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[0-20b] bits */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
elm_write_reg(info, offset, val);
break;
case BCH16_ECC:
val = cpu_to_be32(*(u32 *) &ecc[22]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[18]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[14]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[10]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[6]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[2]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
elm_write_reg(info, offset, val);
break;
default:


@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
if (spi_mem_exec_op(spinand->spimem, &op))
return nanddev_get_ecc_requirements(nand)->strength;
mbf >>= 4;
mbf = *(spinand->scratchbuf) >> 4;
if (WARN_ON(mbf > nanddev_get_ecc_requirements(nand)->strength || !mbf))
return nanddev_get_ecc_requirements(nand)->strength;
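Editor's note: the scratchbuf switch exists because buffers passed to spi_mem_exec_op() may be mapped for DMA, so they must come from kmalloc'd memory rather than the caller's stack (the previous code read into a local u8). A sketch of the safe pattern:

	u8 *buf = spinand->scratchbuf;	/* kmalloc'd, DMA-safe */
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, buf);

	if (spi_mem_exec_op(spinand->spimem, &op))
		return nanddev_get_ecc_requirements(nand)->strength;
	mbf = *buf >> 4;	/* copy the result out before buffer reuse */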


@ -29,7 +29,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o


@ -1442,6 +1442,11 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
if (slave_dev->flags & IFF_POINTOPOINT) {
bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
}
}
/* On bonding slaves other than the currently active slave, suppress


@ -732,6 +732,8 @@ static int gs_can_close(struct net_device *netdev)
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
dev->can.state = CAN_STATE_STOPPED;
/* reset the device */
rc = gs_cmd_reset(dev);
if (rc < 0)


@ -1301,7 +1301,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
if (IS_ERR(priv->clk_mdiv)) {
@ -1309,7 +1311,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
goto out_clk;
}
clk_prepare_enable(priv->clk_mdiv);
ret = clk_prepare_enable(priv->clk_mdiv);
if (ret)
goto out_clk;
ret = bcm_sf2_sw_rst(priv);
if (ret) {


@ -1642,8 +1642,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
if (real_len < skb->len) {
err = pskb_trim(skb, real_len);
if (err)
return err;
}
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {


@ -1139,7 +1139,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
(lancer_chip(adapter) || BE3_chip(adapter) ||
skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
goto tx_drop;
}
/* If vlan tag is already inlined in the packet, skip HW VLAN


@ -53,7 +53,10 @@ static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
for (i = 0; i < HNAE3_MAX_TC; i++) {
ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
if (i < hdev->tm_info.num_tc)
ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
else
ets->tc_tx_bw[i] = 0;
if (hdev->tm_info.tc_info[i].tc_sch_mode ==
HCLGE_SCH_MODE_SP)
@ -105,26 +108,31 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
return 0;
}
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
bool *changed)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
int ret;
u8 max_tc_id = 0;
u8 i;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
if (ets->prio_tc[i] > max_tc_id)
max_tc_id = ets->prio_tc[i];
}
ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
if (ret)
return ret;
/* return the max TC number, i.e. the max TC id plus 1 */
return max_tc_id + 1;
}
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
struct ieee_ets *ets, bool *changed,
u8 tc_num)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 i;
for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
@ -134,6 +142,13 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
if (i >= tc_num) {
dev_err(&hdev->pdev->dev,
"tc%u is disabled, cannot set ets bw\n",
i);
return -EINVAL;
}
/* The hardware will switch to sp mode if bandwidth is
* 0, so limit ets bandwidth must be greater than 0.
*/
@ -158,7 +173,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (has_ets_tc && total_ets_bw != BW_PERCENT)
return -EINVAL;
*tc = max_tc + 1;
return 0;
}
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
u8 tc_num;
int ret;
tc_num = hclge_ets_tc_changed(hdev, ets, changed);
ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
if (ret)
return ret;
ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
if (ret)
return ret;
*tc = tc_num;
if (*tc != hdev->tm_info.num_tc)
*changed = true;


@ -651,6 +651,7 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT 100
#define DEFAULT_BW_WEIGHT 1
u8 i;
@ -672,7 +673,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
for (; k < HNAE3_MAX_TC; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
}
}


@ -1839,7 +1839,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
void i40e_dbg_init(void)
{
i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
if (!i40e_dbg_root)
if (IS_ERR(i40e_dbg_root))
pr_info("init of debugfs failed\n");
}
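Editor's note: debugfs_create_dir() never returns NULL; on failure it returns an ERR_PTR() (for instance ERR_PTR(-ENODEV) when debugfs is not compiled in), so a NULL check is dead code. The corrected pattern, shown in a hypothetical driver where debugfs is best-effort:

	struct dentry *root = debugfs_create_dir("mydrv", NULL);

	if (IS_ERR(root))
		pr_info("debugfs unavailable, continuing without it\n");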


@ -1135,16 +1135,21 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ICE_FLOW_FLD_OFF_INVAL);
}
/* add filter for outer headers */
fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
/* add filter for outer headers */
ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
ICE_FD_HW_SEG_NON_TUN);
if (ret == -EEXIST)
/* Rule already exists, free memory and continue */
devm_kfree(dev, seg);
else if (ret)
if (ret == -EEXIST) {
/* Rule already exists, free memory and count as success */
ret = 0;
goto err_exit;
} else if (ret) {
/* could not write filter, free memory */
goto err_exit;
}
/* make tunneled filter HW entries if possible */
memcpy(&tun_seg[1], seg, sizeof(*seg));
@ -1159,18 +1164,13 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
devm_kfree(dev, tun_seg);
}
if (perfect_filter)
set_bit(fltr_idx, hw->fdir_perfect_fltr);
else
clear_bit(fltr_idx, hw->fdir_perfect_fltr);
return ret;
err_exit:
devm_kfree(dev, tun_seg);
devm_kfree(dev, seg);
return -EOPNOTSUPP;
return ret;
}
/**
@ -1680,7 +1680,9 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* input struct is added to the HW filter list */
ice_fdir_update_list_entry(pf, input, fsp->location);
ret = ice_fdir_update_list_entry(pf, input, fsp->location);
if (ret)
goto release_lock;
ret = ice_fdir_write_all_fltr(pf, input, true);
if (ret)


@ -8409,7 +8409,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ixgbe_adapter *adapter = q_vector->adapter;
if (unlikely(skb_tail_pointer(skb) < hdr.network +
VXLAN_HEADROOM))
vxlan_headroom(0)))
return;
/* verify the port is recognized as VXLAN */


@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
trailer_len = alen + plen + 2;
pskb_trim(skb, skb->len - trailer_len);
ret = pskb_trim(skb, skb->len - trailer_len);
if (unlikely(ret))
return ret;
if (skb->protocol == htons(ETH_P_IP)) {
ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
ip_send_check(ipv4hdr);
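Editor's note: atl1e, be2net and mlx5e all gain the same missing check in this batch; pskb_trim() may have to unclone or reallocate the skb and can fail with -ENOMEM, so the error must be propagated (or the frame dropped) rather than ignored. A minimal sketch of the pattern:

	/* Trim trailing bytes the hardware must not transmit; bail out
	 * on allocation failure instead of sending a corrupt frame. */
	err = pskb_trim(skb, new_len);
	if (unlikely(err))
		return err;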


@ -802,7 +802,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
struct fs_node *iter = list_entry(start, struct fs_node, list);
struct mlx5_flow_table *ft = NULL;
if (!root || root->type == FS_TYPE_PRIO_CHAINS)
if (!root)
return NULL;
list_for_each_advance_continue(iter, &root->children, reverse) {
@ -818,20 +818,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
struct fs_node **child)
{
struct fs_node *node = NULL;
while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
node = parent;
parent = parent->parent;
}
if (child)
*child = node;
return parent;
}
/* If reverse is false then return the first flow table next to the passed node
* in the tree, else return the last flow table before the node in the tree.
* If skip is true, skip the flow tables in the same prio_chains prio.
*/
static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
bool skip)
{
struct fs_node *prio_chains_parent = NULL;
struct mlx5_flow_table *ft = NULL;
struct fs_node *curr_node;
struct fs_node *parent;
parent = prio->node.parent;
curr_node = &prio->node;
if (skip)
prio_chains_parent = find_prio_chains_parent(node, NULL);
parent = node->parent;
curr_node = node;
while (!ft && parent) {
ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
if (parent != prio_chains_parent)
ft = find_closest_ft_recursive(parent, &curr_node->list,
reverse);
curr_node = parent;
parent = curr_node->parent;
}
@ -839,15 +861,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers
}
/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
{
return find_closest_ft(prio, false);
return find_closest_ft(node, false, true);
}
/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
{
return find_closest_ft(prio, true);
return find_closest_ft(node, true, true);
}
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
@ -859,7 +881,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
return find_next_chained_ft(prio);
return find_next_chained_ft(&prio->node);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
@ -883,21 +905,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
return 0;
}
static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
struct fs_node *parent,
struct fs_node **child,
bool reverse)
{
struct mlx5_flow_table *ft;
ft = find_closest_ft(node, reverse, false);
if (ft && parent == find_prio_chains_parent(&ft->node, child))
return ft;
return NULL;
}
/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
struct fs_node *prio_parent, *parent = NULL, *child, *node;
struct mlx5_flow_table *prev_ft;
int err = 0;
prev_ft = find_prev_chained_ft(prio);
if (prev_ft) {
prio_parent = find_prio_chains_parent(&prio->node, &child);
/* return directly if not under the first sub ns of prio_chains prio */
if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
return 0;
prev_ft = find_prev_chained_ft(&prio->node);
while (prev_ft) {
struct fs_prio *prev_prio;
fs_get_obj(prev_prio, prev_ft->node.parent);
return connect_fts_in_prio(dev, prev_prio, ft);
err = connect_fts_in_prio(dev, prev_prio, ft);
if (err)
break;
if (!parent) {
parent = find_prio_chains_parent(&prev_prio->node, &child);
if (!parent)
break;
}
node = child;
prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
}
return 0;
return err;
}
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
@ -1036,7 +1092,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
if (err)
return err;
next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@ -1114,7 +1170,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
find_next_chained_ft(&fs_prio->node);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
@ -2073,13 +2129,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules);
/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
struct fs_node *prio_parent, *child;
struct fs_prio *prio;
fs_get_obj(prio, ft->node.parent);
if (!list_is_last(&ft->node.list, &prio->node.children))
return list_next_entry(ft, node.list);
return find_next_chained_ft(prio);
prio_parent = find_prio_chains_parent(&prio->node, &child);
if (prio_parent && list_is_first(&child->list, &prio_parent->children))
return find_closest_ft(&prio->node, false, false);
return find_next_chained_ft(&prio->node);
}
static int update_root_ft_destroy(struct mlx5_flow_table *ft)


@ -425,11 +425,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
return err;
goto err_free_in;
*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
kvfree(in);
err_free_in:
kvfree(in);
return err;
}


@ -1845,6 +1845,17 @@ static int netsec_of_probe(struct platform_device *pdev,
return err;
}
/*
* SynQuacer is physically configured with TX and RX delays
* but the standard firmware claimed otherwise for a long
* time, ignore it.
*/
if (of_machine_is_compatible("socionext,developer-box") &&
priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
}
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!priv->phy_np) {
dev_err(&pdev->dev, "missing required property 'phy-handle'\n");


@ -1550,15 +1550,15 @@ static int temac_probe(struct platform_device *pdev)
}
/* Handle errors for the returned DMA RX and TX interrupts */
if (lp->rx_irq < 0) {
if (lp->rx_irq != -EPROBE_DEFER)
dev_err(&pdev->dev, "could not get DMA RX irq\n");
return lp->rx_irq;
if (lp->rx_irq <= 0) {
rc = lp->rx_irq ?: -EINVAL;
return dev_err_probe(&pdev->dev, rc,
"could not get DMA RX irq\n");
}
if (lp->tx_irq < 0) {
if (lp->tx_irq != -EPROBE_DEFER)
dev_err(&pdev->dev, "could not get DMA TX irq\n");
return lp->tx_irq;
if (lp->tx_irq <= 0) {
rc = lp->tx_irq ?: -EINVAL;
return dev_err_probe(&pdev->dev, rc,
"could not get DMA TX irq\n");
}
if (temac_np) {
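Editor's note: dev_err_probe() folds the -EPROBE_DEFER special case into one call; it logs via dev_err() for real errors, records deferrals silently (visible in debugfs under devices_deferred), and returns the error it was given. The <= 0 test above accounts for IRQ lookup helpers such as irq_of_parse_and_map() that return 0 on failure. A sketch of the resulting idiom:

	/* Map a zero "no IRQ" result to a real errno before reporting. */
	if (lp->rx_irq <= 0)
		return dev_err_probe(&pdev->dev, lp->rx_irq ?: -EINVAL,
				     "could not get DMA RX irq\n");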


@ -263,6 +263,13 @@ static int mv3310_power_up(struct phy_device *phydev)
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
MV_V2_PORT_CTRL_PWRDOWN);
/* Sometimes, the power down bit doesn't clear immediately, and
* a read of this register causes the bit not to clear. Delay
* 100us to allow the PHY to come out of power down mode before
* the next access.
*/
udelay(100);
if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
priv->firmware_ver < 0x00030000)
return ret;


@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;


@ -2130,6 +2130,15 @@ static void team_setup_by_port(struct net_device *dev,
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
eth_hw_addr_inherit(dev, port_dev);
if (port_dev->flags & IFF_POINTOPOINT) {
dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
} else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
(IFF_BROADCAST | IFF_MULTICAST)) {
dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
}
}
static int team_dev_type_check_change(struct net_device *dev,


@ -3457,7 +3457,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;


@ -604,9 +604,23 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8005, /* A-300 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
@ -614,6 +628,13 @@ static const struct usb_device_id products[] = {
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,


@ -1738,6 +1738,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
} else if (!info->in || !info->out)
status = usbnet_get_endpoints (dev, udev);
else {
u8 ep_addrs[3] = {
info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
};
dev->in = usb_rcvbulkpipe (xdev, info->in);
dev->out = usb_sndbulkpipe (xdev, info->out);
if (!(info->flags & FLAG_NO_SETINT))
@ -1747,6 +1751,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
else
status = 0;
if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
status = -EINVAL;
}
if (status >= 0 && dev->status)
status = init_status (dev, udev);
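Editor's note: usb_check_bulk_endpoints() takes a zero-terminated array of endpoint addresses and returns true only when every listed endpoint exists on the interface and is of bulk type, letting usbnet reject devices whose descriptors do not match the hard-wired driver_info. A sketch (the addresses are illustrative):

	/* Direction is encoded in the address: 0x81 = EP1 IN, 0x02 = EP2 OUT. */
	static const u8 ep_addrs[] = { 0x81, 0x02, 0 };

	if (!usb_check_bulk_endpoints(intf, ep_addrs))
		return -EINVAL;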


@ -289,9 +289,23 @@ static const struct usb_device_id products [] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8005, /* A-300 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
@ -299,6 +313,13 @@ static const struct usb_device_id products [] = {
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,


@ -3223,6 +3223,8 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
_virtnet_set_queues(vi, vi->curr_queue_pairs);
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@ -3243,8 +3245,6 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
virtnet_set_queues(vi, vi->curr_queue_pairs);
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);


@ -0,0 +1,7 @@
#
# Makefile for the vxlan driver
#
obj-$(CONFIG_VXLAN) += vxlan.o
vxlan-objs := vxlan_core.o


@ -2720,7 +2720,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM,
err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
@ -2781,7 +2781,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM,
err = skb_tunnel_check_pmtu(skb, ndst,
vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
@ -3158,14 +3159,12 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
/* This check is different than dev->max_mtu, because it looks at
* the lowerdev->mtu, rather than the static dev->max_mtu
*/
if (lowerdev) {
int max_mtu = lowerdev->mtu -
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
if (new_mtu > max_mtu)
return -EINVAL;
}
@ -3784,11 +3783,11 @@ static void vxlan_config_apply(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
int max_mtu = ETH_MAX_MTU;
u32 flags = conf->flags;
if (!changelink) {
if (conf->flags & VXLAN_F_GPE)
if (flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
else
vxlan_ether_setup(dev);
@ -3814,8 +3813,7 @@ static void vxlan_config_apply(struct net_device *dev,
dev->needed_tailroom = lowerdev->needed_tailroom;
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
max_mtu = lowerdev->mtu - vxlan_headroom(flags);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
@ -3826,10 +3824,9 @@ static void vxlan_config_apply(struct net_device *dev,
if (dev->mtu > max_mtu)
dev->mtu = max_mtu;
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
else
needed_headroom += VXLAN_HEADROOM;
if (flags & VXLAN_F_COLLECT_METADATA)
flags |= VXLAN_F_IPV6;
needed_headroom += vxlan_headroom(flags);
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
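Editor's note: the conversions above funnel through one flags-aware helper that replaces the fixed VXLAN_HEADROOM/VXLAN6_HEADROOM constants; since GPE frames carry no inner Ethernet header, ETH_HLEN is counted only for non-GPE configurations. Roughly (names follow the upstream helper, but treat the exact expression as a sketch):

#define vxlan_headroom(flags)						\
	((((flags) & VXLAN_F_IPV6) ? sizeof(struct ipv6hdr) :		\
				     sizeof(struct iphdr)) +		\
	 sizeof(struct udphdr) + sizeof(struct vxlanhdr) +		\
	 (((flags) & VXLAN_F_GPE) ? 0 : ETH_HLEN))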


@ -160,9 +160,9 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
if (phy->dev->cap.has_2ghz)
if (phy->cap.has_2ghz)
mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
if (phy->dev->cap.has_5ghz)
if (phy->cap.has_5ghz)
mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@ -463,13 +463,13 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
dev_set_drvdata(dev->dev, dev);
mt76_phy_init(dev, hw);
if (dev->cap.has_2ghz) {
if (phy->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
if (ret)
return ret;
}
if (dev->cap.has_5ghz) {
if (phy->cap.has_5ghz) {
ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
if (ret)
return ret;


@ -561,6 +561,7 @@ struct mt76_phy {
struct mt76_channel_state *chan_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
@ -630,7 +631,6 @@ struct mt76_dev {
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;


@ -170,7 +170,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
}
eeprom = (u8 *)dev->mt76.eeprom.data;
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
/* Check for 1SS devices */


@ -202,7 +202,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
int ret;
/* Increase buffer size to receive large VHT MPDUs */
if (dev->mt76.cap.has_5ghz)
if (dev->mphy.cap.has_5ghz)
rx_buf_size *= 2;
mt76_dma_attach(&dev->mt76);


@ -100,20 +100,20 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
if (is_mt7663(&dev->mt76)) {
/* dual band */
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
return;
}
if (is_mt7622(&dev->mt76)) {
/* 2GHz only */
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
return;
}
if (is_mt7611(&dev->mt76)) {
/* 5GHz only */
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
return;
}
@ -121,17 +121,17 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
eeprom[MT_EE_WIFI_CONF]);
switch (val) {
case MT_EE_5GHZ:
dev->mt76.cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
case MT_EE_DBDC:
dev->dbdc_support = true;
/* fall through */
fallthrough;
case MT_EE_2GHZ:
dev->mphy.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
}
}


@ -52,15 +52,15 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
dev->mphy.cap.has_2ghz, dev->mphy.cap.has_5ghz);
if (dev->no_2ghz) {
dev->mt76.cap.has_2ghz = false;
dev->mphy.cap.has_2ghz = false;
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
if (is_mt7630(dev)) {
dev->mt76.cap.has_5ghz = false;
dev->mphy.cap.has_5ghz = false;
dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
}


@ -245,7 +245,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (ret)
return ret;
if (dev->mt76.cap.has_5ghz) {
if (dev->mphy.cap.has_5ghz) {
struct ieee80211_supported_band *sband;
sband = &dev->mphy.sband_5g.sband;
@ -253,7 +253,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
mt76x0_init_txpower(dev, sband);
}
if (dev->mt76.cap.has_2ghz)
if (dev->mphy.cap.has_2ghz)
mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt76x02_init_debugfs(dev);


@ -447,11 +447,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
else
coex3 |= BIT(4);
coex3 |= BIT(3);
if (dev->mt76.cap.has_2ghz)
if (dev->mphy.cap.has_2ghz)
wlan |= BIT(6);
} else {
/* single antenna mode */
if (dev->mt76.cap.has_5ghz) {
if (dev->mphy.cap.has_5ghz) {
coex3 |= BIT(3) | BIT(4);
} else {
wlan |= BIT(6);


@ -75,14 +75,14 @@ void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
}
}


@ -57,14 +57,14 @@ static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
switch (val) {
case MT_EE_5GHZ:
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
}


@ -528,10 +528,9 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
{
struct ieee80211_sband_iftype_data *data;
struct ieee80211_supported_band *band;
struct mt76_dev *mdev = &phy->dev->mt76;
int n;
if (mdev->cap.has_2ghz) {
if (phy->mt76->cap.has_2ghz) {
data = phy->iftype[NL80211_BAND_2GHZ];
n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data);
@ -540,7 +539,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
band->n_iftype_data = n;
}
if (mdev->cap.has_5ghz) {
if (phy->mt76->cap.has_5ghz) {
data = phy->iftype[NL80211_BAND_5GHZ];
n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data);


@@ -192,12 +192,39 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
 	link->clkpm_disable = blacklist ? 1 : 0;
 }
 
-static bool pcie_retrain_link(struct pcie_link_state *link)
+static int pcie_wait_for_retrain(struct pci_dev *pdev)
 {
-	struct pci_dev *parent = link->pdev;
 	unsigned long end_jiffies;
 	u16 reg16;
 
+	/* Wait for Link Training to be cleared by hardware */
+	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+	do {
+		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16);
+		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+			return 0;
+		msleep(1);
+	} while (time_before(jiffies, end_jiffies));
+
+	return -ETIMEDOUT;
+}
+
+static int pcie_retrain_link(struct pcie_link_state *link)
+{
+	struct pci_dev *parent = link->pdev;
+	int rc;
+	u16 reg16;
+
+	/*
+	 * Ensure the updated LNKCTL parameters are used during link
+	 * training by checking that there is no ongoing link training to
+	 * avoid LTSSM race as recommended in Implementation Note at the
+	 * end of PCIe r6.0.1 sec 7.5.3.7.
+	 */
+	rc = pcie_wait_for_retrain(parent);
+	if (rc)
+		return rc;
+
 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
 	reg16 |= PCI_EXP_LNKCTL_RL;
 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
@@ -211,15 +238,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
 	}
 
-	/* Wait for link training end. Break out after waiting for timeout */
-	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
-	do {
-		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
-		if (!(reg16 & PCI_EXP_LNKSTA_LT))
-			break;
-		msleep(1);
-	} while (time_before(jiffies, end_jiffies));
-	return !(reg16 & PCI_EXP_LNKSTA_LT);
+	return pcie_wait_for_retrain(parent);
 }
 
 /*
@@ -288,15 +307,15 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 	reg16 &= ~PCI_EXP_LNKCTL_CCC;
 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
 
-	if (pcie_retrain_link(link))
-		return;
+	if (pcie_retrain_link(link)) {
 
-	/* Training failed. Restore common clock configurations */
-	pci_err(parent, "ASPM: Could not configure common clock\n");
-	list_for_each_entry(child, &linkbus->devices, bus_list)
-		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+		/* Training failed. Restore common clock configurations */
+		pci_err(parent, "ASPM: Could not configure common clock\n");
+		list_for_each_entry(child, &linkbus->devices, bus_list)
+			pcie_capability_write_word(child, PCI_EXP_LNKCTL,
 					   child_reg[PCI_FUNC(child->devfn)]);
-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
+		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
+	}
 }
 
 /* Convert L0s latency encoding to ns */
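
Factoring the poll into pcie_wait_for_retrain() works because the same bounded wait is needed twice: once before setting PCI_EXP_LNKCTL_RL, so updated LNKCTL parameters are not consumed by an in-flight training (the LTSSM race the new comment describes), and once after, to wait for the retrain to finish. The underlying idiom is an ordinary jiffies-bounded status poll; a self-contained sketch of that shape, with an invented name and the timeout left as a parameter:

	/* sketch: poll a link-status bit until it clears or a timeout expires */
	static int sketch_wait_lnksta_bit_clear(struct pci_dev *pdev, u16 bit,
						unsigned long timeout)
	{
		unsigned long end = jiffies + timeout;
		u16 sta;

		do {
			pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &sta);
			if (!(sta & bit))
				return 0;	/* bit cleared: done */
			msleep(1);
		} while (time_before(jiffies, end));

		return -ETIMEDOUT;	/* callers treat nonzero as failure */
	}

Returning 0/-ETIMEDOUT rather than a bool is also why pcie_aspm_configure_common_clock() now treats a nonzero return as failure and restores the saved LNKCTL values.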


@@ -153,7 +153,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
 		phy_set_drvdata(phy, &priv->ports[i]);
 		i++;
 
-		if (i > INNO_PHY_PORT_NUM) {
+		if (i >= INNO_PHY_PORT_NUM) {
 			dev_warn(dev, "Support %d ports in maximum\n", i);
 			break;
 		}
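
The one-character change matters because i has already been incremented when the bound is checked, so it equals the number of initialized ports; allowing i == INNO_PHY_PORT_NUM to continue means the next iteration indexes one element past the end of the ports array. A reduced, self-contained sketch of the pattern (array and names invented):

	#include <stdio.h>
	#define NPORTS 2

	int main(void)
	{
		int ports[NPORTS];
		int i = 0, child;

		for (child = 0; child < 5; child++) {
			ports[i] = child;	/* with '>', a third pass would write ports[2] */
			i++;
			if (i >= NPORTS) {	/* '>=' stops before any out-of-bounds write */
				printf("supports %d ports at most\n", i);
				break;
			}
		}
		return 0;
	}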


@@ -68,23 +68,27 @@ static const char * const qcom_snps_hsphy_vreg_names[] = {
 /**
  * struct qcom_snps_hsphy - snps hs phy attributes
  *
+ * @dev: device structure
+ *
  * @phy: generic phy
  * @base: iomapped memory space for snps hs phy
  *
- * @cfg_ahb_clk: AHB2PHY interface clock
- * @ref_clk: phy reference clock
- * @iface_clk: phy interface clock
+ * @num_clks: number of clocks
+ * @clks: array of clocks
  * @phy_reset: phy reset control
  * @vregs: regulator supplies bulk data
  * @phy_initialized: if PHY has been initialized correctly
  * @mode: contains the current mode the PHY is in
+ * @update_seq_cfg: tuning parameters for phy init
  */
 struct qcom_snps_hsphy {
+	struct device *dev;
+
 	struct phy *phy;
 	void __iomem *base;
 
-	struct clk *cfg_ahb_clk;
-	struct clk *ref_clk;
+	int num_clks;
+	struct clk_bulk_data *clks;
 	struct reset_control *phy_reset;
 	struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
@@ -92,6 +96,34 @@ struct qcom_snps_hsphy {
 	enum phy_mode mode;
 };
 
+static int qcom_snps_hsphy_clk_init(struct qcom_snps_hsphy *hsphy)
+{
+	struct device *dev = hsphy->dev;
+
+	hsphy->num_clks = 2;
+	hsphy->clks = devm_kcalloc(dev, hsphy->num_clks, sizeof(*hsphy->clks), GFP_KERNEL);
+	if (!hsphy->clks)
+		return -ENOMEM;
+
+	/*
+	 * TODO: Currently no device tree instantiation of the PHY is using the clock.
+	 * This needs to be fixed in order for this code to be able to use devm_clk_bulk_get().
+	 */
+	hsphy->clks[0].id = "cfg_ahb";
+	hsphy->clks[0].clk = devm_clk_get_optional(dev, "cfg_ahb");
+	if (IS_ERR(hsphy->clks[0].clk))
+		return dev_err_probe(dev, PTR_ERR(hsphy->clks[0].clk),
+				     "failed to get cfg_ahb clk\n");
+
+	hsphy->clks[1].id = "ref";
+	hsphy->clks[1].clk = devm_clk_get(dev, "ref");
+	if (IS_ERR(hsphy->clks[1].clk))
+		return dev_err_probe(dev, PTR_ERR(hsphy->clks[1].clk),
+				     "failed to get ref clk\n");
+
+	return 0;
+}
+
 static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
 					      u32 mask, u32 val)
 {
@@ -122,22 +154,13 @@ static int qcom_snps_hsphy_suspend(struct qcom_snps_hsphy *hsphy)
 					   0, USB2_AUTO_RESUME);
 	}
 
-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
 	return 0;
 }
 
 static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy)
 {
-	int ret;
-
 	dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY, mode\n");
 
-	ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
-	if (ret) {
-		dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n");
-		return ret;
-	}
-
 	return 0;
 }
@@ -183,16 +206,16 @@ static int qcom_snps_hsphy_init(struct phy *phy)
 	if (ret)
 		return ret;
 
-	ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+	ret = clk_bulk_prepare_enable(hsphy->num_clks, hsphy->clks);
 	if (ret) {
-		dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+		dev_err(&phy->dev, "failed to enable clocks, %d\n", ret);
 		goto poweroff_phy;
 	}
 
 	ret = reset_control_assert(hsphy->phy_reset);
 	if (ret) {
 		dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
-		goto disable_ahb_clk;
+		goto disable_clks;
 	}
 
 	usleep_range(100, 150);
@@ -200,7 +223,7 @@ static int qcom_snps_hsphy_init(struct phy *phy)
 	ret = reset_control_deassert(hsphy->phy_reset);
 	if (ret) {
 		dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
-		goto disable_ahb_clk;
+		goto disable_clks;
 	}
 
 	qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
@@ -246,8 +269,8 @@ static int qcom_snps_hsphy_init(struct phy *phy)
 	return 0;
 
-disable_ahb_clk:
-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
+disable_clks:
+	clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
 poweroff_phy:
 	regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
@@ -259,7 +282,7 @@ static int qcom_snps_hsphy_exit(struct phy *phy)
 	struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
 
 	reset_control_assert(hsphy->phy_reset);
-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
+	clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
 	regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
 
 	hsphy->phy_initialized = false;
@@ -299,17 +322,15 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
 	if (!hsphy)
 		return -ENOMEM;
 
+	hsphy->dev = dev;
+
 	hsphy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(hsphy->base))
 		return PTR_ERR(hsphy->base);
 
-	hsphy->ref_clk = devm_clk_get(dev, "ref");
-	if (IS_ERR(hsphy->ref_clk)) {
-		ret = PTR_ERR(hsphy->ref_clk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get ref clk, %d\n", ret);
-		return ret;
-	}
+	ret = qcom_snps_hsphy_clk_init(hsphy);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to initialize clocks\n");
 
 	hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 	if (IS_ERR(hsphy->phy_reset)) {
@@ -322,12 +343,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
 		hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i];
 
 	ret = devm_regulator_bulk_get(dev, num, hsphy->vregs);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get regulator supplies: %d\n",
-				ret);
-		return ret;
-	}
+	if (ret)
+		return dev_err_probe(dev, ret,
+				     "failed to get regulator supplies\n");
 
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
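
The clk_bulk conversion keeps both PHY clocks' prepare/enable state managed as a unit: qcom_snps_hsphy_clk_init() fills the struct clk_bulk_data array by hand because, per the TODO comment in the hunk, existing device trees may not describe the cfg_ahb clock, and devm_clk_get_optional() tolerates a missing clock where devm_clk_bulk_get() would fail the whole probe. If the device trees are ever fixed, the helper could plausibly collapse to a sketch like this (an assumption about future DTs, not what this patch does):

	/* sketch, assuming every DT provides both clocks */
	static int sketch_clk_init(struct qcom_snps_hsphy *hsphy)
	{
		struct device *dev = hsphy->dev;

		hsphy->num_clks = 2;
		hsphy->clks = devm_kcalloc(dev, hsphy->num_clks,
					   sizeof(*hsphy->clks), GFP_KERNEL);
		if (!hsphy->clks)
			return -ENOMEM;

		hsphy->clks[0].id = "cfg_ahb";
		hsphy->clks[1].id = "ref";

		/* one call fetches the whole array; fails if any clock is absent */
		return devm_clk_bulk_get(dev, hsphy->num_clks, hsphy->clks);
	}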


@@ -210,7 +210,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
 		return -EINVAL;
 
 	if (quirks->ec_read_only)
-		return -EOPNOTSUPP;
+		return 0;
 
 	/* read current device state */
 	result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
@@ -841,15 +841,15 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
 static void msi_init_rfkill(struct work_struct *ignored)
 {
 	if (rfk_wlan) {
-		rfkill_set_sw_state(rfk_wlan, !wlan_s);
+		msi_rfkill_set_state(rfk_wlan, !wlan_s);
 		rfkill_wlan_set(NULL, !wlan_s);
 	}
 	if (rfk_bluetooth) {
-		rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+		msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
 		rfkill_bluetooth_set(NULL, !bluetooth_s);
 	}
 	if (rfk_threeg) {
-		rfkill_set_sw_state(rfk_threeg, !threeg_s);
+		msi_rfkill_set_state(rfk_threeg, !threeg_s);
 		rfkill_threeg_set(NULL, !threeg_s);
 	}
 }
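
Both hunks address the same out-of-sync problem on EC-read-only models such as the MSI Wind U100: state writes now succeed silently instead of erroring out, and the boot/resume worker reports state through the driver's own msi_rfkill_set_state() helper rather than rfkill_set_sw_state() directly. That helper, which the added lines call, distinguishes hardware- from software-blocked state roughly as follows (reproduced from the driver for context; treat the exact body as approximate):

	static void msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
	{
		if (quirks->ec_read_only)
			rfkill_set_hw_state(rfkill, blocked);
		else
			rfkill_set_sw_state(rfkill, blocked);
	}

On read-only ECs the radio switch is firmware-controlled, so it must be exposed as a hardware block; reporting it as a software block is what let the rfkill state drift.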


@@ -147,12 +147,13 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 		return err;
 	}
 
-	return pwm_set_chip_data(pwm, channel);
+	return 0;
 }
 
 static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+	struct meson_pwm *meson = to_meson_pwm(chip);
+	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
 
 	if (channel)
 		clk_disable_unprepare(channel->clk);
@@ -161,9 +162,10 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 			  const struct pwm_state *state)
 {
-	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
-	unsigned int duty, period, pre_div, cnt, duty_cnt;
+	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
+	unsigned int pre_div, cnt, duty_cnt;
 	unsigned long fin_freq;
+	u64 duty, period;
 
 	duty = state->duty_cycle;
 	period = state->period;
@@ -185,19 +187,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 	dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
 
-	pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
+	pre_div = div64_u64(fin_freq * period, NSEC_PER_SEC * 0xffffLL);
 	if (pre_div > MISC_CLK_DIV_MASK) {
 		dev_err(meson->chip.dev, "unable to get period pre_div\n");
 		return -EINVAL;
 	}
 
-	cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
+	cnt = div64_u64(fin_freq * period, NSEC_PER_SEC * (pre_div + 1));
 	if (cnt > 0xffff) {
 		dev_err(meson->chip.dev, "unable to get period cnt\n");
 		return -EINVAL;
 	}
 
-	dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
+	dev_dbg(meson->chip.dev, "period=%llu pre_div=%u cnt=%u\n", period,
 		pre_div, cnt);
 
 	if (duty == period) {
@@ -210,14 +212,13 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 		channel->lo = cnt;
 	} else {
 		/* Then check is we can have the duty with the same pre_div */
-		duty_cnt = div64_u64(fin_freq * (u64)duty,
-				     NSEC_PER_SEC * (pre_div + 1));
+		duty_cnt = div64_u64(fin_freq * duty, NSEC_PER_SEC * (pre_div + 1));
 		if (duty_cnt > 0xffff) {
 			dev_err(meson->chip.dev, "unable to get duty cycle\n");
 			return -EINVAL;
 		}
 
-		dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
+		dev_dbg(meson->chip.dev, "duty=%llu pre_div=%u duty_cnt=%u\n",
 			duty, pre_div, duty_cnt);
 
 		channel->pre_div = pre_div;
@@ -230,7 +231,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
 {
-	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
 	struct meson_pwm_channel_data *channel_data;
 	unsigned long flags;
 	u32 value;
@@ -273,8 +274,8 @@ static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
 static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 			   const struct pwm_state *state)
 {
-	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
 	struct meson_pwm *meson = to_meson_pwm(chip);
+	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
 	int err = 0;
 
 	if (!state)
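
The u64 conversion matters whenever the requested period exceeds UINT_MAX nanoseconds (about 4.295 s): pwm_state carries period and duty_cycle as u64, and assigning them to unsigned int silently truncates modulo 2^32. A worked example as a self-contained sketch: a 5 s request becomes roughly a 0.7 s one, which the old code would then program without any error.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t period = 5000000000ULL;	/* 5 s, as pwm_state->period carries it */
		unsigned int truncated = (unsigned int)period;

		/* prints 705032704: 5000000000 mod 2^32 */
		printf("%u\n", truncated);
		return 0;
	}

Dividing with div64_u64() instead of plain '/' is also required once the operands are u64, since 32-bit kernels do not link the compiler's 64-bit division helpers.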


@@ -133,6 +133,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
 	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
 
 	dasd_schedule_block_bh(block);
+	dasd_schedule_device_bh(base);
 	return 0;
 }


@@ -719,7 +719,6 @@ struct qeth_card_info {
 	u16 chid;
 	u8 ids_valid:1; /* cssid,iid,chid */
 	u8 dev_addr_is_registered:1;
-	u8 open_when_online:1;
 	u8 promisc_mode:1;
 	u8 use_v1_blkt:1;
 	u8 is_vm_nic:1;


@@ -5351,8 +5351,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
 	qeth_clear_ipacmd_list(card);
 
 	rtnl_lock();
-	card->info.open_when_online = card->dev->flags & IFF_UP;
-	dev_close(card->dev);
 	netif_device_detach(card->dev);
 	netif_carrier_off(card->dev);
 	rtnl_unlock();


@@ -2270,9 +2270,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
 		qeth_enable_hw_features(dev);
 		qeth_l2_enable_brport_features(card);
 
-		if (card->info.open_when_online) {
-			card->info.open_when_online = 0;
-			dev_open(dev, NULL);
+		if (netif_running(dev)) {
+			local_bh_disable();
+			napi_schedule(&card->napi);
+			/* kick-start the NAPI softirq: */
+			local_bh_enable();
+			qeth_l2_set_rx_mode(dev);
 		}
 		rtnl_unlock();
 	}
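
Together with the removal of open_when_online in the two s390/qeth hunks above, this replaces the old dev_close()/dev_open() cycle, which user space observed as a spurious DOWN/UP, with an in-place restart of packet processing: if the netdev stayed administratively UP, NAPI is simply rescheduled. The local_bh_disable()/local_bh_enable() bracket is what makes the kick immediate, since pending softirqs are run when bottom halves are re-enabled. As a generic sketch of the idiom (driver fields hypothetical):

	/* sketch: revive rx processing on a device that stayed UP across recovery */
	if (netif_running(netdev)) {
		local_bh_disable();
		napi_schedule(&priv->napi);	/* raises NET_RX_SOFTIRQ */
		local_bh_enable();		/* pending softirq runs here */
	}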
