Merge 5.10.13 into android12-5.10

Changes in 5.10.13
	iwlwifi: provide gso_type to GSO packets
	nbd: freeze the queue while we're adding connections
	tty: avoid using vfs_iocb_iter_write() for redirected console writes
	ACPI: sysfs: Prefer "compatible" modalias
	ACPI: thermal: Do not call acpi_thermal_check() directly
	kernel: kexec: remove the lock operation of system_transition_mutex
	ALSA: hda/realtek: Enable headset of ASUS B1400CEPE with ALC256
	ALSA: hda/via: Apply the workaround generically for Clevo machines
	parisc: Enable -mlong-calls gcc option by default when !CONFIG_MODULES
	media: cec: add stm32 driver
	media: cedrus: Fix H264 decoding
	media: hantro: Fix reset_raw_fmt initialization
	media: rc: fix timeout handling after switch to microsecond durations
	media: rc: ite-cir: fix min_timeout calculation
	media: rc: ensure that uevent can be read directly after rc device register
	ARM: dts: tbs2910: rename MMC node aliases
	ARM: dts: ux500: Reserve memory carveouts
	ARM: dts: imx6qdl-gw52xx: fix duplicate regulator naming
	wext: fix NULL-ptr-dereference with cfg80211's lack of commit()
	x86/xen: avoid warning in Xen pv guest with CONFIG_AMD_MEM_ENCRYPT enabled
	ASoC: AMD Renoir - refine DMI entries for some Lenovo products
	Revert "drm/amdgpu/swsmu: drop set_fan_speed_percent (v2)"
	drm/nouveau/kms/gk104-gp1xx: Fix > 64x64 cursors
	drm/i915: Always flush the active worker before returning from the wait
	drm/i915/gt: Always try to reserve GGTT address 0x0
	drivers/nouveau/kms/nv50-: Reject format modifiers for cursor planes
	bcache: only check feature sets when sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES
	net: usb: qmi_wwan: added support for Thales Cinterion PLSx3 modem family
	s390: uv: Fix sysfs max number of VCPUs reporting
	s390/vfio-ap: No need to disable IRQ after queue reset
	PM: hibernate: flush swap writer after marking
	x86/entry: Emit a symbol for register restoring thunk
	efi/apple-properties: Reinstate support for boolean properties
	crypto: marvel/cesa - Fix tdma descriptor on 64-bit
	drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs
	drivers: soc: atmel: add null entry at the end of at91_soc_allowed_list[]
	btrfs: fix lockdep warning due to seqcount_mutex on 32bit arch
	btrfs: fix possible free space tree corruption with online conversion
	KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
	KVM: x86/pmu: Fix UBSAN shift-out-of-bounds warning in intel_pmu_refresh()
	KVM: arm64: Filter out v8.1+ events on v8.0 HW
	KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES on nested vmexit
	KVM: x86: allow KVM_REQ_GET_NESTED_STATE_PAGES outside guest mode for VMX
	KVM: nVMX: Sync unsync'd vmcs02 state to vmcs12 on migration
	KVM: x86: get smi pending status correctly
	KVM: Forbid the use of tagged userspace addresses for memslots
	io_uring: fix wqe->lock/completion_lock deadlock
	xen: Fix XenStore initialisation for XS_LOCAL
	leds: trigger: fix potential deadlock with libata
	arm64: dts: broadcom: Fix USB DMA address translation for Stingray
	mt7601u: fix kernel crash unplugging the device
	mt76: mt7663s: fix rx buffer refcounting
	mt7601u: fix rx buffer refcounting
	iwlwifi: Fix IWL_SUBDEVICE_NO_160 macro to use the correct bit.
	drm/i915/gt: Clear CACHE_MODE prior to clearing residuals
	drm/i915/pmu: Don't grab wakeref when enabling events
	net/mlx5e: Fix IPSEC stats
	ARM: dts: imx6qdl-kontron-samx6i: fix pwms for lcd-backlight
	drm/nouveau/svm: fail NOUVEAU_SVM_INIT ioctl on unsupported devices
	drm/vc4: Correct lbm size and calculation
	drm/vc4: Correct POS1_SCL for hvs5
	drm/nouveau/dispnv50: Restore pushing of all data.
	drm/i915: Check for all subplatform bits
	drm/i915/selftest: Fix potential memory leak
	uapi: fix big endian definition of ipv6_rpl_sr_hdr
	KVM: Documentation: Fix spec for KVM_CAP_ENABLE_CAP_VM
	tee: optee: replace might_sleep with cond_resched
	xen-blkfront: allow discard-* nodes to be optional
	blk-mq: test QUEUE_FLAG_HCTX_ACTIVE for sbitmap_shared in hctx_may_queue
	clk: imx: fix Kconfig warning for i.MX SCU clk
	clk: mmp2: fix build without CONFIG_PM
	clk: qcom: gcc-sm8250: Use floor ops for sdcc clks
	ARM: imx: build suspend-imx6.S with arm instruction set
	ARM: zImage: atags_to_fdt: Fix node names on added root nodes
	netfilter: nft_dynset: add timeout extension to template
	Revert "RDMA/mlx5: Fix devlink deadlock on net namespace deletion"
	Revert "block: simplify set_init_blocksize" to regain lost performance
	xfrm: Fix oops in xfrm_replay_advance_bmp
	xfrm: fix disable_xfrm sysctl when used on xfrm interfaces
	selftests: xfrm: fix test return value override issue in xfrm_policy.sh
	xfrm: Fix wraparound in xfrm_policy_addr_delta()
	arm64: dts: ls1028a: fix the offset of the reset register
	ARM: imx: fix imx8m dependencies
	ARM: dts: imx6qdl-kontron-samx6i: fix i2c_lcd/cam default status
	ARM: dts: imx6qdl-sr-som: fix some cubox-i platforms
	arm64: dts: imx8mp: Correct the gpio ranges of gpio3
	firmware: imx: select SOC_BUS to fix firmware build
	RDMA/cxgb4: Fix the reported max_recv_sge value
	ASoC: dt-bindings: lpass: Fix and common up lpass dai ids
	ASoC: qcom: Fix incorrect volatile registers
	ASoC: qcom: Fix broken support to MI2S TERTIARY and QUATERNARY
	ASoC: qcom: lpass-ipq806x: fix bitwidth regmap field
	spi: altera: Fix memory leak on error path
	ASoC: Intel: Skylake: skl-topology: Fix OOPs ib skl_tplg_complete
	powerpc/64s: prevent recursive replay_soft_interrupts causing superfluous interrupt
	pNFS/NFSv4: Fix a layout segment leak in pnfs_layout_process()
	pNFS/NFSv4: Update the layout barrier when we schedule a layoutreturn
	ASoC: SOF: Intel: soundwire: fix select/depend unmet dependencies
	ASoC: qcom: lpass: Fix out-of-bounds DAI ID lookup
	iwlwifi: pcie: avoid potential PNVM leaks
	iwlwifi: pnvm: don't skip everything when not reloading
	iwlwifi: pnvm: don't try to load after failures
	iwlwifi: pcie: set LTR on more devices
	iwlwifi: pcie: use jiffies for memory read spin time limit
	iwlwifi: pcie: reschedule in long-running memory reads
	mac80211: pause TX while changing interface type
	ice: fix FDir IPv6 flexbyte
	ice: Implement flow for IPv6 next header (extension header)
	ice: update dev_addr in ice_set_mac_address even if HW filter exists
	ice: Don't allow more channels than LAN MSI-X available
	ice: Fix MSI-X vector fallback logic
	i40e: acquire VSI pointer only after VF is initialized
	igc: fix link speed advertising
	net/mlx5: Fix memory leak on flow table creation error flow
	net/mlx5e: E-switch, Fix rate calculation for overflow
	net/mlx5e: free page before return
	net/mlx5e: Reduce tc unsupported key print level
	net/mlx5: Maintain separate page trees for ECPF and PF functions
	net/mlx5e: Disable hw-tc-offload when MLX5_CLS_ACT config is disabled
	net/mlx5e: Fix CT rule + encap slow path offload and deletion
	net/mlx5e: Correctly handle changing the number of queues when the interface is down
	net/mlx5e: Revert parameters on errors when changing trust state without reset
	net/mlx5e: Revert parameters on errors when changing MTU and LRO state without reset
	net/mlx5: CT: Fix incorrect removal of tuple_nat_node from nat rhashtable
	can: dev: prevent potential information leak in can_fill_info()
	ACPI/IORT: Do not blindly trust DMA masks from firmware
	of/device: Update dma_range_map only when dev has valid dma-ranges
	iommu/amd: Use IVHD EFR for early initialization of IOMMU features
	iommu/vt-d: Correctly check addr alignment in qi_flush_dev_iotlb_pasid()
	nvme-multipath: Early exit if no path is available
	selftests: forwarding: Specify interface when invoking mausezahn
	rxrpc: Fix memory leak in rxrpc_lookup_local
	NFC: fix resource leak when target index is invalid
	NFC: fix possible resource leak
	ASoC: mediatek: mt8183-da7219: ignore TDM DAI link by default
	ASoC: mediatek: mt8183-mt6358: ignore TDM DAI link by default
	ASoC: topology: Properly unregister DAI on removal
	ASoC: topology: Fix memory corruption in soc_tplg_denum_create_values()
	scsi: qla2xxx: Fix description for parameter ql2xenforce_iocb_limit
	team: protect features update by RCU to avoid deadlock
	tcp: make TCP_USER_TIMEOUT accurate for zero window probes
	tcp: fix TLP timer not set when CA_STATE changes from DISORDER to OPEN
	vsock: fix the race conditions in multi-transport support
	Linux 5.10.13

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I75f419b25f24da559e446d62f75ce6bb9b0a5396
commit cf5b2483a5
Greg Kroah-Hartman, 2021-02-04 00:35:49 +01:00
168 changed files with 1321 additions and 562 deletions


@ -100,6 +100,11 @@ Instruction Macros
~~~~~~~~~~~~~~~~~~
This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
``objtool`` requires that all code must be contained in an ELF symbol. Symbol
names that have a ``.L`` prefix do not emit symbol table entries. ``.L``
prefixed symbols can be used within a code region, but should be avoided for
denoting a range of code via ``SYM_*_START/END`` annotations.
* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
most frequent markings**. They are used for functions with standard calling
conventions -- global and local. Like in C, they both align the functions to


@ -1324,7 +1324,7 @@ documentation when it pops into existence).
:Capability: KVM_CAP_ENABLE_CAP_VM
:Architectures: all
:Type: vcpu ioctl
:Type: vm ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 12
SUBLEVEL = 13
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -15,7 +15,8 @@ static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
offset = fdt_add_subnode(fdt, 0, node_path);
/* Add the node to root if not found, dropping the leading '/' */
offset = fdt_add_subnode(fdt, 0, node_path + 1);
return offset;
}


@ -16,6 +16,13 @@ chosen {
stdout-path = &uart1;
};
aliases {
mmc0 = &usdhc2;
mmc1 = &usdhc3;
mmc2 = &usdhc4;
/delete-property/ mmc3;
};
memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x80000000>;


@ -418,7 +418,7 @@ reg_2p5v: ldo2 {
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
regulator-name = "vdd1p8";
regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;


@ -137,7 +137,7 @@ lcd_out: endpoint {
lcd_backlight: lcd-backlight {
compatible = "pwm-backlight";
pwms = <&pwm4 0 5000000>;
pwms = <&pwm4 0 5000000 0>;
pwm-names = "LCD_BKLT_PWM";
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
@ -167,7 +167,7 @@ i2c_lcd: i2c-gpio-lcd {
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
status = "disabld";
status = "disabled";
};
i2c_cam: i2c-gpio-cam {
@ -179,7 +179,7 @@ i2c_cam: i2c-gpio-cam {
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
status = "disabld";
status = "disabled";
};
};


@ -53,7 +53,6 @@ vcc_3v3: regulator-vcc-3v3 {
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
phy-handle = <&phy>;
phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
@ -63,10 +62,19 @@ mdio {
#address-cells = <1>;
#size-cells = <0>;
phy: ethernet-phy@0 {
/*
* The PHY can appear at either address 0 or 4 due to the
* configuration (LED) pin not being pulled sufficiently.
*/
ethernet-phy@0 {
reg = <0>;
qca,clk-out-frequency = <125000000>;
};
ethernet-phy@4 {
reg = <4>;
qca,clk-out-frequency = <125000000>;
};
};
};


@ -12,4 +12,42 @@ cpu@300 {
200000 0>;
};
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/* Modem trace memory */
ram@06000000 {
reg = <0x06000000 0x00f00000>;
no-map;
};
/* Modem shared memory */
ram@06f00000 {
reg = <0x06f00000 0x00100000>;
no-map;
};
/* Modem private memory */
ram@07000000 {
reg = <0x07000000 0x01000000>;
no-map;
};
/*
* Initial Secure Software ISSW memory
*
* This is probably only used if the kernel tries
* to actually call into trustzone to run secure
* applications, which the mainline kernel probably
* will not do on this old chipset. But you can never
* be too careful, so reserve this memory anyway.
*/
ram@17f00000 {
reg = <0x17f00000 0x00100000>;
no-map;
};
};
};


@ -12,4 +12,42 @@ cpu@300 {
200000 0>;
};
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/* Modem trace memory */
ram@06000000 {
reg = <0x06000000 0x00f00000>;
no-map;
};
/* Modem shared memory */
ram@06f00000 {
reg = <0x06f00000 0x00100000>;
no-map;
};
/* Modem private memory */
ram@07000000 {
reg = <0x07000000 0x01000000>;
no-map;
};
/*
* Initial Secure Software ISSW memory
*
* This is probably only used if the kernel tries
* to actually call into trustzone to run secure
* applications, which the mainline kernel probably
* will not do on this old chipset. But you can never
* be too careful, so reserve this memory anyway.
*/
ram@17f00000 {
reg = <0x17f00000 0x00100000>;
no-map;
};
};
};


@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "ste-dbx5x0.dtsi"
/ {
cpus {
cpu@300 {
/* cpufreq controls */
operating-points = <1152000 0
800000 0
400000 0
200000 0>;
};
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/*
* Initial Secure Software ISSW memory
*
* This is probably only used if the kernel tries
* to actually call into trustzone to run secure
* applications, which the mainline kernel probably
* will not do on this old chipset. But you can never
* be too careful, so reserve this memory anyway.
*/
ram@17f00000 {
reg = <0x17f00000 0x00100000>;
no-map;
};
};
};


@ -4,7 +4,7 @@
*/
/dts-v1/;
#include "ste-db8500.dtsi"
#include "ste-db9500.dtsi"
#include "ste-href-ab8500.dtsi"
#include "ste-href-family-pinctrl.dtsi"


@ -67,6 +67,7 @@
#define MX6Q_CCM_CCR 0x0
.align 3
.arm
.macro sync_l2_cache


@ -4,11 +4,16 @@
*/
usb {
compatible = "simple-bus";
dma-ranges;
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
/*
* Internally, USB bus to the interconnect can only address up
* to 40-bit
*/
dma-ranges = <0 0 0 0 0x100 0x0>;
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
reg = <0x0 0x00000000 0x0 0x100>;


@ -93,7 +93,7 @@ dpclk: clock-controller@f1f0000 {
reboot {
compatible ="syscon-reboot";
regmap = <&rst>;
offset = <0xb0>;
offset = <0>;
mask = <0x02>;
};


@ -259,7 +259,7 @@ gpio3: gpio@30220000 {
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
};
gpio4: gpio@30230000 {


@ -202,9 +202,8 @@ config PREFETCH
depends on PA8X00 || PA7200
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
default y if !MODULES || UBSAN || FTRACE
default n
def_bool y if !MODULES || UBSAN || FTRACE
bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead


@ -997,10 +997,17 @@ intr_do_preempt:
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
ldil L%intr_restore, %r2
load32 preempt_schedule_irq, %r1
bv %r0(%r1)
ldo R%intr_restore(%r2), %r2
#else
ldil L%intr_restore, %r1
BL preempt_schedule_irq, %r2
nop
b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
ldo R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */
/*


@ -180,14 +180,19 @@ void notrace restore_interrupts(void)
void replay_soft_interrupts(void)
{
/*
* We use local_paca rather than get_paca() to avoid all
* the debug_smp_processor_id() business in this low level
* function
*/
unsigned char happened = local_paca->irq_happened;
struct pt_regs regs;
/*
* Be careful here, calling these interrupt handlers can cause
* softirqs to be raised, which they may run when calling irq_exit,
* which will cause local_irq_enable() to be run, which can then
* recurse into this function. Don't keep any state across
* interrupt handler calls which may change underneath us.
*
* We use local_paca rather than get_paca() to avoid all the
* debug_smp_processor_id() business in this low level function.
*/
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
@ -209,7 +214,7 @@ void replay_soft_interrupts(void)
* This is a higher priority interrupt than the others, so
* replay it first.
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
regs.trap = 0xe60;
handle_hmi_exception(&regs);
@ -217,7 +222,7 @@ void replay_soft_interrupts(void)
hard_irq_disable();
}
if (happened & PACA_IRQ_DEC) {
if (local_paca->irq_happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
regs.trap = 0x900;
timer_interrupt(&regs);
@ -225,7 +230,7 @@ void replay_soft_interrupts(void)
hard_irq_disable();
}
if (happened & PACA_IRQ_EE) {
if (local_paca->irq_happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
regs.trap = 0x500;
do_IRQ(&regs);
@ -233,7 +238,7 @@ void replay_soft_interrupts(void)
hard_irq_disable();
}
if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (IS_ENABLED(CONFIG_PPC_BOOK3E))
regs.trap = 0x280;
@ -245,7 +250,7 @@ void replay_soft_interrupts(void)
}
/* Book3E does not support soft-masking PMI interrupts */
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
local_paca->irq_happened &= ~PACA_IRQ_PMI;
regs.trap = 0xf00;
performance_monitor_exception(&regs);
@ -253,8 +258,7 @@ void replay_soft_interrupts(void)
hard_irq_disable();
}
happened = local_paca->irq_happened;
if (happened & ~PACA_IRQ_HARD_DIS) {
if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.


@ -35,7 +35,7 @@ void uv_query_info(void)
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
uv_info.max_guest_cpus = uvcb.max_guest_cpus;
uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST


@ -96,7 +96,7 @@ struct uv_cb_qui {
u32 max_num_sec_conf;
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
u16 max_guest_cpus;
u16 max_guest_cpu_id;
u8 reserveda0[200 - 160];
} __packed __aligned(8);
@ -273,7 +273,7 @@ struct uv_info {
unsigned long guest_cpu_stor_len;
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
unsigned short max_guest_cpus;
unsigned short max_guest_cpu_id;
};
extern struct uv_info uv_info;


@ -368,7 +368,7 @@ static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return scnprintf(page, PAGE_SIZE, "%d\n",
uv_info.max_guest_cpus);
uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =


@ -31,7 +31,7 @@ SYM_FUNC_START_NOALIGN(\name)
.endif
call \func
jmp .L_restore
jmp __thunk_restore
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
@ -44,7 +44,7 @@ SYM_FUNC_END(\name)
#endif
#ifdef CONFIG_PREEMPTION
SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %r11
popq %r10
popq %r9
@ -56,6 +56,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
popq %rdi
popq %rbp
ret
_ASM_NOKPROBE(.L_restore)
SYM_CODE_END(.L_restore)
_ASM_NOKPROBE(__thunk_restore)
SYM_CODE_END(__thunk_restore)
#endif


@ -616,6 +616,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
#endif
/* Device interrupts common/spurious */


@ -199,6 +199,10 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (WARN_ON(!is_guest_mode(vcpu)))
return true;
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
@ -595,6 +599,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->nested.vmcb12_gpa = 0;
WARN_ON_ONCE(svm->nested.nested_run_pending);
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
/* in case we halted in L2 */
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;


@ -3123,13 +3123,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
return 0;
}
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_host_map *map;
struct page *page;
u64 hpa;
/*
* hv_evmcs may end up being not mapped after migration (when
@ -3152,6 +3148,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
}
return true;
}
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_host_map *map;
struct page *page;
u64 hpa;
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@ -3220,6 +3227,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
return true;
}
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
if (!nested_get_evmcs_page(vcpu))
return false;
if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
return true;
}
@ -4416,6 +4435,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
@ -6049,11 +6070,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
if (vmx->nested.hv_evmcs)
copy_enlightened_to_vmcs12(vmx);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
} else {
copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
if (!vmx->nested.need_vmcs12_to_shadow_sync) {
if (vmx->nested.hv_evmcs)
copy_enlightened_to_vmcs12(vmx);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
}
}
BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@ -6573,7 +6597,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
.get_nested_state_pages = nested_get_vmcs12_pages,
.get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,


@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
edx.split.bit_width_fixed = min_t(int,
edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}


@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
@ -4199,6 +4200,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
{
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_SMI, vcpu))
process_smi(vcpu);
/*
* In guest mode, payload delivery should be deferred,
* so that the L1 hypervisor can intercept #PF before


@ -583,6 +583,13 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
exc_debug(regs);
}
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
pr_err("Unknown trap in Xen PV mode.");
BUG();
}
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
@ -631,6 +638,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
unsigned int nr;
bool ist_okay = false;
bool found = false;
/*
* Replace trap handler addresses by Xen specific ones.
@ -645,6 +653,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
if (*addr == entry->orig) {
*addr = entry->xen;
ist_okay = entry->ist_okay;
found = true;
break;
}
}
@ -655,9 +664,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
nr = (*addr - (void *)early_idt_handler_array[0]) /
EARLY_IDT_HANDLER_SIZE;
*addr = (void *)xen_early_idt_handler_array[nr];
found = true;
}
if (WARN_ON(ist != 0 && !ist_okay))
if (!found)
*addr = (void *)xen_asm_exc_xen_unknown_trap;
if (WARN_ON(found && ist != 0 && !ist_okay))
return false;
return true;


@ -178,6 +178,7 @@ xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT


@ -303,7 +303,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct request_queue *q = hctx->queue;
struct blk_mq_tag_set *set = q->tag_set;
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
users = atomic_read(&set->active_queues_shared_sbitmap);
} else {


@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
ncomp = (struct acpi_iort_named_component *)node->node_data;
if (!ncomp->memory_address_limit) {
pr_warn(FW_BUG "Named component missing memory address limit\n");
return -EINVAL;
}
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
rc = (struct acpi_iort_root_complex *)node->node_data;
if (!rc->memory_address_limit) {
pr_warn(FW_BUG "Root complex missing memory address limit\n");
return -EINVAL;
}
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;


@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
if (!adev->data.of_compatible)
return 0;
if (len > 0 && add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (adev->data.of_compatible)
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
else
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;


@ -174,6 +174,8 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
struct mutex thermal_check_lock;
refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
@ -495,14 +497,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}
static void acpi_thermal_check(void *data)
{
struct acpi_thermal *tz = data;
thermal_zone_device_update(tz->thermal_zone,
THERMAL_EVENT_UNSPECIFIED);
}
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@ -900,6 +894,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
static void acpi_queue_thermal_check(struct acpi_thermal *tz)
{
if (!work_pending(&tz->thermal_check_work))
queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
}
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@ -910,17 +910,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
acpi_thermal_check(tz);
acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_thermal_check(tz);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
acpi_thermal_check(tz);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@ -1020,7 +1020,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
acpi_thermal_check(tz);
/*
* In general, it is not sufficient to check the pending bit, because
* subsequent instances of this function may be queued after one of them
* has started running (e.g. if _TMP sleeps). Avoid bailing out if just
* one of them is running, though, because it may have done the actual
* check some time ago, so allow at least one of them to block on the
* mutex while another one is running the update.
*/
if (!refcount_dec_not_one(&tz->thermal_check_count))
return;
mutex_lock(&tz->thermal_check_lock);
thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
refcount_inc(&tz->thermal_check_count);
mutex_unlock(&tz->thermal_check_lock);
}
static int acpi_thermal_add(struct acpi_device *device)
@ -1052,6 +1070,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
refcount_set(&tz->thermal_check_count, 3);
mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
@ -1117,7 +1137,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
acpi_queue_thermal_check(tz);
return AE_OK;
}


@ -1029,6 +1029,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
/*
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@ -1067,10 +1073,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}


@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_granularity = info->discard_granularity ?:
info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
int err;
unsigned int discard_granularity;
unsigned int discard_alignment;
info->feature_discard = 1;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"discard-granularity", "%u", &discard_granularity,
"discard-alignment", "%u", &discard_alignment,
NULL);
if (!err) {
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
"discard-granularity",
0);
info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
"discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);


@ -6,8 +6,6 @@ config MXC_CLK
config MXC_CLK_SCU
tristate
depends on ARCH_MXC
depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1


@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
#ifdef CONFIG_PM
static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
return 0;
}
static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
return 0;
}
#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)


@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_floor_ops,
},
};
@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_floor_ops,
},
};


@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
__le32 byte_cnt;
union {
__le32 src;
dma_addr_t src_dma;
u32 src_dma;
};
union {
__le32 dst;
dma_addr_t dst_dma;
u32 dst_dma;
};
__le32 next_dma;


@ -3,8 +3,9 @@
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
* Note, all properties are considered as u8 arrays.
* To get a value of any of them the caller must use device_property_read_u8_array().
* Properties are stored either as:
* u8 arrays which can be retrieved with device_property_read_u8_array() or
* booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
entry_len);
if (entry_len)
entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
entry_len);
else
entry[i] = PROPERTY_ENTRY_BOOL(key);
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,


@ -13,6 +13,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and


@ -575,6 +575,7 @@ struct pptable_funcs {
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);


@ -200,6 +200,9 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);


@ -2255,19 +2255,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_rpm) {
if (speed > 100)
speed = 100;
rpm = speed * smu->fan_max_rpm / 100;
ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
}
if (smu->ppt_funcs->set_fan_speed_percent)
ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
mutex_unlock(&smu->mutex);


@ -2366,6 +2366,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,


@ -2710,6 +2710,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,


@ -2776,6 +2776,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,


@ -1122,6 +1122,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t duty100, duty;
uint64_t tmp64;
if (speed > 100)
speed = 100;
if (smu_v11_0_auto_fan_control(smu, 0))
return -EINVAL;
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (!duty100)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
@ -1130,7 +1159,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);


@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma,
&cb_kernel_ivb,
desc_count);
/* Reset inherited context registers */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
batch_add(&cmds, 0xffff0000);
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_flush(&cmds);
/* Switch to the media pipeline and our base address */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma,
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
/* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
/* Execute the kernel on all HW threads */
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);


@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
&ggtt->error_capture,
PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
if (ret)
return ret;
/*
* Reserve a mappable slot for our lockless error capture.
*
* We strongly prefer taking address 0x0 in order to protect
* other critical buffers against accidental overwrites,
* as writing to address 0 is a very common mistake.
*
* Since 0 may already be in use by the system (e.g. the BIOS
* framebuffer), we let the reservation fail quietly and hope
* 0 remains reserved always.
*
* If we fail to reserve 0, and then fail to find any space
* for an error-capture, remain silent. We can afford not
* to reserve an error_capture node as we have fallback
* paths, and we trust that 0 will remain reserved. However,
* the only likely reason for failure to insert is a driver
* bug, which we expect to cause other failures...
*/
ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
drm_mm_insert_node_in_range(&ggtt->vm.mm,
&ggtt->error_capture,
ggtt->error_capture.size, 0,
ggtt->error_capture.color,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
}
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_dbg(&ggtt->vm.i915->drm,
"Reserved GGTT:[%llx, %llx] for use by error capture\n",
ggtt->error_capture.start,
ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
drm_dbg_kms(&ggtt->vm.i915->drm,
"clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
drm_dbg(&ggtt->vm.i915->drm,
"clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}


@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref)
int __i915_active_wait(struct i915_active *ref, int state)
{
int err;
might_sleep();
if (!i915_active_acquire_if_busy(ref))
return 0;
/* Any fence added after the wait begins will not be auto-signaled */
err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
if (i915_active_acquire_if_busy(ref)) {
int err;
if (!i915_active_is_idle(ref) &&
___wait_var_event(ref, i915_active_is_idle(ref),
state, 0, 0, schedule()))
return -EINTR;
err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
if (___wait_var_event(ref, i915_active_is_idle(ref),
state, 0, 0, schedule()))
return -EINTR;
}
/*
* After the wait is complete, the caller may free the active.
* We have to flush any concurrent retirement before returning.
*/
flush_work(&ref->work);
return 0;
}


@ -1347,7 +1347,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool


@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
return val;
}
static void init_rc6(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
intel_wakeref_t wakeref;
with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
pmu->sample[__I915_SAMPLE_RC6].cur;
pmu->sleep_last = ktime_get();
}
}
static void park_rc6(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
return __get_rc6(gt);
}
static void init_rc6(struct i915_pmu *pmu) { }
static void park_rc6(struct drm_i915_private *i915) {}
#endif
@ -613,10 +625,8 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref;
unsigned long flags;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@ -627,13 +637,6 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
if (pmu->enable_count[bit] == 0 &&
config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@ -674,8 +677,6 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
@ -1101,6 +1102,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.slot = CPUHP_INVALID;
init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,


@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put_batch;
goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,


@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
SET_CONVERSION,
NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);


@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
SET_CONVERSION,
NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,


@ -22,6 +22,7 @@
#include "head.h"
#include "core.h"
#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
return 0;
}
static int
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
ret = PUSH_WAIT(push, 5);
if (ret)
return ret;
PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
return 0;
}
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
@ -101,7 +127,7 @@ head917d = {
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
.curs_set = head907d_curs_set,
.curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,


@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
nvif_notify_get(&wndw->notify);
}
static const u64 nv50_cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
const u64 *format_modifiers;
int nformat;
int ret;
@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
format, nformat,
nouveau_display(dev)->format_modifiers,
type, "%s-%d", name, index);
if (type == DRM_PLANE_TYPE_CURSOR)
format_modifiers = nv50_cursor_format_modifiers;
else
format_modifiers = nouveau_display(dev)->format_modifiers;
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;


@ -66,6 +66,10 @@
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)


@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
/* We need to fail if svm is disabled */
if (!cli->drm->svm)
return -ENOSYS;
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;


@ -618,11 +618,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
/* 96kB */
drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
/* 48k words of 2x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
/* 70k words */
drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
/* 60k words of 4x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.


@ -437,6 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
@ -472,7 +473,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
lbm = pix_per_line * 16;
}
lbm = roundup(lbm, 32);
/* Align it to 64 or 128 (hvs5) bytes */
lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
@ -912,9 +917,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
SCALER_POS1_SCL_WIDTH) |
SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
SCALER_POS1_SCL_HEIGHT));
SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */


@ -2471,7 +2471,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;


@ -3305,8 +3305,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
&dev->port[port_num].roce.nb);
err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@ -3318,8 +3317,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
&dev->port[port_num].roce.nb);
unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}


@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return false;
return !!(iommu->features & f);
return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)


@ -379,6 +379,10 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET 36
#define IOMMU_IVINFO_EFRSUP BIT(0)
/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6


@ -257,6 +257,8 @@ static void init_device_table_dma(void);
static bool amd_iommu_pre_enabled = true;
static u32 amd_iommu_ivinfo __initdata;
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
* (i.e. before PCI init).
*/
static void __init early_iommu_features_init(struct amd_iommu *iommu,
struct ivhd_header *h)
{
if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
iommu->features = h->efr_reg;
}
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1584,6 +1598,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
(h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
early_iommu_features_init(iommu, h);
break;
default:
return -EINVAL;
@@ -1775,6 +1792,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
/*
* Note: IVHD 0x11 and 0x40 also contain an exact copy
* of the IOMMU Extended Feature Register [MMIO Offset 0030h].
* Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
*/
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
u64 features;
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return;
/* read extended feature bits */
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
if (!iommu->features) {
iommu->features = features;
return;
}
/*
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
if (features != iommu->features)
pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
features, iommu->features);
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
@@ -1794,8 +1840,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
/* read extended feature bits */
iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -2525,6 +2570,11 @@ static void __init free_dma_resources(void)
free_unity_maps();
}
static void __init ivinfo_init(void *ivrs)
{
amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
@@ -2579,6 +2629,8 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
ivinfo_init(ivrs_base);
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
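
Taken together, these hunks set up a two-stage EFR discovery: ivinfo_init() caches the 32-bit IVINFO word at byte offset 36 of the IVRS table, early_iommu_features_init() trusts the IVHD copy of EFR only when the EFRSup bit is set, and late_iommu_features_init() consults the MMIO copy once PCI is up. The resulting precedence, as a sketch read from the hunks above:

    /* EFR source precedence after this change:
     *   1. IVHD copy, if IVINFO.EFRSup (bit 0) is set  -- available before PCI init
     *   2. MMIO copy [Offset 0030h], only if nothing was cached in step 1
     * If both exist and disagree, a firmware warning is printed and the
     * IVHD value wins. */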


@@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
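
The replaced check was off by one bit: GENMASK_ULL(h, 0) includes bit h itself, so an address exactly aligned to the invalidation granule could still trip the warning, while IS_ALIGNED() tests only the low bits. A worked example with hypothetical values (VTD_PAGE_SHIFT is 12):

    /* size_order == 1 -> 8 KiB granule, required alignment 0x2000; addr == 0x2000:
     *   old: 0x2000 & GENMASK_ULL(13, 0) == 0x2000            -> spurious warning
     *   new: IS_ALIGNED(0x2000, 0x2000), 0x2000 & 0x1fff == 0 -> no warning */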


@@ -378,14 +378,15 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
unsigned long flags;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
read_unlock(&trig->leddev_list_lock);
read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -396,11 +397,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
unsigned long flags;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@@ -408,7 +410,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
read_unlock(&trig->leddev_list_lock);
read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,


@@ -33,6 +33,8 @@
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
return 0; \
return (((sb)->feature_compat & \
BCH##_FEATURE_COMPAT_##flagname) != 0); \
} \
@@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
return 0; \
return (((sb)->feature_ro_compat & \
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
} \
@@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
return 0; \
return (((sb)->feature_incompat & \
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
} \
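
Each of these macros stamps out a has/set/clear trio per feature bit, which is why the version guard is added once per macro rather than once per feature. As a sketch, an instantiation for a hypothetical incompat feature X now expands to roughly:

    static inline int bch_has_feature_X(struct cache_sb *sb)
    {
        if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES)
            return 0;   /* pre-FEATURES superblocks carry no feature words */
        return ((sb->feature_incompat & BCH_FEATURE_INCOMPAT_X) != 0);
    }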


@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/


@@ -320,7 +320,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
data->body);
spin_lock(&data->keylock);
if (scancode) {
delay = nsecs_to_jiffies(dev->timeout) +
delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
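
rc_dev timeouts are now stored in microseconds rather than nanoseconds, so the jiffies conversion has to match. A worked example, assuming HZ=100 and a hypothetical 125 ms timeout:

    /* dev->timeout == 125000 (us):
     *   usecs_to_jiffies(125000) -> 13 jiffies (125 ms rounded up to the tick)
     *   nsecs_to_jiffies(125000) would read it as 125 us -> well under one
     *   jiffy, making the rx timeout fire almost immediately. */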


@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
itdev->params.sample_period;
itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *


@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
dev->registered = true;
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
dev->registered = true;
/*
* once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called


@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);


@@ -1163,7 +1163,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
struct can_berr_counter bec;
struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)


@@ -4046,20 +4046,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
* If the VF is indeed in reset, the vsi pointer has
* to show on the newly loaded vsi under pf->vsi[id].
* Acquire the VSI pointer only after the VF has been
* properly initialized.
*/
for (i = 0; i < 15; i++) {
if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
if (i > 0)
vsi = pf->vsi[vf->lan_vsi_idx];
if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
}
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4064,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EAGAIN;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,


@@ -68,7 +68,9 @@
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0


@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
return min_t(int, num_online_cpus(),
pf->hw.func_caps.common_cap.num_txq);
return min3(pf->num_lan_msix, (u16)num_online_cpus(),
(u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
return min_t(int, num_online_cpus(),
pf->hw.func_caps.common_cap.num_rxq);
return min3(pf->num_lan_msix, (u16)num_online_cpus(),
(u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**


@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
/* if no protocol requested, use IPPROTO_NONE */
if (!fsp->m_u.usr_ip6_spec.l4_proto)
input->ip.v6.proto = IPPROTO_NONE;
else
input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,


@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
num_online_cpus());
vsi->alloc_txq = min3(pf->num_lan_msix,
ice_get_avail_txq_count(pf),
(u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
num_online_cpus());
vsi->alloc_rxq = min3(pf->num_lan_msix,
ice_get_avail_rxq_count(pf),
(u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
pf->num_lan_rx = vsi->alloc_rxq;
vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];


@@ -3433,18 +3433,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
#define ICE_MIN_LAN_VECS 2
#define ICE_MIN_RDMA_VECS 2
#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
if (v_actual < ICE_MIN_LAN_VECS) {
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
pf->num_lan_msix = ICE_MIN_LAN_VECS;
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
}
}
@@ -4887,9 +4883,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, just return success */
/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}


@@ -1923,12 +1923,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
int ret;
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
if (l4.hdr != exthdr)
ipv6_skip_exthdr(skb, exthdr - skb->data,
&l4_proto, &frag_off);
ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
&l4_proto, &frag_off);
if (ret < 0)
return -1;
}
/* define outer transport */


@@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
@@ -1792,6 +1798,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
* We have to check this and convert it to ADVERTISE_2500_FULL
* (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
*/
if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
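
The explicit 2500baseT_Full handling is needed because a legacy u32 advertising mask can only represent link-mode bits 0-31, while ETHTOOL_LINK_MODE_2500baseT_Full_BIT is bit 47. In sketch form:

    /* ethtool_convert_link_mode_to_legacy_u32() keeps only bits 0..31 of the
     * link-mode bitmap, so 2500baseT_Full (bit 47) is silently dropped; it is
     * therefore tested on the full bitmap and mapped by hand to the legacy
     * ADVERTISE_2500_FULL flag. */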


@@ -275,7 +275,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
return err;
goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {


@@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
static bool
mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
{
return !!(entry->tuple_nat_node.next);
}
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -911,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_insert:
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node, tuples_nat_ht_params);
if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
if (entry->tuple_node.next)
rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
&entry->tuple_node,
tuples_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
&entry->tuple_node,
tuples_ht_params);
err_tuple:
err_set:
kfree(entry);
@@ -932,7 +938,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
mutex_lock(&ct_priv->shared_counter_lock);
if (entry->tuple_node.next)
if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
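
The new helper leans on an rhashtable invariant: entries here are zero-allocated (kzalloc, as far as the surrounding code shows), so tuple_nat_node.next stays NULL until rhashtable_insert_fast() links the node, and after insertion it is always non-NULL (tail nodes point at a "nulls" marker rather than NULL). Removing a node that was never inserted would corrupt the table, hence the check:

    /* entry->tuple_nat_node.next:
     *   NULL     -> never inserted into ct_tuples_nat_ht, do not remove
     *   non-NULL -> inserted (next element or nulls marker), safe to remove */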


@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
return NUM_IPSEC_SW_COUNTERS;
return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)


@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!opened)
reset_channels = false;
}
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
if (reset_channels)
if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
else
} else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
if (!err && !opened)
priv->channels.params = new_channels.params;
}
mutex_unlock(&priv->state_lock);


@@ -444,12 +444,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
new_channels.params = priv->channels.params;
new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
struct mlx5e_params old_params;
old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
if (err)
*cur_params = old_params;
goto out;
}


@@ -3580,7 +3580,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
struct mlx5e_params old_params;
old_params = priv->channels.params;
priv->channels.params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
if (err)
priv->channels.params = old_params;
goto out;
}
@@ -3723,7 +3730,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
struct mlx5e_params *old_params;
struct mlx5e_params *cur_params;
int err = 0;
bool reset;
@@ -3736,8 +3743,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
old_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
cur_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
@@ -3745,18 +3752,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
new_channels.params = *old_params;
new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
*old_params = new_channels.params;
struct mlx5e_params old_params;
old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
if (err)
*cur_params = old_params;
goto out;
}
@@ -4030,9 +4042,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (!reset) {
unsigned int old_mtu = params->sw_mtu;
params->sw_mtu = new_mtu;
if (preactivate)
preactivate(priv, NULL);
if (preactivate) {
err = preactivate(priv, NULL);
if (err) {
params->sw_mtu = old_mtu;
goto out;
}
}
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -4990,7 +5009,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS


@@ -738,7 +738,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_NETNS_LOCAL;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;


@@ -67,6 +67,7 @@
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1164,6 +1165,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
@@ -1194,6 +1198,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
goto offload_rule_0;
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
@@ -1202,6 +1209,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
@@ -2271,8 +2279,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -5009,13 +5017,13 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
u32 rate_mbps = 0;
u16 vport_num;
u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
@@ -5032,7 +5040,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
if (rate) {
rate = (rate * BITS_PER_BYTE) + 500000;
rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
}
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
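
For reference, do_div() from asm-generic/div64.h divides its 64-bit first argument in place - the variable becomes the quotient - and the macro's value is the 32-bit remainder. A sketch with made-up numbers:

    u64 n = 2500000123ULL;
    u32 rem = do_div(n, 1000000);   /* now n == 2500 (quotient), rem == 123 */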


@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);


@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}
#endif


@@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
u16 func_id;
u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
@@ -74,12 +74,17 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
static u32 get_function(u16 func_id, bool ec_function)
{
return func_id | (ec_function << 16);
}
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
if (!root)
return ERR_PTR(-ENOMEM);
err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;
root = page_root_per_func_id(dev, func_id);
root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
u32 func_id)
u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
if (iter->func_id != func_id)
if (iter->function != function)
continue;
fp = iter;
}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;
root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
goto map;
}
err = insert_page(dev, addr, page, func_id);
err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
err = alloc_4k(dev, &addr, func_id);
err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, func_id);
err = alloc_system_page(dev, function);
if (err)
goto out_4k;
@@ -384,7 +390,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
@@ -392,14 +398,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
return err;
}
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
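
The xarray of page trees is now keyed by a 32-bit "function" value rather than the bare 16-bit func_id, so pages granted on behalf of the embedded CPU get their own tree. Assuming the intended key layout, the composition is:

    /* function = func_id | (ec_function << 16)
     * e.g. func_id 5: host function -> key 0x00005, embedded CPU -> key 0x10005 */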


@@ -991,7 +991,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
list_for_each_entry(port, &team->port_list, list) {
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1005,6 +1006,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
__team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}


@@ -1325,6 +1325,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */


@@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
char pnvm_name[64];
int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
/* if we already have it, nothing to do either */
if (trans->pnvm_loaded)
return 0;
/* load from disk only if we haven't done it (or tried) before */
if (!trans->pnvm_loaded) {
const struct firmware *pnvm;
char pnvm_name[64];
int ret;
/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
*/
snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
trans->cfg->fw_name_pre);
/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
*/
snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
trans->cfg->fw_name_pre);
/* ...but replace the hyphen with the dot here. */
if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
/* ...but replace the hyphen with the dot here. */
if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
} else {
iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->pnvm_loaded = true;
} else {
iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
release_firmware(pnvm);
release_firmware(pnvm);
}
}
iwl_init_notification_wait(notif_wait, &pnvm_wait,


@@ -498,7 +498,7 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
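
The old mask tested bit 8 of the subsystem device ID but shifted right by 9, so the macro could never evaluate to anything but 0; the no-160MHz flag actually lives in bit 9. A worked example with a hypothetical subdevice of 0x0310:

    /* old: (0x0310 & 0x0100) >> 9 == 0x0100 >> 9 == 0   (0 for any input)
     * new: (0x0310 & 0x0200) >> 9 == 0x0200 >> 9 == 1   -> 160 MHz disabled */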

Some files were not shown because too many files have changed in this diff.